diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c591459f01b..ba3e3c1175b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -95,7 +95,7 @@ Contributing to the Elasticsearch codebase
JDK 10 is required to build Elasticsearch. You must have a JDK 10 installation
with the environment variable `JAVA_HOME` referencing the path to Java home for
your JDK 10 installation. By default, tests use the same runtime as `JAVA_HOME`.
-However, since Elasticsearch, supports JDK 8 the build supports compiling with
+However, since Elasticsearch supports JDK 8, the build supports compiling with
JDK 10 and testing on a JDK 8 runtime; to do this, set `RUNTIME_JAVA_HOME`
pointing to the Java home of a JDK 8 installation. Note that this mechanism
can be used to test against other JDKs as well; it is not limited only to JDK 8.
@@ -325,21 +325,19 @@ common configurations in our build and how we use them:
`compile`
Code that is on the classpath at both compile and -runtime. If the [`shadow`][shadow-plugin] plugin is applied to the project then -this code is bundled into the jar produced by the project.
+runtime.
`runtime`
Code that is not on the classpath at compile time but is on the classpath at
runtime. We mostly use this configuration to make sure that we do not
accidentally compile against dependencies of our dependencies, also known as
"transitive" dependencies.
-
`compileOnly`
Code that is on the classpath at comile time but that +
`compileOnly`
Code that is on the classpath at compile time but that should not be shipped with the project because it is "provided" by the runtime somehow. Elasticsearch plugins use this configuration to include dependencies that are bundled with Elasticsearch's server.
-
`shadow`
Only available in projects with the shadow plugin. Code -that is on the classpath at both compile and runtime but it *not* bundled into -the jar produced by the project. If you depend on a project with the `shadow` -plugin then you need to depend on this configuration because it will bring -along all of the dependencies you need at runtime.
+
`bundle`
Only available in projects with the shadow plugin.
+Dependencies with this configuration are bundled into the jar produced by the
+build. Since IDEs do not understand this configuration, we rig them to treat
+dependencies in this configuration as `compile` dependencies.
`testCompile`
Code that is on the classpath for compiling tests that are part of this project but not production code. The canonical example of this is `junit`.
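To make these distinctions concrete, here is a minimal, hypothetical `build.gradle` fragment that uses each configuration above. The coordinates, version properties, and project path are illustrative assumptions, not entries from our build:

    dependencies {
      // compiled against and shipped inside the artifact
      compile "org.apache.lucene:lucene-core:${versions.lucene}"
      // needed on the classpath only at runtime
      runtime "org.apache.logging.log4j:log4j-core:${versions.log4j}"
      // provided by the Elasticsearch server at runtime, so not shipped
      compileOnly "org.elasticsearch:elasticsearch:${version}"
      // merged into the shadow jar this project produces
      bundle project(':libs:some-shaded-lib')
      // on the classpath only when compiling and running tests
      testCompile "junit:junit:${versions.junit}"
    }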
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 0838af72871..992462b6fc0 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -21,10 +21,7 @@ apply plugin: 'elasticsearch.build' apply plugin: 'application' mainClassName = 'org.openjdk.jmh.Main' -// Not published so no need to assemble -tasks.remove(assemble) -build.dependsOn.remove('assemble') - +assemble.enabled = false archivesBaseName = 'elasticsearch-benchmarks' test.enabled = false diff --git a/build.gradle b/build.gradle index 341fbc04c0f..f6fead2bca4 100644 --- a/build.gradle +++ b/build.gradle @@ -16,21 +16,15 @@ * specific language governing permissions and limitations * under the License. */ + import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin +import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.BuildPlugin -import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionCollection import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.gradle.plugins.ide.eclipse.model.SourceFolder -import org.gradle.util.GradleVersion -import org.gradle.util.DistributionLocator -import org.apache.tools.ant.taskdefs.condition.Os -import org.apache.tools.ant.filters.ReplaceTokens - -import java.nio.file.Files -import java.nio.file.Path -import java.security.MessageDigest plugins { id 'com.gradle.build-scan' version '1.13.2' @@ -304,7 +298,7 @@ subprojects { // org.elasticsearch:elasticsearch must be the last one or all the links for the // other packages (e.g org.elasticsearch.client) will point to server rather than // their own artifacts. - if (project.plugins.hasPlugin(BuildPlugin)) { + if (project.plugins.hasPlugin(BuildPlugin) || project.plugins.hasPlugin(PluginBuildPlugin)) { String artifactsHost = VersionProperties.elasticsearch.isSnapshot() ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co" Closure sortClosure = { a, b -> b.group <=> a.group } Closure depJavadocClosure = { shadowed, dep -> @@ -322,13 +316,6 @@ subprojects { */ project.evaluationDependsOn(upstreamProject.path) project.javadoc.source += upstreamProject.javadoc.source - /* - * Do not add those projects to the javadoc classpath because - * we are going to resolve them with their source instead. - */ - project.javadoc.classpath = project.javadoc.classpath.filter { f -> - false == upstreamProject.configurations.archives.artifacts.files.files.contains(f) - } /* * Instead we need the upstream project's javadoc classpath so * we don't barf on the classes that it references. 
@@ -345,16 +332,16 @@ subprojects {
        project.configurations.compile.dependencies
          .findAll()
          .toSorted(sortClosure)
-          .each({ c -> depJavadocClosure(hasShadow, c) })
+          .each({ c -> depJavadocClosure(false, c) })
        project.configurations.compileOnly.dependencies
          .findAll()
          .toSorted(sortClosure)
-          .each({ c -> depJavadocClosure(hasShadow, c) })
+          .each({ c -> depJavadocClosure(false, c) })
        if (hasShadow) {
-          project.configurations.shadow.dependencies
+          project.configurations.bundle.dependencies
            .findAll()
            .toSorted(sortClosure)
-            .each({ c -> depJavadocClosure(false, c) })
+            .each({ c -> depJavadocClosure(true, c) })
        }
      }
    }
@@ -518,30 +505,33 @@ allprojects {
  tasks.cleanEclipse.dependsOn(wipeEclipseSettings)
  // otherwise the eclipse merging is *super confusing*
  tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings)
+
+  // work around https://github.com/gradle/gradle/issues/6582
+  tasks.eclipseProject.mustRunAfter tasks.cleanEclipseProject
+  tasks.matching { it.name == 'eclipseClasspath' }.all {
+    it.mustRunAfter { tasks.cleanEclipseClasspath }
+  }
+  tasks.matching { it.name == 'eclipseJdt' }.all {
+    it.mustRunAfter { tasks.cleanEclipseJdt }
+  }
+  tasks.copyEclipseSettings.mustRunAfter tasks.wipeEclipseSettings
}

allprojects {
  /*
   * IntelliJ and Eclipse don't know about the shadow plugin so when we're
-   * in "IntelliJ mode" or "Eclipse mode" add "runtime" dependencies
-   * eveywhere where we see a "shadow" dependency which will cause them to
-   * reference shadowed projects directly rather than rely on the shadowing
-   * to include them. This is the correct thing for it to do because it
-   * doesn't run the jar shadowing at all. This isn't needed for the project
+   * in "IntelliJ mode" or "Eclipse mode" switch "bundle" dependencies into
+   * regular "compile" dependencies. This isn't needed for the project
   * itself because the IDE configuration is done by SourceSets but it
   * *is* needed for projects that depend on the project doing the shadowing.
   * Without this they won't properly depend on the shadowed project.
   */
  if (isEclipse || isIdea) {
-    configurations.all { Configuration configuration ->
-      dependencies.all { Dependency dep ->
-        if (dep instanceof ProjectDependency) {
-          if (dep.getTargetConfiguration() == 'shadow') {
-            configuration.dependencies.add(project.dependencies.project(path: dep.dependencyProject.path, configuration: 'runtime'))
-          }
-        }
-      }
-    }
+    project.plugins.withType(ShadowPlugin).whenPluginAdded {
+      project.afterEvaluate {
+        project.configurations.compile.extendsFrom project.configurations.bundle
+      }
+    }
  }
}

@@ -582,62 +572,6 @@ wrapper {
  }
}

-static void assertLinesInFile(final Path path, final List<String> expectedLines) {
-  final List<String> actualLines = Files.readAllLines(path)
-  int line = 0
-  for (final String expectedLine : expectedLines) {
-    final String actualLine = actualLines.get(line)
-    if (expectedLine != actualLine) {
-      throw new GradleException("expected line [${line + 1}] in [${path}] to be [${expectedLine}] but was [${actualLine}]")
-    }
-    line++
-  }
-}
-
-/*
- * Check that all generated JARs have our NOTICE.txt and an appropriate
- * LICENSE.txt in them. We configurate this in gradle but we'd like to
- * be extra paranoid.
- */ -subprojects { project -> - project.tasks.withType(Jar).whenTaskAdded { jarTask -> - final Task extract = project.task("extract${jarTask.name.capitalize()}", type: LoggedExec) { - dependsOn jarTask - ext.destination = project.buildDir.toPath().resolve("jar-extracted/${jarTask.name}") - commandLine "${->new File(rootProject.compilerJavaHome, 'bin/jar')}", - 'xf', "${-> jarTask.outputs.files.singleFile}", 'META-INF/LICENSE.txt', 'META-INF/NOTICE.txt' - workingDir destination - onlyIf {jarTask.enabled} - doFirst { - project.delete(destination) - Files.createDirectories(destination) - } - } - - final Task checkNotice = project.task("verify${jarTask.name.capitalize()}Notice") { - dependsOn extract - onlyIf {jarTask.enabled} - doLast { - final List noticeLines = Files.readAllLines(project.noticeFile.toPath()) - final Path noticePath = extract.destination.resolve('META-INF/NOTICE.txt') - assertLinesInFile(noticePath, noticeLines) - } - } - project.check.dependsOn checkNotice - - final Task checkLicense = project.task("verify${jarTask.name.capitalize()}License") { - dependsOn extract - onlyIf {jarTask.enabled} - doLast { - final List licenseLines = Files.readAllLines(project.licenseFile.toPath()) - final Path licensePath = extract.destination.resolve('META-INF/LICENSE.txt') - assertLinesInFile(licensePath, licenseLines) - } - } - project.check.dependsOn checkLicense - } -} - /* Remove assemble/dependenciesInfo on all qa projects because we don't need to publish * artifacts for them. */ gradle.projectsEvaluated { @@ -645,13 +579,11 @@ gradle.projectsEvaluated { if (project.path.startsWith(':qa')) { Task assemble = project.tasks.findByName('assemble') if (assemble) { - project.tasks.remove(assemble) - project.build.dependsOn.remove('assemble') + assemble.enabled = false } Task dependenciesInfo = project.tasks.findByName('dependenciesInfo') if (dependenciesInfo) { - project.tasks.remove(dependenciesInfo) - project.precommit.dependsOn.remove('dependenciesInfo') + dependenciesInfo.enabled = false } } } diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 967c2e27ee8..da8ad788164 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -17,6 +17,7 @@ * under the License. */ import java.nio.file.Files +import org.gradle.util.GradleVersion plugins { id 'java-gradle-plugin' @@ -102,7 +103,6 @@ dependencies { compile 'com.netflix.nebula:gradle-info-plugin:3.0.3' compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r' compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE.... 
- compile 'de.thetaphi:forbiddenapis:2.5' compile 'org.apache.rat:apache-rat:0.11' compile "org.elasticsearch:jna:4.5.1" compile 'com.github.jengelman.gradle.plugins:shadow:2.0.4' @@ -167,7 +167,6 @@ if (project != rootProject) { it.tasks.matching { it.name == 'publishNebulaPublicationToLocalTestRepository'} } exclude "**/*Tests.class" - include "**/*IT.class" testClassesDirs = sourceSets.test.output.classesDirs classpath = sourceSets.test.runtimeClasspath inputs.dir(file("src/testKit")) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index bf3ffcabe2f..110982e31e6 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -38,7 +38,6 @@ import org.gradle.api.artifacts.ModuleDependency import org.gradle.api.artifacts.ModuleVersionIdentifier import org.gradle.api.artifacts.ProjectDependency import org.gradle.api.artifacts.ResolvedArtifact -import org.gradle.api.artifacts.SelfResolvingDependency import org.gradle.api.artifacts.dsl.RepositoryHandler import org.gradle.api.execution.TaskExecutionGraph import org.gradle.api.plugins.JavaPlugin @@ -57,6 +56,7 @@ import org.gradle.util.GradleVersion import java.nio.charset.StandardCharsets import java.time.ZoneOffset import java.time.ZonedDateTime + /** * Encapsulates build configuration for elasticsearch projects. */ @@ -79,8 +79,9 @@ class BuildPlugin implements Plugin { } project.pluginManager.apply('java') project.pluginManager.apply('carrotsearch.randomized-testing') - // these plugins add lots of info to our jars + configureConfigurations(project) configureJars(project) // jar config must be added before info broker + // these plugins add lots of info to our jars project.pluginManager.apply('nebula.info-broker') project.pluginManager.apply('nebula.info-basic') project.pluginManager.apply('nebula.info-java') @@ -91,8 +92,8 @@ class BuildPlugin implements Plugin { globalBuildInfo(project) configureRepositories(project) - configureConfigurations(project) project.ext.versions = VersionProperties.versions + configureSourceSets(project) configureCompile(project) configureJavadoc(project) configureSourcesJar(project) @@ -211,6 +212,7 @@ class BuildPlugin implements Plugin { project.rootProject.ext.minimumRuntimeVersion = minimumRuntimeVersion project.rootProject.ext.inFipsJvm = inFipsJvm project.rootProject.ext.gradleJavaVersion = JavaVersion.toVersion(gradleJavaVersion) + project.rootProject.ext.java9Home = "${-> findJavaHome("9")}" } project.targetCompatibility = project.rootProject.ext.minimumRuntimeVersion @@ -224,6 +226,7 @@ class BuildPlugin implements Plugin { project.ext.javaVersions = project.rootProject.ext.javaVersions project.ext.inFipsJvm = project.rootProject.ext.inFipsJvm project.ext.gradleJavaVersion = project.rootProject.ext.gradleJavaVersion + project.ext.java9Home = project.rootProject.ext.java9Home } private static String getPaddedMajorVersion(JavaVersion compilerJavaVersionEnum) { @@ -421,8 +424,10 @@ class BuildPlugin implements Plugin { project.configurations.compile.dependencies.all(disableTransitiveDeps) project.configurations.testCompile.dependencies.all(disableTransitiveDeps) project.configurations.compileOnly.dependencies.all(disableTransitiveDeps) + project.plugins.withType(ShadowPlugin).whenPluginAdded { - project.configurations.shadow.dependencies.all(disableTransitiveDeps) + Configuration bundle = 
project.configurations.create('bundle') + bundle.dependencies.all(disableTransitiveDeps) } } @@ -528,16 +533,21 @@ class BuildPlugin implements Plugin { project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask -> // The GenerateMavenPom task is aggressive about setting the destination, instead of fighting it, // just make a copy. + generatePOMTask.ext.pomFileName = null doLast { project.copy { from generatePOMTask.destination into "${project.buildDir}/distributions" - rename { "${project.archivesBaseName}-${project.version}.pom" } + rename { + generatePOMTask.ext.pomFileName == null ? + "${project.archivesBaseName}-${project.version}.pom" : + generatePOMTask.ext.pomFileName + } } } // build poms with assemble (if the assemble task exists) Task assemble = project.tasks.findByName('assemble') - if (assemble) { + if (assemble && assemble.enabled) { assemble.dependsOn(generatePOMTask) } } @@ -555,30 +565,6 @@ class BuildPlugin implements Plugin { publications { nebula(MavenPublication) { artifacts = [ project.tasks.shadowJar ] - artifactId = project.archivesBaseName - /* - * Configure the pom to include the "shadow" as compile dependencies - * because that is how we're using them but remove all other dependencies - * because they've been shaded into the jar. - */ - pom.withXml { XmlProvider xml -> - Node root = xml.asNode() - root.remove(root.dependencies) - Node dependenciesNode = root.appendNode('dependencies') - project.configurations.shadow.allDependencies.each { - if (false == it instanceof SelfResolvingDependency) { - Node dependencyNode = dependenciesNode.appendNode('dependency') - dependencyNode.appendNode('groupId', it.group) - dependencyNode.appendNode('artifactId', it.name) - dependencyNode.appendNode('version', it.version) - dependencyNode.appendNode('scope', 'compile') - } - } - // Be tidy and remove the element if it is empty - if (dependenciesNode.children.empty) { - root.remove(dependenciesNode) - } - } } } } @@ -586,6 +572,20 @@ class BuildPlugin implements Plugin { } } + /** + * Add dependencies that we are going to bundle to the compile classpath. + */ + static void configureSourceSets(Project project) { + project.plugins.withType(ShadowPlugin).whenPluginAdded { + ['main', 'test'].each {name -> + SourceSet sourceSet = project.sourceSets.findByName(name) + if (sourceSet != null) { + sourceSet.compileClasspath += project.configurations.bundle + } + } + } + } + /** Adds compiler settings to the project */ static void configureCompile(Project project) { if (project.compilerJavaVersion < JavaVersion.VERSION_1_10) { @@ -603,7 +603,6 @@ class BuildPlugin implements Plugin { } else { options.fork = true options.forkOptions.javaHome = compilerJavaHomeFile - options.forkOptions.memoryMaximumSize = "512m" } if (targetCompatibilityVersion == JavaVersion.VERSION_1_8) { // compile with compact 3 profile by default @@ -741,6 +740,7 @@ class BuildPlugin implements Plugin { } from(project.noticeFile.parent) { include project.noticeFile.name + rename { 'NOTICE.txt' } } } } @@ -763,9 +763,16 @@ class BuildPlugin implements Plugin { * better to be safe */ mergeServiceFiles() + /* + * Bundle dependencies of the "bundled" configuration. 
+ */ + configurations = [project.configurations.bundle] } // Make sure we assemble the shadow jar project.tasks.assemble.dependsOn project.tasks.shadowJar + project.artifacts { + apiElements project.tasks.shadowJar + } } } @@ -798,8 +805,6 @@ class BuildPlugin implements Plugin { systemProperty 'tests.task', path systemProperty 'tests.security.manager', 'true' systemProperty 'jna.nosys', 'true' - // TODO: remove this deprecation compatibility setting for 7.0 - systemProperty 'es.aggregations.enable_scripted_metric_agg_param', 'false' systemProperty 'compiler.java', project.ext.compilerJavaVersion.getMajorVersion() if (project.ext.inFipsJvm) { systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + "FIPS" @@ -872,13 +877,8 @@ class BuildPlugin implements Plugin { exclude '**/*$*.class' project.plugins.withType(ShadowPlugin).whenPluginAdded { - /* - * If we make a shaded jar we test against it. - */ + // Test against a shadow jar if we made one classpath -= project.tasks.compileJava.outputs.files - classpath -= project.configurations.compile - classpath -= project.configurations.runtime - classpath += project.configurations.shadow classpath += project.tasks.shadowJar.outputs.files dependsOn project.tasks.shadowJar } @@ -904,26 +904,6 @@ class BuildPlugin implements Plugin { additionalTest.dependsOn(project.tasks.testClasses) project.check.dependsOn(additionalTest) }); - - project.plugins.withType(ShadowPlugin).whenPluginAdded { - /* - * We need somewhere to configure dependencies that we don't wish - * to shade into the jar. The shadow plugin creates a "shadow" - * configuration which is *almost* exactly that. It is never - * bundled into the shaded jar but is used for main source - * compilation. Unfortunately, by default it is not used for - * *test* source compilation and isn't used in tests at all. This - * change makes it available for test compilation. - * - * Note that this isn't going to work properly with qa projects - * but they have no business applying the shadow plugin in the - * firstplace. 
- */ - SourceSet testSourceSet = project.sourceSets.findByName('test') - if (testSourceSet != null) { - testSourceSet.compileClasspath += project.configurations.shadow - } - } } private static configurePrecommit(Project project) { @@ -935,7 +915,7 @@ class BuildPlugin implements Plugin { it.group.startsWith('org.elasticsearch') == false } - project.configurations.compileOnly project.plugins.withType(ShadowPlugin).whenPluginAdded { - project.dependencyLicenses.dependencies += project.configurations.shadow.fileCollection { + project.dependencyLicenses.dependencies += project.configurations.bundle.fileCollection { it.group.startsWith('org.elasticsearch') == false } } @@ -946,7 +926,7 @@ class BuildPlugin implements Plugin { deps.runtimeConfiguration = project.configurations.runtime project.plugins.withType(ShadowPlugin).whenPluginAdded { deps.runtimeConfiguration = project.configurations.create('infoDeps') - deps.runtimeConfiguration.extendsFrom(project.configurations.runtime, project.configurations.shadow) + deps.runtimeConfiguration.extendsFrom(project.configurations.runtime, project.configurations.bundle) } deps.compileOnlyConfiguration = project.configurations.compileOnly project.afterEvaluate { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy index 7d5b793254f..daab0efc8c6 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy @@ -138,9 +138,8 @@ class VersionCollection { break } } - // caveat 0 - now dip back 2 versions to get the last supported snapshot version of the line - Version highestMinor = getHighestPreviousMinor(currentVersion.major - 1) - maintenanceBugfixSnapshot = replaceAsSnapshot(highestMinor) + // caveat 0 - the last supported snapshot of the line is on a version that we don't support (N-2) + maintenanceBugfixSnapshot = null } else { // caveat 3 did not apply. version is not a X.0.0, so we are somewhere on a X.Y line // only check till minor == 0 of the major @@ -293,7 +292,8 @@ class VersionCollection { * If you have a list [5.0.2, 5.1.2, 6.0.1, 6.1.1] and pass in 6 for the nextMajorVersion, it will return you 5.1.2 */ private Version getHighestPreviousMinor(Integer nextMajorVersion) { - return versionSet.headSet(Version.fromString("${nextMajorVersion}.0.0")).last() + SortedSet result = versionSet.headSet(Version.fromString("${nextMajorVersion}.0.0")) + return result.isEmpty() ? null : result.last() } /** diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 27f122b8610..9b2b1ca2156 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -35,8 +35,7 @@ public class DocsTestPlugin extends RestTestPlugin { // The distribution can be configured with -Dtests.distribution on the command line project.integTestCluster.distribution = System.getProperty('tests.distribution', 'zip') // Docs are published separately so no need to assemble - project.tasks.remove(project.assemble) - project.build.dependsOn.remove('assemble') + project.tasks.assemble.enabled = false Map defaultSubstitutions = [ /* These match up with the asciidoc syntax for substitutions but * the values may differ. 
In particular {version} needs to resolve
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
index 6f42e41beaa..a14a3a680da 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
@@ -19,23 +19,19 @@
package org.elasticsearch.gradle.plugin

import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
-import nebula.plugin.info.scm.ScmInfoPlugin
+import nebula.plugin.publishing.maven.MavenScmPlugin
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.NoticeTask
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
-import org.gradle.api.InvalidUserDataException
import org.gradle.api.Project
-import org.gradle.api.Task
-import org.gradle.api.XmlProvider
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
+import org.gradle.api.publish.maven.tasks.GenerateMavenPom
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.bundling.Zip
+import org.gradle.jvm.tasks.Jar

-import java.nio.file.Files
-import java.nio.file.Path
-import java.nio.file.StandardCopyOption
import java.util.regex.Matcher
import java.util.regex.Pattern

/**
@@ -55,16 +51,10 @@ public class PluginBuildPlugin extends BuildPlugin {
        String name = project.pluginProperties.extension.name
        project.archivesBaseName = name

-        if (project.pluginProperties.extension.hasClientJar) {
-            // for plugins which work with the transport client, we copy the jar
-            // file to a new name, copy the nebula generated pom to the same name,
-            // and generate a different pom for the zip
-            addClientJarPomGeneration(project)
-            addClientJarTask(project)
-        }
-        // while the jar isn't normally published, we still at least build a pom of deps
-        // in case it is published, for instance when other plugins extend this plugin
-        configureJarPom(project)
+        // set the project description so it will be picked up by publishing
+        project.description = project.pluginProperties.extension.description
+
+        configurePublishing(project)

        project.integTestCluster.dependsOn(project.bundlePlugin)
        project.tasks.run.dependsOn(project.bundlePlugin)
@@ -94,6 +84,32 @@ public class PluginBuildPlugin extends BuildPlugin {
        project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build
    }

+    private void configurePublishing(Project project) {
+        // Only configure publishing if applied externally
+        if (project.pluginProperties.extension.hasClientJar) {
+            project.plugins.apply(MavenScmPlugin.class)
+            // Only change Jar tasks, we don't want a -client zip so we can't change archivesBaseName
+            project.tasks.withType(Jar) {
+                baseName = baseName + "-client"
+            }
+            // always configure publishing for client jars
+            project.plugins.apply(MavenScmPlugin.class)
+            project.publishing.publications.nebula(MavenPublication).artifactId(
+                project.pluginProperties.extension.name + "-client"
+            )
+            project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask ->
+                generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-client-${project.version}.pom"
+            }
+        } else {
+            project.plugins.withType(MavenPublishPlugin).whenPluginAdded {
+                project.publishing.publications.nebula(MavenPublication).artifactId(
+                    project.pluginProperties.extension.name
+                )
+            }
+
+        }
+    }
+
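As a usage sketch only (the plugin name, description, and class below are hypothetical, not part of this change), a plugin opting into the client jar publishing configured by `configurePublishing` above would declare something like:

    esplugin {
      name 'example-plugin'
      description 'An example plugin'
      classname 'org.example.ExamplePlugin'
      // causes the Jar tasks to be renamed to "-client" and a matching
      // example-plugin-client pom to be generated and published
      hasClientJar true
    }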
private static void configureDependencies(Project project) { project.dependencies { compileOnly "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}" @@ -141,11 +157,10 @@ public class PluginBuildPlugin extends BuildPlugin { from pluginMetadata // metadata (eg custom security policy) /* * If the plugin is using the shadow plugin then we need to bundle - * "shadow" things rather than the default jar and dependencies so - * we don't hit jar hell. + * that shadow jar. */ from { project.plugins.hasPlugin(ShadowPlugin) ? project.shadowJar : project.jar } - from { project.plugins.hasPlugin(ShadowPlugin) ? project.configurations.shadow : project.configurations.runtime - project.configurations.compileOnly } + from project.configurations.runtime - project.configurations.compileOnly // extra files for the plugin to go into the zip from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging from('src/main') { @@ -161,33 +176,6 @@ public class PluginBuildPlugin extends BuildPlugin { } /** Adds a task to move jar and associated files to a "-client" name. */ - protected static void addClientJarTask(Project project) { - Task clientJar = project.tasks.create('clientJar') - clientJar.dependsOn(project.jar, project.tasks.generatePomFileForClientJarPublication, project.javadocJar, project.sourcesJar) - clientJar.doFirst { - Path jarFile = project.jar.outputs.files.singleFile.toPath() - String clientFileName = jarFile.fileName.toString().replace(project.version, "client-${project.version}") - Files.copy(jarFile, jarFile.resolveSibling(clientFileName), StandardCopyOption.REPLACE_EXISTING) - - String clientPomFileName = clientFileName.replace('.jar', '.pom') - Files.copy( - project.tasks.generatePomFileForClientJarPublication.outputs.files.singleFile.toPath(), - jarFile.resolveSibling(clientPomFileName), - StandardCopyOption.REPLACE_EXISTING - ) - - String sourcesFileName = jarFile.fileName.toString().replace('.jar', '-sources.jar') - String clientSourcesFileName = clientFileName.replace('.jar', '-sources.jar') - Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName), - StandardCopyOption.REPLACE_EXISTING) - - String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar') - String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar') - Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName), - StandardCopyOption.REPLACE_EXISTING) - } - project.assemble.dependsOn(clientJar) - } static final Pattern GIT_PATTERN = Pattern.compile(/git@([^:]+):([^\.]+)\.git/) @@ -209,39 +197,11 @@ public class PluginBuildPlugin extends BuildPlugin { /** Adds nebula publishing task to generate a pom file for the plugin. 
*/ protected static void addClientJarPomGeneration(Project project) { - project.plugins.apply(MavenPublishPlugin.class) - - project.publishing { - publications { - clientJar(MavenPublication) { - from project.components.java - artifactId = project.pluginProperties.extension.name + '-client' - pom.withXml { XmlProvider xml -> - Node root = xml.asNode() - root.appendNode('name', project.pluginProperties.extension.name) - root.appendNode('description', project.pluginProperties.extension.description) - root.appendNode('url', urlFromOrigin(project.scminfo.origin)) - Node scmNode = root.appendNode('scm') - scmNode.appendNode('url', project.scminfo.origin) - } - } - } - } + project.plugins.apply(MavenScmPlugin.class) + project.description = project.pluginProperties.extension.description } /** Configure the pom for the main jar of this plugin */ - protected static void configureJarPom(Project project) { - project.plugins.apply(ScmInfoPlugin.class) - project.plugins.apply(MavenPublishPlugin.class) - - project.publishing { - publications { - nebula(MavenPublication) { - artifactId project.pluginProperties.extension.name - } - } - } - } protected void addNoticeGeneration(Project project) { File licenseFile = project.pluginProperties.extension.licenseFile diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy index 656d5e0d35a..119a0276499 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy @@ -19,10 +19,11 @@ package org.elasticsearch.gradle.precommit +import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import org.elasticsearch.gradle.LoggedExec import org.gradle.api.file.FileCollection +import org.gradle.api.tasks.Classpath import org.gradle.api.tasks.OutputFile - /** * Runs CheckJarHell on a classpath. */ @@ -34,11 +35,18 @@ public class JarHellTask extends LoggedExec { * inputs (ie the jars/class files). 
*/ @OutputFile - File successMarker = new File(project.buildDir, 'markers/jarHell') + File successMarker + + @Classpath + FileCollection classpath public JarHellTask() { + successMarker = new File(project.buildDir, 'markers/jarHell-' + getName()) project.afterEvaluate { FileCollection classpath = project.sourceSets.test.runtimeClasspath + if (project.plugins.hasPlugin(ShadowPlugin)) { + classpath += project.configurations.bundle + } inputs.files(classpath) dependsOn(classpath) description = "Runs CheckJarHell on ${classpath}" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 42dc29df058..06557d4ccfd 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -18,18 +18,12 @@ */ package org.elasticsearch.gradle.precommit -import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis -import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask -import org.gradle.api.JavaVersion import org.gradle.api.Project import org.gradle.api.Task -import org.gradle.api.file.FileCollection +import org.gradle.api.artifacts.Configuration import org.gradle.api.plugins.JavaBasePlugin import org.gradle.api.plugins.quality.Checkstyle -import org.gradle.api.tasks.JavaExec -import org.gradle.api.tasks.StopExecutionException - /** * Validation tasks which should be run before committing. These run before tests. */ @@ -37,20 +31,22 @@ class PrecommitTasks { /** Adds a precommit task, which depends on non-test verification tasks. */ public static Task create(Project project, boolean includeDependencyLicenses) { + project.configurations.create("forbiddenApisCliJar") + project.dependencies { + forbiddenApisCliJar ('de.thetaphi:forbiddenapis:2.5') + } + List precommitTasks = [ - configureForbiddenApis(project), configureCheckstyle(project), + configureForbiddenApisCli(project), configureNamingConventions(project), project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), project.tasks.create('licenseHeaders', LicenseHeadersTask.class), project.tasks.create('filepermissions', FilePermissionsTask.class), - project.tasks.create('jarHell', JarHellTask.class), - project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class) + configureJarHell(project), + configureThirdPartyAudit(project) ] - // Configure it but don't add it as a dependency yet - configureForbiddenApisCli(project) - // tasks with just tests don't need dependency licenses, so this flag makes adding // the task optional if (includeDependencyLicenses) { @@ -84,77 +80,61 @@ class PrecommitTasks { return project.tasks.create(precommitOptions) } - private static Task configureForbiddenApis(Project project) { - project.pluginManager.apply(ForbiddenApisPlugin.class) - project.forbiddenApis { - failOnUnsupportedJava = false - bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-non-portable', 'jdk-system-out'] - signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'), - getClass().getResource('/forbidden/es-all-signatures.txt')] - suppressAnnotations = ['**.SuppressForbidden'] - } - project.tasks.withType(CheckForbiddenApis) { - // we do not use the += operator to add signatures, as conventionMappings of Gradle do not work when it's configured using withType: - if (name.endsWith('Test')) { - 
signaturesURLs = project.forbiddenApis.signaturesURLs + - [ getClass().getResource('/forbidden/es-test-signatures.txt'), getClass().getResource('/forbidden/http-signatures.txt') ] - } else { - signaturesURLs = project.forbiddenApis.signaturesURLs + - [ getClass().getResource('/forbidden/es-server-signatures.txt') ] - } - // forbidden apis doesn't support Java 11, so stop at 10 - String targetMajorVersion = (project.compilerJavaVersion.compareTo(JavaVersion.VERSION_1_10) > 0 ? - JavaVersion.VERSION_1_10 : - project.compilerJavaVersion).getMajorVersion() - targetCompatibility = Integer.parseInt(targetMajorVersion) >= 9 ?targetMajorVersion : "1.${targetMajorVersion}" - } - Task forbiddenApis = project.tasks.findByName('forbiddenApis') - forbiddenApis.group = "" // clear group, so this does not show up under verification tasks + private static Task configureJarHell(Project project) { + Task task = project.tasks.create('jarHell', JarHellTask.class) + task.classpath = project.sourceSets.test.runtimeClasspath + return task + } - return forbiddenApis + private static Task configureThirdPartyAudit(Project project) { + ThirdPartyAuditTask thirdPartyAuditTask = project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class) + ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources') + thirdPartyAuditTask.configure { + dependsOn(buildResources) + signatureFile = buildResources.copy("forbidden/third-party-audit.txt") + javaHome = project.runtimeJavaHome + targetCompatibility = project.runtimeJavaVersion + } + return thirdPartyAuditTask } private static Task configureForbiddenApisCli(Project project) { - project.configurations.create("forbiddenApisCliJar") - project.dependencies { - forbiddenApisCliJar 'de.thetaphi:forbiddenapis:2.5' - } - Task forbiddenApisCli = project.tasks.create('forbiddenApisCli') - - project.sourceSets.forEach { sourceSet -> + Task forbiddenApisCli = project.tasks.create('forbiddenApis') + project.sourceSets.all { sourceSet -> forbiddenApisCli.dependsOn( - project.tasks.create(sourceSet.getTaskName('forbiddenApisCli', null), JavaExec) { + project.tasks.create(sourceSet.getTaskName('forbiddenApis', null), ForbiddenApisCliTask) { ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources') dependsOn(buildResources) - classpath = project.files( - project.configurations.forbiddenApisCliJar, - sourceSet.compileClasspath, - sourceSet.runtimeClasspath + it.sourceSet = sourceSet + javaHome = project.runtimeJavaHome + targetCompatibility = project.compilerJavaVersion + bundledSignatures = [ + "jdk-unsafe", "jdk-deprecated", "jdk-non-portable", "jdk-system-out" + ] + signaturesFiles = project.files( + buildResources.copy("forbidden/jdk-signatures.txt"), + buildResources.copy("forbidden/es-all-signatures.txt") ) - main = 'de.thetaphi.forbiddenapis.cli.CliMain' - executable = "${project.runtimeJavaHome}/bin/java" - args "-b", 'jdk-unsafe-1.8' - args "-b", 'jdk-deprecated-1.8' - args "-b", 'jdk-non-portable' - args "-b", 'jdk-system-out' - args "-f", buildResources.copy("forbidden/jdk-signatures.txt") - args "-f", buildResources.copy("forbidden/es-all-signatures.txt") - args "--suppressannotation", '**.SuppressForbidden' + suppressAnnotations = ['**.SuppressForbidden'] if (sourceSet.name == 'test') { - args "-f", buildResources.copy("forbidden/es-test-signatures.txt") - args "-f", buildResources.copy("forbidden/http-signatures.txt") + signaturesFiles += project.files( + 
buildResources.copy("forbidden/es-test-signatures.txt"), + buildResources.copy("forbidden/http-signatures.txt") + ) } else { - args "-f", buildResources.copy("forbidden/es-server-signatures.txt") + signaturesFiles += project.files(buildResources.copy("forbidden/es-server-signatures.txt")) } dependsOn sourceSet.classesTaskName - doFirst { - // Forbidden APIs expects only existing dirs, and requires at least one - FileCollection existingOutputs = sourceSet.output.classesDirs - .filter { it.exists() } - if (existingOutputs.isEmpty()) { - throw new StopExecutionException("${sourceSet.name} has no outputs") - } - existingOutputs.forEach { args "-d", it } + classesDirs = sourceSet.output.classesDirs + ext.replaceSignatureFiles = { String... names -> + signaturesFiles = project.files( + names.collect { buildResources.copy("forbidden/${it}.txt") } + ) + } + ext.addSignatureFiles = { String... names -> + signaturesFiles += project.files( + names.collect { buildResources.copy("forbidden/${it}.txt") } + ) } } ) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy deleted file mode 100644 index d6babbbfbb8..00000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.gradle.precommit; - -import org.apache.tools.ant.BuildEvent; -import org.apache.tools.ant.BuildException; -import org.apache.tools.ant.BuildListener; -import org.apache.tools.ant.BuildLogger; -import org.apache.tools.ant.DefaultLogger; -import org.apache.tools.ant.Project; -import org.elasticsearch.gradle.AntTask; -import org.gradle.api.artifacts.Configuration; -import org.gradle.api.file.FileCollection; -import org.gradle.api.tasks.Input -import org.gradle.api.tasks.InputFiles -import org.gradle.api.tasks.OutputFile - -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.attribute.BasicFileAttributes; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * Basic static checking to keep tabs on third party JARs - */ -public class ThirdPartyAuditTask extends AntTask { - - // patterns for classes to exclude, because we understand their issues - private List excludes = []; - - /** - * Input for the task. Set javadoc for {#link getJars} for more. Protected - * so the afterEvaluate closure in the constructor can write it. - */ - protected FileCollection jars; - - /** - * Classpath against which to run the third patty audit. Protected so the - * afterEvaluate closure in the constructor can write it. 
- */ - protected FileCollection classpath; - - /** - * We use a simple "marker" file that we touch when the task succeeds - * as the task output. This is compared against the modified time of the - * inputs (ie the jars/class files). - */ - @OutputFile - File successMarker = new File(project.buildDir, 'markers/thirdPartyAudit') - - ThirdPartyAuditTask() { - // we depend on this because its the only reliable configuration - // this probably makes the build slower: gradle you suck here when it comes to configurations, you pay the price. - dependsOn(project.configurations.testCompile); - description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors'"; - - project.afterEvaluate { - Configuration configuration = project.configurations.findByName('runtime') - Configuration compileOnly = project.configurations.findByName('compileOnly') - if (configuration == null) { - // some projects apparently do not have 'runtime'? what a nice inconsistency, - // basically only serves to waste time in build logic! - configuration = project.configurations.findByName('testCompile') - } - assert configuration != null - if (compileOnly == null) { - classpath = configuration - } else { - classpath = project.files(configuration, compileOnly) - } - - // we only want third party dependencies. - jars = configuration.fileCollection({ dependency -> - dependency.group.startsWith("org.elasticsearch") == false - }); - - // we don't want provided dependencies, which we have already scanned. e.g. don't - // scan ES core's dependencies for every single plugin - if (compileOnly != null) { - jars -= compileOnly - } - inputs.files(jars) - onlyIf { jars.isEmpty() == false } - } - } - - /** - * classes that should be excluded from the scan, - * e.g. because we know what sheisty stuff those particular classes are up to. - */ - public void setExcludes(String[] classes) { - for (String s : classes) { - if (s.indexOf('*') != -1) { - throw new IllegalArgumentException("illegal third party audit exclusion: '" + s + "', wildcards are not permitted!"); - } - } - excludes = classes.sort(); - } - - /** - * Returns current list of exclusions. - */ - @Input - public List getExcludes() { - return excludes; - } - - // yes, we parse Uwe Schindler's errors to find missing classes, and to keep a continuous audit. Just don't let him know! - static final Pattern MISSING_CLASS_PATTERN = - Pattern.compile(/WARNING: The referenced class '(.*)' cannot be loaded\. Please fix the classpath\!/); - - static final Pattern VIOLATION_PATTERN = - Pattern.compile(/\s\sin ([a-zA-Z0-9\$\.]+) \(.*\)/); - - // we log everything and capture errors and handle them with our whitelist - // this is important, as we detect stale whitelist entries, workaround forbidden apis bugs, - // and it also allows whitelisting missing classes! 
- static class EvilLogger extends DefaultLogger { - final Set missingClasses = new TreeSet<>(); - final Map> violations = new TreeMap<>(); - String previousLine = null; - - @Override - public void messageLogged(BuildEvent event) { - if (event.getTask().getClass() == de.thetaphi.forbiddenapis.ant.AntTask.class) { - if (event.getPriority() == Project.MSG_WARN) { - Matcher m = MISSING_CLASS_PATTERN.matcher(event.getMessage()); - if (m.matches()) { - missingClasses.add(m.group(1).replace('.', '/') + ".class"); - } - - // Reset the priority of the event to DEBUG, so it doesn't - // pollute the build output - event.setMessage(event.getMessage(), Project.MSG_DEBUG); - } else if (event.getPriority() == Project.MSG_ERR) { - Matcher m = VIOLATION_PATTERN.matcher(event.getMessage()); - if (m.matches()) { - String violation = previousLine + '\n' + event.getMessage(); - String clazz = m.group(1).replace('.', '/') + ".class"; - List current = violations.get(clazz); - if (current == null) { - current = new ArrayList<>(); - violations.put(clazz, current); - } - current.add(violation); - } - previousLine = event.getMessage(); - } - } - super.messageLogged(event); - } - } - - @Override - protected BuildLogger makeLogger(PrintStream stream, int outputLevel) { - DefaultLogger log = new EvilLogger(); - log.errorPrintStream = stream; - log.outputPrintStream = stream; - log.messageOutputLevel = outputLevel; - return log; - } - - @Override - protected void runAnt(AntBuilder ant) { - ant.project.addTaskDefinition('thirdPartyAudit', de.thetaphi.forbiddenapis.ant.AntTask); - - // print which jars we are going to scan, always - // this is not the time to try to be succinct! Forbidden will print plenty on its own! - Set names = new TreeSet<>(); - for (File jar : jars) { - names.add(jar.getName()); - } - - // TODO: forbidden-apis + zipfileset gives O(n^2) behavior unless we dump to a tmpdir first, - // and then remove our temp dir afterwards. don't complain: try it yourself. - // we don't use gradle temp dir handling, just google it, or try it yourself. 
- - File tmpDir = new File(project.buildDir, 'tmp/thirdPartyAudit'); - - // clean up any previous mess (if we failed), then unzip everything to one directory - ant.delete(dir: tmpDir.getAbsolutePath()); - tmpDir.mkdirs(); - for (File jar : jars) { - ant.unzip(src: jar.getAbsolutePath(), dest: tmpDir.getAbsolutePath()); - } - - // convert exclusion class names to binary file names - List excludedFiles = excludes.collect {it.replace('.', '/') + ".class"} - Set excludedSet = new TreeSet<>(excludedFiles); - - // jarHellReprise - Set sheistySet = getSheistyClasses(tmpDir.toPath()); - - try { - ant.thirdPartyAudit(failOnUnsupportedJava: false, - failOnMissingClasses: false, - classpath: classpath.asPath) { - fileset(dir: tmpDir) - signatures { - string(value: getClass().getResourceAsStream('/forbidden/third-party-audit.txt').getText('UTF-8')) - } - } - } catch (BuildException ignore) {} - - EvilLogger evilLogger = null; - for (BuildListener listener : ant.project.getBuildListeners()) { - if (listener instanceof EvilLogger) { - evilLogger = (EvilLogger) listener; - break; - } - } - assert evilLogger != null; - - // keep our whitelist up to date - Set bogusExclusions = new TreeSet<>(excludedSet); - bogusExclusions.removeAll(sheistySet); - bogusExclusions.removeAll(evilLogger.missingClasses); - bogusExclusions.removeAll(evilLogger.violations.keySet()); - if (!bogusExclusions.isEmpty()) { - throw new IllegalStateException("Invalid exclusions, nothing is wrong with these classes: " + bogusExclusions); - } - - // don't duplicate classes with the JDK - sheistySet.removeAll(excludedSet); - if (!sheistySet.isEmpty()) { - throw new IllegalStateException("JAR HELL WITH JDK! " + sheistySet); - } - - // don't allow a broken classpath - evilLogger.missingClasses.removeAll(excludedSet); - if (!evilLogger.missingClasses.isEmpty()) { - throw new IllegalStateException("CLASSES ARE MISSING! " + evilLogger.missingClasses); - } - - // don't use internal classes - evilLogger.violations.keySet().removeAll(excludedSet); - if (!evilLogger.violations.isEmpty()) { - throw new IllegalStateException("VIOLATIONS WERE FOUND! " + evilLogger.violations); - } - - // clean up our mess (if we succeed) - ant.delete(dir: tmpDir.getAbsolutePath()); - - successMarker.setText("", 'UTF-8') - } - - /** - * check for sheisty classes: if they also exist in the extensions classloader, its jar hell with the jdk! - */ - private Set getSheistyClasses(Path root) { - // system.parent = extensions loader. - // note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!). - // but groovy/gradle needs to work at all first! 
- ClassLoader ext = ClassLoader.getSystemClassLoader().getParent(); - assert ext != null; - - Set sheistySet = new TreeSet<>(); - Files.walkFileTree(root, new SimpleFileVisitor() { - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - String entry = root.relativize(file).toString().replace('\\', '/'); - if (entry.endsWith(".class")) { - if (ext.getResource(entry) != null) { - sheistySet.add(entry); - } - } - return FileVisitResult.CONTINUE; - } - }); - return sheistySet; - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 4ede349b206..ecf3e342040 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -337,7 +337,13 @@ class ClusterFormationTasks { if (node.nodeVersion.major >= 7) { esConfig['indices.breaker.total.use_real_memory'] = false } - esConfig.putAll(node.config.settings) + for (Map.Entry setting : node.config.settings) { + if (setting.value == null) { + esConfig.remove(setting.key) + } else { + esConfig.put(setting.key, setting.value) + } + } Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) writeConfig.doFirst { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index a2484e9c5fc..a5d3b41339d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -53,6 +53,8 @@ public class StandaloneRestTestPlugin implements Plugin { // only setup tests to build project.sourceSets.create('test') + // create a compileOnly configuration as others might expect it + project.configurations.create("compileOnly") project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}") project.eclipse.classpath.sourceSets = [project.sourceSets.test] diff --git a/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java new file mode 100644 index 00000000000..6d256ba0449 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch;
+
+import org.gradle.api.Action;
+import org.gradle.api.Project;
+import org.gradle.api.file.CopySpec;
+import org.gradle.api.file.FileTree;
+import org.gradle.api.tasks.WorkResult;
+import org.gradle.process.ExecResult;
+import org.gradle.process.JavaExecSpec;
+
+import java.io.File;
+
+/**
+ * Facilitate access to Gradle services without a direct dependency on Project.
+ *
+ * In a future release Gradle will offer service injection; this adapter plays that role until that time.
+ * It exposes the service methods that are part of the public API, as the classes implementing them are not.
+ * Today service injection is not available for extensions.
+ *
+ * Everything exposed here must be thread safe. That is the very reason why project is not passed in directly.
+ */
+public class GradleServicesAdapter {
+
+    public final Project project;
+
+    public GradleServicesAdapter(Project project) {
+        this.project = project;
+    }
+
+    public static GradleServicesAdapter getInstance(Project project) {
+        return new GradleServicesAdapter(project);
+    }
+
+    public WorkResult copy(Action<? super CopySpec> action) {
+        return project.copy(action);
+    }
+
+    public WorkResult sync(Action<? super CopySpec> action) {
+        return project.sync(action);
+    }
+
+    public ExecResult javaexec(Action<? super JavaExecSpec> action) {
+        return project.javaexec(action);
+    }
+
+    public FileTree zipTree(File zipPath) {
+        return project.zipTree(zipPath);
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java
new file mode 100644
index 00000000000..c926e70b3f7
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +package org.elasticsearch.gradle; + +public enum Distribution { + + INTEG_TEST("integ-test-zip"), + ZIP("zip"), + ZIP_OSS("zip-oss"); + + private final String name; + + Distribution(String name) { + this.name = name; + } + + public String getName() { + return name; + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java index 03c18f54e67..4af104093a5 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java @@ -35,6 +35,7 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -105,7 +106,7 @@ public class ExportElasticsearchBuildResourcesTask extends DefaultTask { if (is == null) { throw new GradleException("Can't export `" + resourcePath + "` from build-tools: not found"); } - Files.copy(is, destination); + Files.copy(is, destination, StandardCopyOption.REPLACE_EXISTING); } catch (IOException e) { throw new GradleException("Can't write resource `" + resourcePath + "` to " + destination, e); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkJarHellCheck.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkJarHellCheck.java new file mode 100644 index 00000000000..60de1981f98 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkJarHellCheck.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle; + +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +public class JdkJarHellCheck { + + private Set detected = new HashSet<>(); + + private void scanForJDKJarHell(Path root) throws IOException { + // system.parent = extensions loader. + // note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!) 
+ ClassLoader ext = ClassLoader.getSystemClassLoader().getParent(); + assert ext != null; + + Files.walkFileTree(root, new SimpleFileVisitor<Path>() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { + String entry = root.relativize(file).toString().replace('\\', '/'); + if (entry.endsWith(".class")) { + if (ext.getResource(entry) != null) { + detected.add( + entry + .replace("/", ".") + .replace(".class", "") + ); + } + } + return FileVisitResult.CONTINUE; + } + }); + } + + public Set<String> getDetected() { + return Collections.unmodifiableSet(detected); + } + + public static void main(String[] argv) throws IOException { + JdkJarHellCheck checker = new JdkJarHellCheck(); + for (String location : argv) { + Path path = Paths.get(location); + if (Files.exists(path) == false) { + throw new IllegalArgumentException("Path does not exist: " + path); + } + checker.scanForJDKJarHell(path); + } + if (checker.getDetected().isEmpty()) { + System.exit(0); + } else { + checker.getDetected().forEach(System.out::println); + System.exit(1); + } + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java new file mode 100644 index 00000000000..779e7b61ed9 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.gradle.clusterformation; + +import groovy.lang.Closure; +import org.elasticsearch.GradleServicesAdapter; +import org.gradle.api.NamedDomainObjectContainer; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.Task; +import org.gradle.api.execution.TaskActionListener; +import org.gradle.api.execution.TaskExecutionListener; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; +import org.gradle.api.plugins.ExtraPropertiesExtension; +import org.gradle.api.tasks.TaskState; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class ClusterformationPlugin implements Plugin<Project> { + + public static final String LIST_TASK_NAME = "listElasticSearchClusters"; + public static final String EXTENSION_NAME = "elasticSearchClusters"; + + private final Logger logger = Logging.getLogger(ClusterformationPlugin.class); + + @Override + public void apply(Project project) { + NamedDomainObjectContainer<ElasticsearchNode> container = project.container( + ElasticsearchNode.class, + (name) -> new ElasticsearchNode(name, GradleServicesAdapter.getInstance(project)) + ); + project.getExtensions().add(EXTENSION_NAME, container); + + Task listTask = project.getTasks().create(LIST_TASK_NAME); + listTask.setGroup("ES cluster formation"); + listTask.setDescription("Lists all ES clusters configured for this project"); + listTask.doLast((Task task) -> + container.forEach((ElasticsearchConfiguration cluster) -> + logger.lifecycle(" * {}: {}", cluster.getName(), cluster.getDistribution()) + ) + ); + + Map<Task, List<ElasticsearchConfiguration>> taskToCluster = new HashMap<>(); + + // register an extension for all current and future tasks, so that any task can declare that it wants to use a + // specific cluster. + project.getTasks().all((Task task) -> + task.getExtensions().findByType(ExtraPropertiesExtension.class) + .set( + "useCluster", + new Closure<Void>(this, this) { + public void doCall(ElasticsearchConfiguration conf) { + taskToCluster.computeIfAbsent(task, k -> new ArrayList<>()).add(conf); + } + }) + ); + + project.getGradle().getTaskGraph().whenReady(taskExecutionGraph -> + taskExecutionGraph.getAllTasks() + .forEach(task -> + taskToCluster.getOrDefault(task, Collections.emptyList()).forEach(ElasticsearchConfiguration::claim) + ) + ); + project.getGradle().addListener( + new TaskActionListener() { + @Override + public void beforeActions(Task task) { + // we only start the cluster before the actions, so we'll not start it if the task is up-to-date + taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchConfiguration::start); + } + @Override + public void afterActions(Task task) {} + } + ); + project.getGradle().addListener( + new TaskExecutionListener() { + @Override + public void afterExecute(Task task, TaskState state) { + // always un-claim the cluster, even if _this_ task is up-to-date, as others might not have been and caused the + // cluster to start.
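+ // Claims act as a reference count: start is idempotent, and only the task that releases the last claim actually stops the node.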
+ taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchConfiguration::unClaimAndStop); + } + @Override + public void beforeExecute(Task task) {} + } + ); + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java new file mode 100644 index 00000000000..913d88e9fa1 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.clusterformation; + +import org.elasticsearch.gradle.Distribution; +import org.elasticsearch.gradle.Version; + +import java.util.concurrent.Future; + +public interface ElasticsearchConfiguration { + String getName(); + + Version getVersion(); + + void setVersion(Version version); + + default void setVersion(String version) { + setVersion(Version.fromString(version)); + } + + Distribution getDistribution(); + + void setDistribution(Distribution distribution); + + void claim(); + + Future<Void> start(); + + void unClaimAndStop(); +}
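Informally, the contract is: every task that uses a cluster claims it once the task graph is ready, start runs before the first using task's actions, and unClaimAndStop stops the node only when the last claim is released. A sketch of that sequence, driving the interface and the ElasticsearchNode implementation that follows by hand (illustrative only; assumes it lives in the same package):

    import org.elasticsearch.GradleServicesAdapter;
    import org.gradle.api.Project;

    class ClusterLifecycleSketch {
        static void twoTasksSharingOneCluster(Project project) {
            ElasticsearchConfiguration cluster =
                new ElasticsearchNode("myTestCluster", GradleServicesAdapter.getInstance(project));
            cluster.claim();          // task A is in the task graph
            cluster.claim();          // task B is in the task graph
            cluster.start();          // before task A's actions: starts the node
            cluster.unClaimAndStop(); // after task A: one claim left, node keeps running
            cluster.start();          // before task B's actions: already running, no-op
            cluster.unClaimAndStop(); // after task B: last claim released, node stops
        }
    }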
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java new file mode 100644 index 00000000000..8b78fc2b627 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.clusterformation; + +import org.elasticsearch.GradleServicesAdapter; +import org.elasticsearch.gradle.Distribution; +import org.elasticsearch.gradle.Version; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; + +import java.util.Objects; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +public class ElasticsearchNode implements ElasticsearchConfiguration { + + private final String name; + private final GradleServicesAdapter services; + private final AtomicInteger noOfClaims = new AtomicInteger(); + private final AtomicBoolean started = new AtomicBoolean(false); + private final Logger logger = Logging.getLogger(ElasticsearchNode.class); + + private Distribution distribution; + private Version version; + + public ElasticsearchNode(String name, GradleServicesAdapter services) { + this.name = name; + this.services = services; + } + + @Override + public String getName() { + return name; + } + + @Override + public Version getVersion() { + return version; + } + + @Override + public void setVersion(Version version) { + checkNotRunning(); + this.version = version; + } + + @Override + public Distribution getDistribution() { + return distribution; + } + + @Override + public void setDistribution(Distribution distribution) { + checkNotRunning(); + this.distribution = distribution; + } + + @Override + public void claim() { + noOfClaims.incrementAndGet(); + } + + /** + * Start the cluster if not running. Does nothing if the cluster is already running. + * + * @return future of thread running in the background + */ + @Override + public Future<Void> start() { + if (started.getAndSet(true)) { + logger.lifecycle("Already started cluster: {}", name); + } else { + logger.lifecycle("Starting cluster: {}", name); + } + return null; + } + + /** + * Stops a running cluster if it's not claimed. Does nothing otherwise. + */ + @Override + public void unClaimAndStop() { + int decrementedClaims = noOfClaims.decrementAndGet(); + if (decrementedClaims > 0) { + logger.lifecycle("Not stopping {}, since cluster still has {} claim(s)", name, decrementedClaims); + return; + } + if (started.get() == false) { + logger.lifecycle("Asked to unClaimAndStop, but cluster was not running: {}", name); + return; + } + logger.lifecycle("Stopping {}, number of claims is {}", name, decrementedClaims); + } + + private void checkNotRunning() { + if (started.get()) { + throw new IllegalStateException("Configuration cannot be altered while running"); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ElasticsearchNode that = (ElasticsearchNode) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java new file mode 100644 index 00000000000..aaa9564b0dc --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java @@ -0,0 +1,190 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.precommit; + +import org.gradle.api.DefaultTask; +import org.gradle.api.JavaVersion; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.file.FileCollection; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.SkipWhenEmpty; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.TaskAction; +import org.gradle.process.JavaExecSpec; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; + +public class ForbiddenApisCliTask extends DefaultTask { + + private final Logger logger = Logging.getLogger(ForbiddenApisCliTask.class); + private FileCollection signaturesFiles; + private List<String> signatures = new ArrayList<>(); + private Set<String> bundledSignatures = new LinkedHashSet<>(); + private Set<String> suppressAnnotations = new LinkedHashSet<>(); + private JavaVersion targetCompatibility; + private FileCollection classesDirs; + private SourceSet sourceSet; + // This needs to be an object so it can hold Groovy GStrings + private Object javaHome; + + @Input + public JavaVersion getTargetCompatibility() { + return targetCompatibility; + } + + public void setTargetCompatibility(JavaVersion targetCompatibility) { + if (targetCompatibility.compareTo(JavaVersion.VERSION_1_10) > 0) { + logger.warn( + "Target compatibility is set to {} but forbiddenapis only supports up to 10. Will cap at 10.", + targetCompatibility + ); + this.targetCompatibility = JavaVersion.VERSION_1_10; + } else { + this.targetCompatibility = targetCompatibility; + } + } +
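+ // The marker file is this task's only declared output; writing it after a clean run is what drives Gradle's up-to-date checks.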
Will cap at 10.", + targetCompatibility + ); + this.targetCompatibility = JavaVersion.VERSION_1_10; + } else { + this.targetCompatibility = targetCompatibility; + } + } + + @OutputFile + public File getMarkerFile() { + return new File( + new File(getProject().getBuildDir(), "precommit"), + getName() + ); + } + + @InputFiles + @SkipWhenEmpty + public FileCollection getClassesDirs() { + return classesDirs.filter(File::exists); + } + + public void setClassesDirs(FileCollection classesDirs) { + this.classesDirs = classesDirs; + } + + @InputFiles + public FileCollection getSignaturesFiles() { + return signaturesFiles; + } + + public void setSignaturesFiles(FileCollection signaturesFiles) { + this.signaturesFiles = signaturesFiles; + } + + @Input + public List getSignatures() { + return signatures; + } + + public void setSignatures(List signatures) { + this.signatures = signatures; + } + + @Input + public Set getBundledSignatures() { + return bundledSignatures; + } + + public void setBundledSignatures(Set bundledSignatures) { + this.bundledSignatures = bundledSignatures; + } + + @Input + public Set getSuppressAnnotations() { + return suppressAnnotations; + } + + public void setSuppressAnnotations(Set suppressAnnotations) { + this.suppressAnnotations = suppressAnnotations; + } + + @InputFiles + public FileCollection getClassPathFromSourceSet() { + return getProject().files( + sourceSet.getCompileClasspath(), + sourceSet.getRuntimeClasspath() + ); + } + + public void setSourceSet(SourceSet sourceSet) { + this.sourceSet = sourceSet; + } + + @InputFiles + public Configuration getForbiddenAPIsConfiguration() { + return getProject().getConfigurations().getByName("forbiddenApisCliJar"); + } + + @Input + public Object getJavaHome() { + return javaHome; + } + + public void setJavaHome(Object javaHome) { + this.javaHome = javaHome; + } + + @TaskAction + public void runForbiddenApisAndWriteMarker() throws IOException { + getProject().javaexec((JavaExecSpec spec) -> { + spec.classpath( + getForbiddenAPIsConfiguration(), + getClassPathFromSourceSet() + ); + spec.setExecutable(getJavaHome() + "/bin/java"); + spec.setMain("de.thetaphi.forbiddenapis.cli.CliMain"); + // build the command line + getSignaturesFiles().forEach(file -> spec.args("-f", file.getAbsolutePath())); + getSuppressAnnotations().forEach(annotation -> spec.args("--suppressannotation", annotation)); + getBundledSignatures().forEach(bundled -> { + // there's no option for target compatibility so we have to interpret it + final String prefix; + if (bundled.equals("jdk-system-out") || + bundled.equals("jdk-reflection") || + bundled.equals("jdk-non-portable")) { + prefix = ""; + } else { + prefix = "-" + ( + getTargetCompatibility().compareTo(JavaVersion.VERSION_1_9) >= 0 ? + getTargetCompatibility().getMajorVersion() : + "1." + getTargetCompatibility().getMajorVersion()) + ; + } + spec.args("-b", bundled + prefix); + } + ); + getClassesDirs().forEach(dir -> + spec.args("-d", dir) + ); + }); + Files.write(getMarkerFile().toPath(), Collections.emptyList()); + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java new file mode 100644 index 00000000000..7e4766ada65 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java @@ -0,0 +1,309 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java new file mode 100644 index 00000000000..7e4766ada65 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java @@ -0,0 +1,309 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.precommit; + +import org.apache.commons.io.output.NullOutputStream; +import org.elasticsearch.gradle.JdkJarHellCheck; +import org.elasticsearch.test.NamingConventionsCheck; +import org.gradle.api.DefaultTask; +import org.gradle.api.GradleException; +import org.gradle.api.JavaVersion; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.file.FileCollection; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.InputFile; +import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.OutputDirectory; +import org.gradle.api.tasks.StopExecutionException; +import org.gradle.api.tasks.TaskAction; +import org.gradle.process.ExecResult; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; +import java.util.Set; +import java.util.TreeSet; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +public class ThirdPartyAuditTask extends DefaultTask { + + private static final Pattern MISSING_CLASS_PATTERN = Pattern.compile( + "WARNING: The referenced class '(.*)' cannot be loaded\\. Please fix the classpath!" + ); + + private static final Pattern VIOLATION_PATTERN = Pattern.compile( + "\\s\\sin ([a-zA-Z0-9$.]+) \\(.*\\)" + );
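+ // Example (hypothetical names): "WARNING: The referenced class 'org.acme.Missing' cannot be loaded. Please fix the classpath!" + // captures org.acme.Missing, while a two-space-indented violation line "  in org.acme.Caller (Caller.java:42)" captures org.acme.Caller.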
+ + /** + * patterns for classes to exclude, because we understand their issues + */ + private Set<String> excludes = new TreeSet<>(); + + private File signatureFile; + + private String javaHome; + + private JavaVersion targetCompatibility; + + @Input + public JavaVersion getTargetCompatibility() { + return targetCompatibility; + } + + public void setTargetCompatibility(JavaVersion targetCompatibility) { + this.targetCompatibility = targetCompatibility; + } + + @InputFiles + public Configuration getForbiddenAPIsConfiguration() { + return getProject().getConfigurations().getByName("forbiddenApisCliJar"); + } + + @InputFile + public File getSignatureFile() { + return signatureFile; + } + + public void setSignatureFile(File signatureFile) { + this.signatureFile = signatureFile; + } + + @InputFiles + public Configuration getRuntimeConfiguration() { + Configuration runtime = getProject().getConfigurations().findByName("runtime"); + if (runtime == null) { + return getProject().getConfigurations().getByName("testCompile"); + } + return runtime; + } + + @Input + public String getJavaHome() { + return javaHome; + } + + public void setJavaHome(String javaHome) { + this.javaHome = javaHome; + } + + @InputFiles + public Configuration getCompileOnlyConfiguration() { + return getProject().getConfigurations().getByName("compileOnly"); + } + + @OutputDirectory + public File getJarExpandDir() { + return new File( + new File(getProject().getBuildDir(), "precommit/thirdPartyAudit"), + getName() + ); + } + + public void setExcludes(String... classes) { + excludes.clear(); + for (String each : classes) { + if (each.indexOf('*') != -1) { + throw new IllegalArgumentException("illegal third party audit exclusion: '" + each + "', wildcards are not permitted!"); + } + excludes.add(each); + } + } + + @Input + public Set<String> getExcludes() { + return Collections.unmodifiableSet(excludes); + } + + @TaskAction + public void runThirdPartyAudit() throws IOException { + FileCollection jars = getJarsToScan(); + + extractJars(jars); + + final String forbiddenApisOutput = runForbiddenAPIsCli(); + + final Set<String> missingClasses = new TreeSet<>(); + Matcher missingMatcher = MISSING_CLASS_PATTERN.matcher(forbiddenApisOutput); + while (missingMatcher.find()) { + missingClasses.add(missingMatcher.group(1)); + } + + final Set<String> violationsClasses = new TreeSet<>(); + Matcher violationMatcher = VIOLATION_PATTERN.matcher(forbiddenApisOutput); + while (violationMatcher.find()) { + violationsClasses.add(violationMatcher.group(1)); + } + + Set<String> jdkJarHellClasses = runJdkJarHellCheck(); + + assertNoPointlessExclusions(missingClasses, violationsClasses, jdkJarHellClasses); + + assertNoMissingAndViolations(missingClasses, violationsClasses); + + assertNoJarHell(jdkJarHellClasses); + } + + private void extractJars(FileCollection jars) { + File jarExpandDir = getJarExpandDir(); + // We need to clean up to make sure old dependencies don't linger + getProject().delete(jarExpandDir); + jars.forEach(jar -> + getProject().copy(spec -> { + spec.from(getProject().zipTree(jar)); + spec.into(jarExpandDir); + // Exclude classes for multi release jars above target + for (int i = Integer.parseInt(targetCompatibility.getMajorVersion()) + 1; + i <= Integer.parseInt(JavaVersion.VERSION_HIGHER.getMajorVersion()); + i++ + ) { + spec.exclude("META-INF/versions/" + i + "/**"); + } + }) + ); + } + + private void assertNoJarHell(Set<String> jdkJarHellClasses) {
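+ // Excluded classes are acknowledged jar hell; drop them so only unexpected clashes fail the build.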
jdkJarHellClasses.removeAll(excludes); + if (jdkJarHellClasses.isEmpty() == false) { + throw new IllegalStateException("Jar Hell with the JDK:" + formatClassList(jdkJarHellClasses)); + } + } + + private void assertNoMissingAndViolations(Set<String> missingClasses, Set<String> violationsClasses) { + missingClasses.removeAll(excludes); + violationsClasses.removeAll(excludes); + String missingText = formatClassList(missingClasses); + String violationsText = formatClassList(violationsClasses); + if (missingText.isEmpty() && violationsText.isEmpty()) { + getLogger().info("Third party audit passed successfully"); + } else { + throw new IllegalStateException( + "Audit of third party dependencies failed:\n" + + (missingText.isEmpty() ? "" : "Missing classes:\n" + missingText) + + (violationsText.isEmpty() ? "" : "Classes with violations:\n" + violationsText) + ); + } + } + + private void assertNoPointlessExclusions(Set<String> missingClasses, Set<String> violationsClasses, Set<String> jdkJarHellClasses) { + // keep our whitelist up to date + Set<String> bogusExclusions = new TreeSet<>(excludes); + bogusExclusions.removeAll(missingClasses); + bogusExclusions.removeAll(jdkJarHellClasses); + bogusExclusions.removeAll(violationsClasses); + if (bogusExclusions.isEmpty() == false) { + throw new IllegalStateException( + "Invalid exclusions, nothing is wrong with these classes: " + formatClassList(bogusExclusions) + ); + } + } + + private String runForbiddenAPIsCli() throws IOException { + ByteArrayOutputStream errorOut = new ByteArrayOutputStream(); + getProject().javaexec(spec -> { + spec.setExecutable(javaHome + "/bin/java"); + spec.classpath( + getForbiddenAPIsConfiguration(), + getRuntimeConfiguration(), + getCompileOnlyConfiguration() + ); + spec.setMain("de.thetaphi.forbiddenapis.cli.CliMain"); + spec.args( + "-f", getSignatureFile().getAbsolutePath(), + "-d", getJarExpandDir(), + "--allowmissingclasses" + ); + spec.setErrorOutput(errorOut); + if (getLogger().isInfoEnabled() == false) { + spec.setStandardOutput(new NullOutputStream()); + } + spec.setIgnoreExitValue(true); + }); + final String forbiddenApisOutput; + try (ByteArrayOutputStream outputStream = errorOut) { + forbiddenApisOutput = outputStream.toString(StandardCharsets.UTF_8.name()); + } + if (getLogger().isInfoEnabled()) { + getLogger().info(forbiddenApisOutput); + } + return forbiddenApisOutput; + } +
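+ // Only third-party jars are scanned; anything in the org.elasticsearch group is audited by its own project's checks.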
+ private FileCollection getJarsToScan() { + FileCollection jars = getRuntimeConfiguration() + .fileCollection(dep -> dep.getGroup().startsWith("org.elasticsearch") == false); + Configuration compileOnlyConfiguration = getCompileOnlyConfiguration(); + // don't scan provided dependencies that we already scanned, e.g. don't scan core's dependencies for every plugin + if (compileOnlyConfiguration != null) { + jars = jars.minus(compileOnlyConfiguration); + } + if (jars.isEmpty()) { + throw new StopExecutionException("No jars to scan"); + } + return jars; + } + + private String formatClassList(Set<String> classList) { + return classList.stream() + .map(name -> " * " + name) + .collect(Collectors.joining("\n")); + } + + private Set<String> runJdkJarHellCheck() throws IOException { + ByteArrayOutputStream standardOut = new ByteArrayOutputStream(); + ExecResult execResult = getProject().javaexec(spec -> { + URL location = NamingConventionsCheck.class.getProtectionDomain().getCodeSource().getLocation(); + if (location.getProtocol().equals("file") == false) { + throw new GradleException("Unexpected location for NamingConventionCheck class: " + location); + } + try { + spec.classpath( + location.toURI().getPath(), + getRuntimeConfiguration(), + getCompileOnlyConfiguration() + ); + } catch (URISyntaxException e) { + throw new AssertionError(e); + } + spec.setMain(JdkJarHellCheck.class.getName()); + spec.args(getJarExpandDir()); + spec.setIgnoreExitValue(true); + spec.setExecutable(javaHome + "/bin/java"); + spec.setStandardOutput(standardOut); + }); + if (execResult.getExitValue() == 0) { + return Collections.emptySet(); + } + final String jdkJarHellCheckList; + try (ByteArrayOutputStream outputStream = standardOut) { + jdkJarHellCheckList = outputStream.toString(StandardCharsets.UTF_8.name()); + } + return new TreeSet<>(Arrays.asList(jdkJarHellCheckList.split("\\r?\\n"))); + } + +} diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties new file mode 100644 index 00000000000..dfd6cd9956a --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties @@ -0,0 +1 @@ +implementation-class=org.elasticsearch.gradle.clusterformation.ClusterformationPlugin diff --git a/buildSrc/src/main/resources/checkstyle.xml b/buildSrc/src/main/resources/checkstyle.xml index 033f020fde0..e1000b3e4a9 100644 --- a/buildSrc/src/main/resources/checkstyle.xml +++ b/buildSrc/src/main/resources/checkstyle.xml @@ -23,6 +23,7 @@ unfair.
--> + diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 420ed3b10b4..94bea76fe4b 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -9,6 +9,7 @@ + @@ -359,13 +360,7 @@ - - - - - - - + @@ -467,7 +462,6 @@ - @@ -539,7 +533,6 @@ - @@ -641,8 +634,6 @@ - - diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index 899dd4f5927..9add3349f9e 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -4.9 \ No newline at end of file +4.10 \ No newline at end of file diff --git a/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy b/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy index ad36c840783..f6b9cb5fc95 100644 --- a/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy +++ b/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy @@ -26,7 +26,7 @@ class VersionCollectionTests extends GradleUnitTestCase { assertEquals(vc.nextMinorSnapshot, Version.fromString("6.3.0-SNAPSHOT")) assertEquals(vc.stagedMinorSnapshot, Version.fromString("6.2.0-SNAPSHOT")) assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.1.1-SNAPSHOT")) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) + assertNull(vc.maintenanceBugfixSnapshot) vc.indexCompatible.containsAll(vc.versions) @@ -65,7 +65,7 @@ class VersionCollectionTests extends GradleUnitTestCase { assertEquals(vc.nextMinorSnapshot, Version.fromString("6.3.0-SNAPSHOT")) assertEquals(vc.stagedMinorSnapshot, null) assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.2.1-SNAPSHOT")) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) + assertNull(vc.maintenanceBugfixSnapshot) vc.indexCompatible.containsAll(vc.versions) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index 9b63d6f45e0..aca99067011 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -153,12 +153,4 @@ public class BuildExamplePluginsIT extends GradleIntegrationTestCase { } } - private String getLocalTestRepoPath() { - String property = System.getProperty("test.local-test-repo-path"); - Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests"); - File file = new File(property); - assertTrue("Expected " + property + " to exist, but it did not!", file.exists()); - return file.getAbsolutePath(); - } - } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java new file mode 100644 index 00000000000..dd0dbb25208 --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle; + +import org.apache.commons.io.IOUtils; +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.BuildResult; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.zip.ZipEntry; +import java.util.zip.ZipFile; + +public class BuildPluginIT extends GradleIntegrationTestCase { + + public void testPluginCanBeApplied() { + BuildResult result = getGradleRunner("elasticsearch.build") + .withArguments("hello", "-s") + .build(); + assertTaskSuccessful(result, ":hello"); + assertOutputContains("build plugin can be applied"); + } + + public void testCheckTask() { + BuildResult result = getGradleRunner("elasticsearch.build") + .withArguments("check", "assemble", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) + .build(); + assertTaskSuccessful(result, ":check"); + } + + public void testLicenseAndNotice() throws IOException { + BuildResult result = getGradleRunner("elasticsearch.build") + .withArguments("clean", "assemble", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) + .build(); + + assertTaskSuccessful(result, ":assemble"); + + assertBuildFileExists(result, "elasticsearch.build", "distributions/elasticsearch.build.jar"); + + try (ZipFile zipFile = new ZipFile(new File( + getBuildDir("elasticsearch.build"), "distributions/elasticsearch.build.jar" + ))) { + ZipEntry licenseEntry = zipFile.getEntry("META-INF/LICENSE.txt"); + ZipEntry noticeEntry = zipFile.getEntry("META-INF/NOTICE.txt"); + assertNotNull("Jar does not have META-INF/LICENSE.txt", licenseEntry); + assertNotNull("Jar does not have META-INF/NOTICE.txt", noticeEntry); + try ( + InputStream license = zipFile.getInputStream(licenseEntry); + InputStream notice = zipFile.getInputStream(noticeEntry) + ) { + assertEquals("this is a test license file", IOUtils.toString(license, StandardCharsets.UTF_8.name())); + assertEquals("this is a test notice file", IOUtils.toString(notice, StandardCharsets.UTF_8.name())); + } + } + } + + +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java index 98fea2ea15a..99afd0bcbe0 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java @@ -40,7 +40,7 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe .withArguments("buildResources", "-s", "-i") .withPluginClasspath() .build(); - assertTaskSuccessfull(result, ":buildResources"); + assertTaskSuccessful(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); @@ -61,8 +61,8 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe .withPluginClasspath() .build(); - 
assertTaskSuccessfull(result, ":buildResources"); - assertTaskSuccessfull(result, ":sampleCopyAll"); + assertTaskSuccessful(result, ":buildResources"); + assertTaskSuccessful(result, ":sampleCopyAll"); assertBuildFileExists(result, PROJECT_NAME, "sampleCopyAll/checkstyle.xml"); // This is a side effect of compile time reference assertBuildFileExists(result, PROJECT_NAME, "sampleCopyAll/checkstyle_suppressions.xml"); @@ -75,7 +75,7 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe .withPluginClasspath() .build(); - assertTaskSuccessfull(result, ":sample"); + assertTaskSuccessful(result, ":sample"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java new file mode 100644 index 00000000000..c690557537d --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java @@ -0,0 +1,144 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.gradle.clusterformation; + +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.BuildResult; +import org.gradle.testkit.runner.GradleRunner; +import org.gradle.testkit.runner.TaskOutcome; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +public class ClusterformationPluginIT extends GradleIntegrationTestCase { + + public void testListClusters() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("listElasticSearchClusters", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SUCCESS, result.task(":listElasticSearchClusters").getOutcome()); + assertOutputContains( + result.getOutput(), + " * myTestCluster:" + ); + + } + + public void testUseClusterByOne() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("user1", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); + assertOutputContains( + result.getOutput(), + "Starting cluster: myTestCluster", + "Stopping myTestCluster, number of claims is 0" + ); + } + + public void testUseClusterByOneWithDryRun() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("user1", "-s", "--dry-run") + .withPluginClasspath() + .build(); + + assertNull(result.task(":user1")); + assertOutputDoesNotContain( + result.getOutput(), + "Starting cluster: myTestCluster", + "Stopping myTestCluster, number of claims is 0" + ); + } + + public void testUseClusterByTwo() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("user1", "user2", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); + assertEquals(TaskOutcome.SUCCESS, result.task(":user2").getOutcome()); + assertOutputContains( + result.getOutput(), + "Starting cluster: myTestCluster", + "Not stopping myTestCluster, since cluster still has 1 claim(s)", + "Stopping myTestCluster, number of claims is 0" + ); + } + + public void testUseClusterByUpToDateTask() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("upToDate1", "upToDate2", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate1").getOutcome()); + assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate2").getOutcome()); + assertOutputContains( + result.getOutput(), + "Not stopping myTestCluster, since cluster still has 1 claim(s)", + "cluster was not running: myTestCluster" + ); + assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster"); + } + + public void testUseClusterBySkippedTask() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("skipped1", "skipped2", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome()); + assertEquals(TaskOutcome.SKIPPED, result.task(":skipped2").getOutcome()); + assertOutputContains( + result.getOutput(), + "Not stopping myTestCluster, since cluster still has 1 claim(s)", + "cluster was not running: myTestCluster" + ); + assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster"); + } + + public void 
void testUseClusterBySkippedAndWorkingTask() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("skipped1", "user1", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome()); + assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); + assertOutputContains( + result.getOutput(), + "> Task :user1", + "Starting cluster: myTestCluster", + "Stopping myTestCluster, number of claims is 0" + ); + } + +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java new file mode 100644 index 00000000000..03f2022bc66 --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java @@ -0,0 +1,42 @@ +package org.elasticsearch.gradle.precommit; + +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.BuildResult; +import org.gradle.testkit.runner.GradleRunner; + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +public class JarHellTaskIT extends GradleIntegrationTestCase { + + public void testJarHellDetected() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("jarHell")) + .withArguments("clean", "precommit", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) + .withPluginClasspath() + .buildAndFail(); + + assertTaskFailed(result, ":jarHell"); + assertOutputContains( + result.getOutput(), + "Exception in thread \"main\" java.lang.IllegalStateException: jar hell!", + "class: org.apache.logging.log4j.Logger" + ); + } + +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/NamingConventionsTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/NamingConventionsTaskIT.java index 7e469e8597d..745c63cd4dc 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/NamingConventionsTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/NamingConventionsTaskIT.java @@ -2,74 +2,57 @@ package org.elasticsearch.gradle.precommit; import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; -import org.gradle.testkit.runner.GradleRunner; -import org.gradle.testkit.runner.TaskOutcome; import java.util.Arrays; +import java.util.HashSet; public class NamingConventionsTaskIT extends GradleIntegrationTestCase { - public void testPluginCanBeApplied() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("namingConventionsSelfTest")) - .withArguments("hello", "-s", "-PcheckForTestsInMain=false") - .withPluginClasspath() - .build(); - - assertEquals(TaskOutcome.SUCCESS, result.task(":hello").getOutcome()); - String output = result.getOutput(); - assertTrue(output, output.contains("build plugin can be applied")); - } - public void testNameCheckFailsAsItShould() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("namingConventionsSelfTest")) + BuildResult result = getGradleRunner("namingConventionsSelfTest") .withArguments("namingConventions", "-s", "-PcheckForTestsInMain=false") - .withPluginClasspath() .buildAndFail(); - assertNotNull("task did not run", result.task(":namingConventions")); - assertEquals(TaskOutcome.FAILED, result.task(":namingConventions").getOutcome()); - String output = result.getOutput(); - for (String line : Arrays.asList( - "Found inner classes that are tests, which are excluded from the test runner:", - "* org.elasticsearch.test.NamingConventionsCheckInMainIT$InternalInvalidTests", - "Classes ending with [Tests] must subclass [UnitTestCase]:", - "* org.elasticsearch.test.NamingConventionsCheckInMainTests", - "* org.elasticsearch.test.NamingConventionsCheckInMainIT", - "Not all subclasses of UnitTestCase match the naming convention. Concrete classes must end with [Tests]:", - "* org.elasticsearch.test.WrongName")) { - assertTrue( - "expected: '" + line + "' but it was not found in the output:\n" + output, - output.contains(line) - ); - } + assertTaskFailed(result, ":namingConventions"); + assertOutputContains( + result.getOutput(), + // TODO: java9 Set.of + new HashSet<>( + Arrays.asList( + "Not all subclasses of UnitTestCase match the naming convention. 
Concrete classes must end with [Tests]:", + "* org.elasticsearch.test.WrongName", + "Found inner classes that are tests, which are excluded from the test runner:", + "* org.elasticsearch.test.NamingConventionsCheckInMainIT$InternalInvalidTests", + "Classes ending with [Tests] must subclass [UnitTestCase]:", + "* org.elasticsearch.test.NamingConventionsCheckInMainTests", + "* org.elasticsearch.test.NamingConventionsCheckInMainIT" + ) + ) + ); } public void testNameCheckFailsAsItShouldWithMain() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("namingConventionsSelfTest")) + BuildResult result = getGradleRunner("namingConventionsSelfTest") .withArguments("namingConventions", "-s", "-PcheckForTestsInMain=true") - .withPluginClasspath() .buildAndFail(); - assertNotNull("task did not run", result.task(":namingConventions")); - assertEquals(TaskOutcome.FAILED, result.task(":namingConventions").getOutcome()); - - String output = result.getOutput(); - for (String line : Arrays.asList( - "Classes ending with [Tests] or [IT] or extending [UnitTestCase] must be in src/test/java:", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyInterfaceTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyAbstractTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$InnerTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$NotImplementingTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongNameTheSecond", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongName")) { - assertTrue( - "expected: '" + line + "' but it was not found in the output:\n"+output, - output.contains(line) - ); - } + assertTaskFailed(result, ":namingConventions"); + assertOutputContains( + result.getOutput(), + // TODO: java9 Set.of + new HashSet<>( + Arrays.asList( + "Classes ending with [Tests] or [IT] or extending [UnitTestCase] must be in src/test/java:", + "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyInterfaceTests", + "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyInterfaceTests", + "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyAbstractTests", + "* org.elasticsearch.test.NamingConventionsCheckBadClasses$InnerTests", + "* org.elasticsearch.test.NamingConventionsCheckBadClasses$NotImplementingTests", + "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongNameTheSecond", + "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongName" + ) + ) + ); } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index f00ab406a6c..f8e3cf88c40 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -9,6 +9,8 @@ import java.io.File; import java.nio.file.Files; import java.nio.file.Path; import java.util.List; +import java.util.Objects; +import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -46,6 +48,12 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { } } + protected void assertOutputContains(String output, Set lines) { + for (String line : lines) { + assertOutputContains(output, line); + } + } + protected void assertOutputContains(String output, String line) { assertTrue( "Expected the following line in output:\n\n" + line 
+ "\n\nOutput is:\n" + output, @@ -66,15 +74,24 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { } } - protected void assertTaskSuccessfull(BuildResult result, String taskName) { + protected void assertTaskFailed(BuildResult result, String taskName) { + assertTaskOutcome(result, taskName, TaskOutcome.FAILED); + } + + protected void assertTaskSuccessful(BuildResult result, String taskName) { + assertTaskOutcome(result, taskName, TaskOutcome.SUCCESS); + } + + private void assertTaskOutcome(BuildResult result, String taskName, TaskOutcome taskOutcome) { BuildTask task = result.task(taskName); if (task == null) { - fail("Expected task `" + taskName + "` to be successful, but it did not run"); + fail("Expected task `" + taskName + "` to be " + taskOutcome +", but it did not run" + + "\n\nOutput is:\n" + result.getOutput()); } assertEquals( - "Expected task to be successful but it was: " + task.getOutcome() + - "\n\nOutput is:\n" + result.getOutput() , - TaskOutcome.SUCCESS, + "Expected task `" + taskName +"` to be successful but it was: " + task.getOutcome() + + taskOutcome + "\n\nOutput is:\n" + result.getOutput() , + taskOutcome, task.getOutcome() ); } @@ -109,4 +126,17 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { Files.exists(absPath) ); } + + protected String getLocalTestRepoPath() { + String property = System.getProperty("test.local-test-repo-path"); + Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests"); + File file = new File(property); + assertTrue("Expected " + property + " to exist, but it did not!", file.exists()); + if (File.separator.equals("\\")) { + // Use / on Windows too, the build script is not happy with \ + return file.getAbsolutePath().replace(File.separator, "/"); + } else { + return file.getAbsolutePath(); + } + } } diff --git a/buildSrc/src/testKit/clusterformation/build.gradle b/buildSrc/src/testKit/clusterformation/build.gradle new file mode 100644 index 00000000000..ae9dd8a2c33 --- /dev/null +++ b/buildSrc/src/testKit/clusterformation/build.gradle @@ -0,0 +1,41 @@ +plugins { + id 'elasticsearch.clusterformation' +} + +elasticSearchClusters { + myTestCluster { + distribution = 'ZIP' + } +} + +task user1 { + useCluster elasticSearchClusters.myTestCluster + doLast { + println "user1 executing" + } +} + +task user2 { + useCluster elasticSearchClusters.myTestCluster + doLast { + println "user2 executing" + } +} + +task upToDate1 { + useCluster elasticSearchClusters.myTestCluster +} + +task upToDate2 { + useCluster elasticSearchClusters.myTestCluster +} + +task skipped1 { + enabled = false + useCluster elasticSearchClusters.myTestCluster +} + +task skipped2 { + enabled = false + useCluster elasticSearchClusters.myTestCluster +} diff --git a/buildSrc/src/testKit/elasticsearch.build/LICENSE b/buildSrc/src/testKit/elasticsearch.build/LICENSE new file mode 100644 index 00000000000..cf6ea07b188 --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/LICENSE @@ -0,0 +1 @@ +this is a test license file \ No newline at end of file diff --git a/buildSrc/src/testKit/elasticsearch.build/NOTICE b/buildSrc/src/testKit/elasticsearch.build/NOTICE new file mode 100644 index 00000000000..0c070fe7424 --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/NOTICE @@ -0,0 +1 @@ +this is a test notice file \ No newline at end of file diff --git a/buildSrc/src/testKit/elasticsearch.build/build.gradle b/buildSrc/src/testKit/elasticsearch.build/build.gradle new file mode 100644 index 
diff --git a/buildSrc/src/testKit/elasticsearch.build/LICENSE b/buildSrc/src/testKit/elasticsearch.build/LICENSE new file mode 100644 index 00000000000..cf6ea07b188 --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/LICENSE @@ -0,0 +1 @@ +this is a test license file \ No newline at end of file diff --git a/buildSrc/src/testKit/elasticsearch.build/NOTICE b/buildSrc/src/testKit/elasticsearch.build/NOTICE new file mode 100644 index 00000000000..0c070fe7424 --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/NOTICE @@ -0,0 +1 @@ +this is a test notice file \ No newline at end of file diff --git a/buildSrc/src/testKit/elasticsearch.build/build.gradle b/buildSrc/src/testKit/elasticsearch.build/build.gradle new file mode 100644 index 00000000000..2a9e8fa3ec9 --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/build.gradle @@ -0,0 +1,36 @@ +plugins { + id 'java' + id 'elasticsearch.build' +} + +ext.licenseFile = file("LICENSE") +ext.noticeFile = file("NOTICE") + +dependencies { + compile "junit:junit:${versions.junit}" + // missing classes in thirdparty audit + compile 'org.hamcrest:hamcrest-core:1.3' +} + +repositories { + mavenCentral() + maven { + url System.getProperty("local.repo.path") + } +} + +// todo remove offending rules +forbiddenApisMain.enabled = false +forbiddenApisTest.enabled = false +// requires dependency on testing fw +jarHell.enabled = false +// we don't have tests for now +test.enabled = false + +task hello { + doFirst { + println "build plugin can be applied" + } +} diff --git a/buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-1.3.jar.sha1 b/buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-1.3.jar.sha1 new file mode 100644 index 00000000000..1085ece454c --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-1.3.jar.sha1 @@ -0,0 +1 @@ +42a25dc3219429f0e5d060061f71acb49bf010a0 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-NOTICE.txt b/buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/jackson-core-asl-NOTICE.txt rename to buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-NOTICE.txt b/buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/proto-google-common-protos-NOTICE.txt rename to buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-NOTICE.txt diff --git a/buildSrc/src/testKit/elasticsearch.build/licenses/junit-4.12.jar.sha1 b/buildSrc/src/testKit/elasticsearch.build/licenses/junit-4.12.jar.sha1 new file mode 100644 index 00000000000..94d69f8b715 --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/licenses/junit-4.12.jar.sha1 @@ -0,0 +1 @@ +2973d150c0dc1fefe998f834810d68f278ea58ec \ No newline at end of file diff --git a/x-pack/plugin/ml/log-structure-finder/licenses/super-csv-NOTICE.txt b/buildSrc/src/testKit/elasticsearch.build/licenses/junit-LICENSE.txt similarity index 100% rename from x-pack/plugin/ml/log-structure-finder/licenses/super-csv-NOTICE.txt rename to buildSrc/src/testKit/elasticsearch.build/licenses/junit-LICENSE.txt diff --git a/buildSrc/src/testKit/elasticsearch.build/licenses/junit-NOTICE.txt b/buildSrc/src/testKit/elasticsearch.build/licenses/junit-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/package-info.java b/buildSrc/src/testKit/elasticsearch.build/src/main/java/org/elasticsearch/SampleClass.java similarity index 85% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/package-info.java rename to buildSrc/src/testKit/elasticsearch.build/src/main/java/org/elasticsearch/SampleClass.java index b1e4c6c0d4e..defed880495 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/package-info.java +++ b/buildSrc/src/testKit/elasticsearch.build/src/main/java/org/elasticsearch/SampleClass.java @@ -16,9 +16,11 @@ * specific language governing permissions and limitations * under the License.
*/ +package org.elasticsearch; /** - * Request and Response objects for the default distribution's Machine - * Learning APIs. + * This is just a test class */ -package org.elasticsearch.protocol.xpack.ml; +public class SampleClass { + +} diff --git a/buildSrc/src/testKit/jarHell/build.gradle b/buildSrc/src/testKit/jarHell/build.gradle new file mode 100644 index 00000000000..17ff43fc740 --- /dev/null +++ b/buildSrc/src/testKit/jarHell/build.gradle @@ -0,0 +1,29 @@ +plugins { + id 'java' + id 'elasticsearch.build' +} + +dependencyLicenses.enabled = false +dependenciesInfo.enabled = false +forbiddenApisMain.enabled = false +forbiddenApisTest.enabled = false +thirdPartyAudit.enabled = false +namingConventions.enabled = false +ext.licenseFile = file("$buildDir/dummy/license") +ext.noticeFile = file("$buildDir/dummy/notice") + +repositories { + mavenCentral() + maven { + url System.getProperty("local.repo.path") + } +} + +dependencies { + // Needed for the JarHell task + testCompile ("org.elasticsearch.test:framework:${versions.elasticsearch}") + // causes jar hell with local sources + compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" +} diff --git a/buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java b/buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java new file mode 100644 index 00000000000..a4332c664fa --- /dev/null +++ b/buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java @@ -0,0 +1,7 @@ +package org.apache.logging.log4j; + +// Jar Hell ! +public class Logger { + +} + diff --git a/buildSrc/src/testKit/namingConventionsSelfTest/build.gradle b/buildSrc/src/testKit/namingConventionsSelfTest/build.gradle index 47e0e94b86a..b1c56ddc804 100644 --- a/buildSrc/src/testKit/namingConventionsSelfTest/build.gradle +++ b/buildSrc/src/testKit/namingConventionsSelfTest/build.gradle @@ -13,14 +13,8 @@ thirdPartyAudit.enabled = false ext.licenseFile = file("$buildDir/dummy/license") ext.noticeFile = file("$buildDir/dummy/notice") -task hello { - doFirst { - println "build plugin can be applied" - } -} - dependencies { - compile "junit:junit:${versions.junit}" + compile "junit:junit:4.12" } namingConventions { diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 34c266913d0..914bae4d2c8 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.5.0-snapshot-13b9e28f9d +lucene = 8.0.0-snapshot-66c671ea80 # optional dependencies spatial4j = 0.7 diff --git a/client/benchmark/build.gradle b/client/benchmark/build.gradle index c67120c7cf5..a53f1020340 100644 --- a/client/benchmark/build.gradle +++ b/client/benchmark/build.gradle @@ -23,8 +23,7 @@ apply plugin: 'application' group = 'org.elasticsearch.client' // Not published so no need to assemble -tasks.remove(assemble) -build.dependsOn.remove('assemble') +assemble.enabled = false archivesBaseName = 'client-benchmarks' mainClassName = 'org.elasticsearch.client.benchmark.BenchmarkMain' diff --git a/client/client-benchmark-noop-api-plugin/build.gradle b/client/client-benchmark-noop-api-plugin/build.gradle index cc84207d90d..b5a5fb5dc5e 100644 --- a/client/client-benchmark-noop-api-plugin/build.gradle +++ b/client/client-benchmark-noop-api-plugin/build.gradle @@ -28,8 +28,7 @@ esplugin { } // Not published so no need to assemble -tasks.remove(assemble) -build.dependsOn.remove('assemble') +assemble.enabled = false dependencyLicenses.enabled = false
dependenciesInfo.enabled = false diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 6f5eab6e1db..c608d7c91f1 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -16,8 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks import org.elasticsearch.gradle.test.RestIntegTestTask import org.gradle.api.internal.provider.Providers @@ -47,13 +45,13 @@ dependencies { * Everything in the "shadow" configuration is *not* copied into the * shadowJar. */ - shadow "org.elasticsearch:elasticsearch:${version}" - shadow "org.elasticsearch.client:elasticsearch-rest-client:${version}" - shadow "org.elasticsearch.plugin:parent-join-client:${version}" - shadow "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}" - shadow "org.elasticsearch.plugin:rank-eval-client:${version}" - shadow "org.elasticsearch.plugin:lang-mustache-client:${version}" - compile project(':x-pack:protocol') + compile "org.elasticsearch:elasticsearch:${version}" + compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" + compile "org.elasticsearch.plugin:parent-join-client:${version}" + compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}" + compile "org.elasticsearch.plugin:rank-eval-client:${version}" + compile "org.elasticsearch.plugin:lang-mustache-client:${version}" + bundle project(':x-pack:protocol') testCompile "org.elasticsearch.client:test:${version}" testCompile "org.elasticsearch.test:framework:${version}" @@ -75,10 +73,31 @@ dependencyLicenses { forbiddenApisMain { // core does not depend on the httpclient for compile so we add the signatures here. We don't add them for test as they are already // specified - signaturesURLs += [PrecommitTasks.getResource('/forbidden/http-signatures.txt')] - signaturesURLs += [file('src/main/resources/forbidden/rest-high-level-signatures.txt').toURI().toURL()] + addSignatureFiles 'http-signatures' + signaturesFiles += files('src/main/resources/forbidden/rest-high-level-signatures.txt') +} + +integTestRunner { + systemProperty 'tests.rest.cluster.username', System.getProperty('tests.rest.cluster.username', 'test_user') + systemProperty 'tests.rest.cluster.password', System.getProperty('tests.rest.cluster.password', 'test-password') } integTestCluster { setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setupCommand 'setupDummyUser', + 'bin/elasticsearch-users', + 'useradd', System.getProperty('tests.rest.cluster.username', 'test_user'), + '-p', System.getProperty('tests.rest.cluster.password', 'test-password'), + '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: System.getProperty('tests.rest.cluster.username', 'test_user'), + password: System.getProperty('tests.rest.cluster.password', 'test-password'), + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index b72a21ed7d1..f9b1474c69a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -56,7 +56,7 @@ public final class ClusterClient { */ public ClusterUpdateSettingsResponse putSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, + return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, ClusterRequestConverters::clusterPutSettings, options, ClusterUpdateSettingsResponse::fromXContent, emptySet()); } @@ -70,7 +70,7 @@ public final class ClusterClient { */ public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, + restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, ClusterRequestConverters::clusterPutSettings, options, ClusterUpdateSettingsResponse::fromXContent, listener, emptySet()); } @@ -85,7 +85,7 @@ public final class ClusterClient { */ public ClusterGetSettingsResponse getSettings(ClusterGetSettingsRequest clusterGetSettingsRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(clusterGetSettingsRequest, RequestConverters::clusterGetSettings, + return restHighLevelClient.performRequestAndParseEntity(clusterGetSettingsRequest, ClusterRequestConverters::clusterGetSettings, options, ClusterGetSettingsResponse::fromXContent, emptySet()); } @@ -99,7 +99,7 @@ public final class ClusterClient { */ public void getSettingsAsync(ClusterGetSettingsRequest clusterGetSettingsRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(clusterGetSettingsRequest, RequestConverters::clusterGetSettings, + restHighLevelClient.performRequestAsyncAndParseEntity(clusterGetSettingsRequest, ClusterRequestConverters::clusterGetSettings, options, ClusterGetSettingsResponse::fromXContent, listener, emptySet()); } @@ -115,7 +115,7 @@ public final class ClusterClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public ClusterHealthResponse health(ClusterHealthRequest healthRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(healthRequest, RequestConverters::clusterHealth, options, + return restHighLevelClient.performRequestAndParseEntity(healthRequest, ClusterRequestConverters::clusterHealth, options, ClusterHealthResponse::fromXContent, singleton(RestStatus.REQUEST_TIMEOUT.getStatus())); } @@ -129,7 +129,7 @@ public final class ClusterClient { * @param listener the listener to be notified upon request completion */ public void healthAsync(ClusterHealthRequest healthRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(healthRequest, RequestConverters::clusterHealth, options, + restHighLevelClient.performRequestAsyncAndParseEntity(healthRequest, ClusterRequestConverters::clusterHealth, options, ClusterHealthResponse::fromXContent, listener, singleton(RestStatus.REQUEST_TIMEOUT.getStatus())); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterRequestConverters.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterRequestConverters.java new file mode 100644 index 00000000000..d6c41e804df --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterRequestConverters.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.common.Strings; + +import java.io.IOException; + +final class ClusterRequestConverters { + + static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest) throws IOException { + Request request = new Request(HttpPut.METHOD_NAME, "/_cluster/settings"); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withTimeout(clusterUpdateSettingsRequest.timeout()); + parameters.withMasterTimeout(clusterUpdateSettingsRequest.masterNodeTimeout()); + + request.setEntity(RequestConverters.createEntity(clusterUpdateSettingsRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request clusterGetSettings(ClusterGetSettingsRequest clusterGetSettingsRequest) throws IOException { + Request request = new Request(HttpGet.METHOD_NAME, "/_cluster/settings"); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withLocal(clusterGetSettingsRequest.local()); + parameters.withIncludeDefaults(clusterGetSettingsRequest.includeDefaults()); + parameters.withMasterTimeout(clusterGetSettingsRequest.masterNodeTimeout()); + + return request; + } + + static Request clusterHealth(ClusterHealthRequest healthRequest) { + String[] indices = healthRequest.indices() == null ? 
Strings.EMPTY_ARRAY : healthRequest.indices(); + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_cluster/health") + .addCommaSeparatedPathParts(indices) + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + new RequestConverters.Params(request) + .withWaitForStatus(healthRequest.waitForStatus()) + .withWaitForNoRelocatingShards(healthRequest.waitForNoRelocatingShards()) + .withWaitForNoInitializingShards(healthRequest.waitForNoInitializingShards()) + .withWaitForActiveShards(healthRequest.waitForActiveShards(), ActiveShardCount.NONE) + .withWaitForNodes(healthRequest.waitForNodes()) + .withWaitForEvents(healthRequest.waitForEvents()) + .withTimeout(healthRequest.timeout()) + .withMasterTimeout(healthRequest.masterNodeTimeout()) + .withLocal(healthRequest.local()) + .withLevel(healthRequest.level()); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java new file mode 100644 index 00000000000..5099bf8d51d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; + +import java.io.IOException; + +import static java.util.Collections.emptySet; + + +public class GraphClient { + private final RestHighLevelClient restHighLevelClient; + + GraphClient(RestHighLevelClient restHighLevelClient) { + this.restHighLevelClient = restHighLevelClient; + } + + /** + * Executes an exploration request using the Graph API. + * + * See Graph API + * on elastic.co. + */ + public final GraphExploreResponse explore(GraphExploreRequest graphExploreRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(graphExploreRequest, GraphRequestConverters::explore, + options, GraphExploreResponse::fromXContext, emptySet()); + } + + /** + * Asynchronously executes an exploration request using the Graph API. + * + * See Graph API + * on elastic.co. 
+ */ + public final void exploreAsync(GraphExploreRequest graphExploreRequest, + RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(graphExploreRequest, GraphRequestConverters::explore, + options, GraphExploreResponse::fromXContext, listener, emptySet()); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphRequestConverters.java new file mode 100644 index 00000000000..c1f1e1d115f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphRequestConverters.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; + +import java.io.IOException; + +public class GraphRequestConverters { + + static Request explore(GraphExploreRequest exploreRequest) throws IOException { + String endpoint = RequestConverters.endpoint(exploreRequest.indices(), exploreRequest.types(), "_xpack/graph/_explore"); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(RequestConverters.createEntity(exploreRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java index 99d50f6b46b..eb070759ed9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java @@ -55,7 +55,7 @@ public final class IngestClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public AcknowledgedResponse putPipeline(PutPipelineRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::putPipeline, options, + return restHighLevelClient.performRequestAndParseEntity( request, IngestRequestConverters::putPipeline, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -68,7 +68,7 @@ public final class IngestClient { * @param listener the listener to be notified upon request completion */ public void putPipelineAsync(PutPipelineRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::putPipeline, options, + restHighLevelClient.performRequestAsyncAndParseEntity( request, IngestRequestConverters::putPipeline, options, AcknowledgedResponse::fromXContent, 
listener, emptySet()); } @@ -82,7 +82,7 @@ public final class IngestClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public GetPipelineResponse getPipeline(GetPipelineRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::getPipeline, options, + return restHighLevelClient.performRequestAndParseEntity( request, IngestRequestConverters::getPipeline, options, GetPipelineResponse::fromXContent, emptySet()); } @@ -95,7 +95,7 @@ public final class IngestClient { * @param listener the listener to be notified upon request completion */ public void getPipelineAsync(GetPipelineRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::getPipeline, options, + restHighLevelClient.performRequestAsyncAndParseEntity( request, IngestRequestConverters::getPipeline, options, GetPipelineResponse::fromXContent, listener, emptySet()); } @@ -110,7 +110,7 @@ public final class IngestClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public AcknowledgedResponse deletePipeline(DeletePipelineRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::deletePipeline, options, + return restHighLevelClient.performRequestAndParseEntity( request, IngestRequestConverters::deletePipeline, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -124,7 +124,7 @@ public final class IngestClient { * @param listener the listener to be notified upon request completion */ public void deletePipelineAsync(DeletePipelineRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::deletePipeline, options, + restHighLevelClient.performRequestAsyncAndParseEntity( request, IngestRequestConverters::deletePipeline, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -140,7 +140,7 @@ public final class IngestClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public SimulatePipelineResponse simulate(SimulatePipelineRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::simulatePipeline, options, + return restHighLevelClient.performRequestAndParseEntity( request, IngestRequestConverters::simulatePipeline, options, SimulatePipelineResponse::fromXContent, emptySet()); } @@ -157,7 +157,7 @@ public final class IngestClient { public void simulateAsync(SimulatePipelineRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::simulatePipeline, options, + restHighLevelClient.performRequestAsyncAndParseEntity( request, IngestRequestConverters::simulatePipeline, options, SimulatePipelineResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestRequestConverters.java new file mode 100644 index 00000000000..e81d716b60f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestRequestConverters.java @@ -0,0 
+1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.GetPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.SimulatePipelineRequest; + +import java.io.IOException; + +public class IngestRequestConverters { + + static Request getPipeline(GetPipelineRequest getPipelineRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ingest/pipeline") + .addCommaSeparatedPathParts(getPipelineRequest.getIds()) + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withMasterTimeout(getPipelineRequest.masterNodeTimeout()); + return request; + } + + static Request putPipeline(PutPipelineRequest putPipelineRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ingest/pipeline") + .addPathPart(putPipelineRequest.getId()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withTimeout(putPipelineRequest.timeout()); + parameters.withMasterTimeout(putPipelineRequest.masterNodeTimeout()); + + request.setEntity(RequestConverters.createEntity(putPipelineRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deletePipeline(DeletePipelineRequest deletePipelineRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ingest/pipeline") + .addPathPart(deletePipelineRequest.getId()) + .build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withTimeout(deletePipelineRequest.timeout()); + parameters.withMasterTimeout(deletePipelineRequest.masterNodeTimeout()); + + return request; + } + + static Request simulatePipeline(SimulatePipelineRequest simulatePipelineRequest) throws IOException { + RequestConverters.EndpointBuilder builder = new RequestConverters.EndpointBuilder().addPathPartAsIs("_ingest/pipeline"); + if (simulatePipelineRequest.getId() != null && !simulatePipelineRequest.getId().isEmpty()) { + builder.addPathPart(simulatePipelineRequest.getId()); + } + builder.addPathPartAsIs("_simulate"); + String endpoint = builder.build(); + Request request = new 
Request(HttpPost.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + params.putParam("verbose", Boolean.toString(simulatePipelineRequest.isVerbose())); + request.setEntity(RequestConverters.createEntity(simulatePipelineRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java index ca6539daa04..bf8abc21fe1 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java @@ -65,7 +65,7 @@ public final class LicenseClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public PutLicenseResponse putLicense(PutLicenseRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::putLicense, options, + return restHighLevelClient.performRequestAndParseEntity(request, LicenseRequestConverters::putLicense, options, PutLicenseResponse::fromXContent, emptySet()); } @@ -75,7 +75,7 @@ public final class LicenseClient { * @param listener the listener to be notified upon request completion */ public void putLicenseAsync(PutLicenseRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::putLicense, options, + restHighLevelClient.performRequestAsyncAndParseEntity(request, LicenseRequestConverters::putLicense, options, PutLicenseResponse::fromXContent, listener, emptySet()); } @@ -86,7 +86,7 @@ public final class LicenseClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public GetLicenseResponse getLicense(GetLicenseRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequest(request, RequestConverters::getLicense, options, + return restHighLevelClient.performRequest(request, LicenseRequestConverters::getLicense, options, response -> new GetLicenseResponse(convertResponseToJson(response)), emptySet()); } @@ -96,7 +96,7 @@ public final class LicenseClient { * @param listener the listener to be notified upon request completion */ public void getLicenseAsync(GetLicenseRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsync(request, RequestConverters::getLicense, options, + restHighLevelClient.performRequestAsync(request, LicenseRequestConverters::getLicense, options, response -> new GetLicenseResponse(convertResponseToJson(response)), listener, emptySet()); } @@ -107,7 +107,7 @@ public final class LicenseClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public AcknowledgedResponse deleteLicense(DeleteLicenseRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::deleteLicense, options, + return restHighLevelClient.performRequestAndParseEntity(request, LicenseRequestConverters::deleteLicense, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -117,7 +117,7 @@ public final class LicenseClient { * @param listener the listener to be notified upon request completion */ public void 
deleteLicenseAsync(DeleteLicenseRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::deleteLicense, options, + restHighLevelClient.performRequestAsyncAndParseEntity(request, LicenseRequestConverters::deleteLicense, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseRequestConverters.java new file mode 100644 index 00000000000..7c2c049324e --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseRequestConverters.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; +import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; +import org.elasticsearch.protocol.xpack.license.PutLicenseRequest; + +public class LicenseRequestConverters { + static Request putLicense(PutLicenseRequest putLicenseRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("license") + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withTimeout(putLicenseRequest.timeout()); + parameters.withMasterTimeout(putLicenseRequest.masterNodeTimeout()); + if (putLicenseRequest.isAcknowledge()) { + parameters.putParam("acknowledge", "true"); + } + request.setJsonEntity(putLicenseRequest.getLicenseDefinition()); + return request; + } + + static Request getLicense(GetLicenseRequest getLicenseRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("license") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withLocal(getLicenseRequest.local()); + return request; + } + + static Request deleteLicense(DeleteLicenseRequest deleteLicenseRequest) { + Request request = new Request(HttpDelete.METHOD_NAME, "/_xpack/license"); + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withTimeout(deleteLicenseRequest.timeout()); + parameters.withMasterTimeout(deleteLicenseRequest.masterNodeTimeout()); + return request; + } +} diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index e26a4c629a0..ecbe7f2d3a5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -19,17 +19,35 @@ package org.elasticsearch.client; +import org.apache.http.HttpEntity; import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.client.RequestConverters.EndpointBuilder; -import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; -import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; -import org.elasticsearch.protocol.xpack.ml.PutJobRequest; +import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.DeleteJobRequest; +import org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.ForecastJobRequest; +import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetInfluencersRequest; +import org.elasticsearch.client.ml.GetJobRequest; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetOverallBucketsRequest; +import org.elasticsearch.client.ml.GetRecordsRequest; +import org.elasticsearch.client.ml.OpenJobRequest; +import org.elasticsearch.client.ml.PostDataRequest; +import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import java.io.IOException; import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE; +import static org.elasticsearch.client.RequestConverters.createContentType; import static org.elasticsearch.client.RequestConverters.createEntity; final class MLRequestConverters { @@ -48,6 +66,40 @@ final class MLRequestConverters { return request; } + static Request getJob(GetJobRequest getJobRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(Strings.collectionToCommaDelimitedString(getJobRequest.getJobIds())) + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params params = new RequestConverters.Params(request); + if (getJobRequest.isAllowNoJobs() != null) { + params.putParam("allow_no_jobs", Boolean.toString(getJobRequest.isAllowNoJobs())); + } + + return request; + } + + static Request getJobStats(GetJobStatsRequest getJobStatsRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(Strings.collectionToCommaDelimitedString(getJobStatsRequest.getJobIds())) + .addPathPartAsIs("_stats") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params params = new RequestConverters.Params(request); + if (getJobStatsRequest.isAllowNoJobs() != null) { + params.putParam("allow_no_jobs", Boolean.toString(getJobStatsRequest.isAllowNoJobs())); + } + return request; + } + static Request openJob(OpenJobRequest openJobRequest) throws 
IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") @@ -57,7 +109,20 @@ final class MLRequestConverters { .addPathPartAsIs("_open") .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - request.setJsonEntity(openJobRequest.toString()); + request.setEntity(createEntity(openJobRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request closeJob(CloseJobRequest closeJobRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(Strings.collectionToCommaDelimitedString(closeJobRequest.getJobIds())) + .addPathPartAsIs("_close") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(closeJobRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -75,4 +140,128 @@ final class MLRequestConverters { return request; } + + static Request flushJob(FlushJobRequest flushJobRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(flushJobRequest.getJobId()) + .addPathPartAsIs("_flush") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(flushJobRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request forecastJob(ForecastJobRequest forecastJobRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(forecastJobRequest.getJobId()) + .addPathPartAsIs("_forecast") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(forecastJobRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request updateJob(UpdateJobRequest updateJobRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(updateJobRequest.getJobUpdate().getJobId()) + .addPathPartAsIs("_update") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(updateJobRequest.getJobUpdate(), REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request getBuckets(GetBucketsRequest getBucketsRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(getBucketsRequest.getJobId()) + .addPathPartAsIs("results") + .addPathPartAsIs("buckets") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(getBucketsRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request getOverallBuckets(GetOverallBucketsRequest getOverallBucketsRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(Strings.collectionToCommaDelimitedString(getOverallBucketsRequest.getJobIds())) + .addPathPartAsIs("results") + .addPathPartAsIs("overall_buckets") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(getOverallBucketsRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request 
getRecords(GetRecordsRequest getRecordsRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(getRecordsRequest.getJobId()) + .addPathPartAsIs("results") + .addPathPartAsIs("records") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(getRecordsRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request postData(PostDataRequest postDataRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(postDataRequest.getJobId()) + .addPathPartAsIs("_data") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + + RequestConverters.Params params = new RequestConverters.Params(request); + if (postDataRequest.getResetStart() != null) { + params.putParam(PostDataRequest.RESET_START.getPreferredName(), postDataRequest.getResetStart()); + } + if (postDataRequest.getResetEnd() != null) { + params.putParam(PostDataRequest.RESET_END.getPreferredName(), postDataRequest.getResetEnd()); + } + BytesReference content = postDataRequest.getContent(); + if (content != null) { + BytesRef source = postDataRequest.getContent().toBytesRef(); + HttpEntity byteEntity = new ByteArrayEntity(source.bytes, + source.offset, + source.length, + createContentType(postDataRequest.getXContentType())); + request.setEntity(byteEntity); + } + return request; + } + + static Request getInfluencers(GetInfluencersRequest getInfluencersRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(getInfluencersRequest.getJobId()) + .addPathPartAsIs("results") + .addPathPartAsIs("influencers") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(getInfluencersRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index 32b6cd6cf2c..85c5771f345 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -19,12 +19,34 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; -import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; -import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; -import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; -import org.elasticsearch.protocol.xpack.ml.PutJobRequest; -import org.elasticsearch.protocol.xpack.ml.PutJobResponse; +import org.elasticsearch.client.ml.ForecastJobRequest; +import org.elasticsearch.client.ml.ForecastJobResponse; +import org.elasticsearch.client.ml.PostDataRequest; +import org.elasticsearch.client.ml.PostDataResponse; +import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteJobRequest; +import org.elasticsearch.client.ml.DeleteJobResponse; +import org.elasticsearch.client.ml.FlushJobRequest; 
+import org.elasticsearch.client.ml.FlushJobResponse; +import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetInfluencersRequest; +import org.elasticsearch.client.ml.GetInfluencersResponse; +import org.elasticsearch.client.ml.GetJobRequest; +import org.elasticsearch.client.ml.GetJobResponse; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetJobStatsResponse; +import org.elasticsearch.client.ml.GetOverallBucketsRequest; +import org.elasticsearch.client.ml.GetOverallBucketsResponse; +import org.elasticsearch.client.ml.GetRecordsRequest; +import org.elasticsearch.client.ml.GetRecordsResponse; +import org.elasticsearch.client.ml.OpenJobRequest; +import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.job.stats.JobStats; import java.io.IOException; import java.util.Collections; @@ -50,9 +72,9 @@ public final class MachineLearningClient { * For additional info * see ML PUT job documentation * - * @param request the PutJobRequest containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings + * @param request The PutJobRequest containing the {@link org.elasticsearch.client.ml.job.config.Job} settings * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return PutJobResponse with enclosed {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} object + * @return PutJobResponse with enclosed {@link org.elasticsearch.client.ml.job.config.Job} object * @throws IOException when there is a serialization issue sending the request or receiving the response */ public PutJobResponse putJob(PutJobRequest request, RequestOptions options) throws IOException { @@ -69,7 +91,7 @@ public final class MachineLearningClient { * For additional info * see ML PUT job documentation * - * @param request the request containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings + * @param request The request containing the {@link org.elasticsearch.client.ml.job.config.Job} settings * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion */ @@ -82,13 +104,95 @@ public final class MachineLearningClient { Collections.emptySet()); } + /** + * Gets configuration info for one or more Machine Learning jobs. + * + *

+ * For additional info + * see the ML GET job documentation + *

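+ * A hypothetical usage sketch (the {@code client} variable and job id are assumptions, not part of this change):
+ * {@code
+ * GetJobRequest request = new GetJobRequest("my-job");
+ * GetJobResponse response = client.machineLearning().getJob(request, RequestOptions.DEFAULT);
+ * long numberOfJobs = response.count(); // count of jobs matched by the request
+ * }
+ *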
+ * @param request {@link GetJobRequest} Request containing a list of jobId(s) and additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return {@link GetJobResponse} response object containing + * the {@link org.elasticsearch.client.ml.job.config.Job} objects and the number of jobs found + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public GetJobResponse getJob(GetJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getJob, + options, + GetJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets configuration info for one or more Machine Learning jobs, asynchronously. + * + *

+ * For additional info + * see the ML GET job documentation + *

+ * @param request {@link GetJobRequest} Request containing a list of jobId(s) and additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified with {@link GetJobResponse} upon request completion + */ + public void getJobAsync(GetJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getJob, + options, + GetJobResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Gets usage statistics for one or more Machine Learning jobs. + * + *

+ * For additional info + * see Get Job stats docs + *

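+ * A hypothetical usage sketch (client and job id assumed, not part of this change):
+ * {@code
+ * GetJobStatsRequest request = new GetJobStatsRequest("my-job");
+ * GetJobStatsResponse response = client.machineLearning().getJobStats(request, RequestOptions.DEFAULT);
+ * }
+ *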
+ * @param request {@link GetJobStatsRequest} Request containing a list of jobId(s) and additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return {@link GetJobStatsResponse} response object containing + * the {@link JobStats} objects and the number of jobs found + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public GetJobStatsResponse getJobStats(GetJobStatsRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getJobStats, + options, + GetJobStatsResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets usage statistics for one or more Machine Learning jobs, asynchronously. + * + *

+ * For additional info + * see Get Job stats docs + *

+ * @param request {@link GetJobStatsRequest} Request containing a list of jobId(s) and additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified with {@link GetJobStatsResponse} upon request completion + */ + public void getJobStatsAsync(GetJobStatsRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getJobStats, + options, + GetJobStatsResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Deletes the given Machine Learning Job *

* For additional info * see ML Delete Job documentation *

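+ * A hypothetical usage sketch (client and job id assumed, not part of this change):
+ * {@code
+ * DeleteJobRequest request = new DeleteJobRequest("my-job");
+ * DeleteJobResponse response = client.machineLearning().deleteJob(request, RequestOptions.DEFAULT);
+ * boolean acknowledged = response.isAcknowledged();
+ * }
+ *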
- * @param request the request to delete the job + * @param request The request to delete the job * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return action acknowledgement * @throws IOException when there is a serialization issue sending the request or receiving the response @@ -107,7 +211,7 @@ public final class MachineLearningClient { * For additional info * see ML Delete Job documentation *

- * @param request the request to delete the job + * @param request The request to delete the job * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion */ @@ -131,7 +235,7 @@ * For additional info * see the ML Open Job documentation *

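+ * A hypothetical usage sketch (client and job id assumed, not part of this change):
+ * {@code
+ * OpenJobResponse response = client.machineLearning().openJob(new OpenJobRequest("my-job"), RequestOptions.DEFAULT);
+ * boolean opened = response.isOpened();
+ * }
+ *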
- * @param request request containing job_id and additional optional options + * @param request Request containing job_id and additional optional options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return response indicating whether the job was successfully opened * @throws IOException when there is a serialization issue sending the request or receiving the response @@ -154,7 +258,7 @@ * For additional info * see the ML Open Job documentation *

- * @param request request containing job_id and additional optional options + * @param request Request containing job_id and additional optional options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion */ @@ -166,4 +270,367 @@ public final class MachineLearningClient { listener, Collections.emptySet()); } + + /** + * Closes one or more Machine Learning Jobs. A job can be opened and closed multiple times throughout its lifecycle. + * + * A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. + * + * @param request Request containing job_ids and additional options. See {@link CloseJobRequest} + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return response indicating whether the job was successfully closed + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public CloseJobResponse closeJob(CloseJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::closeJob, + options, + CloseJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Closes one or more Machine Learning Jobs asynchronously, notifies listener on completion + * + * A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. + * + * @param request Request containing job_ids and additional options. See {@link CloseJobRequest} + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void closeJobAsync(CloseJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::closeJob, + options, + CloseJobResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Flushes internally buffered data for the given Machine Learning Job, ensuring all data sent to the job has been processed. + * This may cause new results to be calculated depending on the contents of the buffer. + * + * Both flush and close operations are similar, + * however the flush is more efficient if you are expecting to send more data for analysis. + * + * When flushing, the job remains open and is available to continue analyzing data. + * A close operation additionally prunes and persists the model state to disk and the + * job must be opened again before analyzing further data. + * + *

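+ * A hypothetical sketch contrasting flush with close (client and job id assumed, not part of this change):
+ * {@code
+ * // flush: compute results for buffered data, keep the job open for more input
+ * FlushJobResponse flushed = client.machineLearning().flushJob(new FlushJobRequest("my-job"), RequestOptions.DEFAULT);
+ * // close: persist model state; the job must be re-opened before further analysis
+ * CloseJobResponse closed = client.machineLearning().closeJob(new CloseJobRequest("my-job"), RequestOptions.DEFAULT);
+ * }
+ *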
+ * For additional info + * see Flush ML job documentation + * + * @param request The {@link FlushJobRequest} object enclosing the `jobId` and additional request options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public FlushJobResponse flushJob(FlushJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::flushJob, + options, + FlushJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Flushes internally buffered data for the given Machine Learning Job asynchronously, ensuring all data sent to the job has been processed. + * This may cause new results to be calculated depending on the contents of the buffer. + * + * Both flush and close operations are similar, + * however the flush is more efficient if you are expecting to send more data for analysis. + * + * When flushing, the job remains open and is available to continue analyzing data. + * A close operation additionally prunes and persists the model state to disk and the + * job must be opened again before analyzing further data. + * + *

+ * For additional info + * see Flush ML job documentation + * + * @param request The {@link FlushJobRequest} object enclosing the `jobId` and additional request options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void flushJobAsync(FlushJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::flushJob, + options, + FlushJobResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Creates a forecast of an existing, opened Machine Learning Job + * + * This predicts the future behavior of a time series by using its historical behavior. + * + *

+ * For additional info + * see Forecast ML Job Documentation + *

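+ * A hypothetical usage sketch (client and job id assumed, not part of this change):
+ * {@code
+ * ForecastJobRequest request = new ForecastJobRequest("my-job");
+ * ForecastJobResponse response = client.machineLearning().forecastJob(request, RequestOptions.DEFAULT);
+ * String forecastId = response.getForecastId(); // id for retrieving the forecast results later
+ * }
+ *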
+ * @param request ForecastJobRequest with forecasting options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return response containing forecast acknowledgement and new forecast's ID + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public ForecastJobResponse forecastJob(ForecastJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::forecastJob, + options, + ForecastJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job} + * + * @param request the {@link UpdateJobRequest} object enclosing the desired updates + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return a PutJobResponse object containing the updated job object + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public PutJobResponse updateJob(UpdateJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::updateJob, + options, + PutJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Creates a forecast of an existing, opened Machine Learning Job asynchronously + * + * This predicts the future behavior of a time series by using its historical behavior. + * + *

+ * For additional info + * see Forecast ML Job Documentation + *

+ * @param request ForecastJobRequest with forecasting options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void forecastJobAsync(ForecastJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::forecastJob, + options, + ForecastJobResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job} asynchronously + * + * @param request the {@link UpdateJobRequest} object enclosing the desired updates + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void updateJobAsync(UpdateJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::updateJob, + options, + PutJobResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Gets the buckets for a Machine Learning Job. + *

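+ * A hypothetical usage sketch (client and job id assumed, not part of this change):
+ * {@code
+ * GetBucketsRequest request = new GetBucketsRequest("my-job");
+ * GetBucketsResponse response = client.machineLearning().getBuckets(request, RequestOptions.DEFAULT);
+ * long numberOfBuckets = response.count();
+ * }
+ *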
+ * For additional info + * see ML GET buckets documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public GetBucketsResponse getBuckets(GetBucketsRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getBuckets, + options, + GetBucketsResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets the buckets for a Machine Learning Job, notifies listener once the requested buckets are retrieved. + *

+ * For additional info + * see ML GET buckets documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getBucketsAsync(GetBucketsRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getBuckets, + options, + GetBucketsResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Gets overall buckets for a set of Machine Learning Jobs. + *

+ * For additional info + * see + * ML GET overall buckets documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public GetOverallBucketsResponse getOverallBuckets(GetOverallBucketsRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getOverallBuckets, + options, + GetOverallBucketsResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets overall buckets for a set of Machine Learning Jobs, notifies listener once the requested buckets are retrieved. + *
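For example, under the assumption that GetOverallBucketsRequest takes a list of job ids and a bucket-span setter, a caller might combine results across two jobs like this (names are illustrative):

    GetOverallBucketsRequest overallRequest = new GetOverallBucketsRequest("job-1", "job-2");
    overallRequest.setBucketSpan(TimeValue.timeValueHours(1));  // roll results up into hourly buckets
    GetOverallBucketsResponse overallResponse =
        client.machineLearning().getOverallBuckets(overallRequest, RequestOptions.DEFAULT);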

+ * For additional info + * see + * ML GET overall buckets documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getOverallBucketsAsync(GetOverallBucketsRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getOverallBuckets, + options, + GetOverallBucketsResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Gets the records for a Machine Learning Job. + *

+ * For additional info + * see ML GET records documentation + * + * @param request the request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public GetRecordsResponse getRecords(GetRecordsRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getRecords, + options, + GetRecordsResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets the records for a Machine Learning Job, notifies listener once the requested records are retrieved. + *
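A brief hedged sketch of fetching records synchronously; the score setter is an assumption and "total-requests" a hypothetical job id:

    GetRecordsRequest recordsRequest = new GetRecordsRequest("total-requests");
    recordsRequest.setRecordScore(90.0);  // keep only the most anomalous records
    GetRecordsResponse recordsResponse =
        client.machineLearning().getRecords(recordsRequest, RequestOptions.DEFAULT);
    long recordCount = recordsResponse.count();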

+ * For additional info + * see ML GET records documentation + * + * @param request the request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getRecordsAsync(GetRecordsRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getRecords, + options, + GetRecordsResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Sends data to an anomaly detection job for analysis. + * + * NOTE: The job must have a state of open to receive and process the data. + * + *

+ * For additional info + * see ML POST Data documentation + *
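To show the intended call shape, here is a hedged sketch that posts a single JSON document; PostDataRequest.JsonBuilder and the field name are assumptions for illustration (java.util.Map/HashMap imports elided):

    PostDataRequest.JsonBuilder jsonBuilder = new PostDataRequest.JsonBuilder();
    Map<String, Object> doc = new HashMap<>();
    doc.put("total", 109);    // one time-series data point
    jsonBuilder.addDoc(doc);
    PostDataRequest postDataRequest = new PostDataRequest("total-requests", jsonBuilder);
    PostDataResponse postDataResponse =
        client.machineLearning().postData(postDataRequest, RequestOptions.DEFAULT);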

+ * + * @param request PostDataRequest containing the data to post and some additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return response containing operational progress about the job + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public PostDataResponse postData(PostDataRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::postData, + options, + PostDataResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Sends data to an anomaly detection job for analysis, asynchronously + * + * NOTE: The job must have a state of open to receive and process the data. + * + *

+ * For additional info + * see ML POST Data documentation + *

+ * + * @param request PostDataRequest containing the data to post and some additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void postDataAsync(PostDataRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::postData, + options, + PostDataResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Gets the influencers for a Machine Learning Job. + *

+ * For additional info + * see + * ML GET influencers documentation + * + * @param request the request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public GetInfluencersResponse getInfluencers(GetInfluencersRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getInfluencers, + options, + GetInfluencersResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets the influencers for a Machine Learning Job, notifies listener once the requested influencers are retrieved. + *
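A hedged synchronous sketch; the score-threshold setter is an assumption:

    GetInfluencersRequest influencersRequest = new GetInfluencersRequest("total-requests");
    influencersRequest.setInfluencerScore(75.0);  // only influencers above this score
    GetInfluencersResponse influencersResponse =
        client.machineLearning().getInfluencers(influencersRequest, RequestOptions.DEFAULT);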

+ * For additional info + * see + * ML GET influencers documentation + * + * @param request the request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getInfluencersAsync(GetInfluencersRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getInfluencers, + options, + GetInfluencersResponse::fromXContent, + listener, + Collections.emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java index 7da38329947..8717943d797 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java @@ -49,7 +49,7 @@ public final class MigrationClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public IndexUpgradeInfoResponse getAssistance(IndexUpgradeInfoRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::getMigrationAssistance, options, + return restHighLevelClient.performRequestAndParseEntity(request, MigrationRequestConverters::getMigrationAssistance, options, IndexUpgradeInfoResponse::fromXContent, Collections.emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java new file mode 100644 index 00000000000..2f5309350df --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; + +public class MigrationRequestConverters { + + static Request getMigrationAssistance(IndexUpgradeInfoRequest indexUpgradeInfoRequest) { + RequestConverters.EndpointBuilder endpointBuilder = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_xpack/migration/assistance") + .addCommaSeparatedPathParts(indexUpgradeInfoRequest.indices()); + String endpoint = endpointBuilder.build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withIndicesOptions(indexUpgradeInfoRequest.indicesOptions()); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 0e5fce5b227..840bc4f0c4d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -30,21 +30,9 @@ import org.apache.http.entity.ContentType; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; -import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; -import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; -import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; -import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsRequest; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; -import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; @@ -76,10 +64,6 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.GetPipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.ingest.SimulatePipelineRequest; import 
org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.SearchRequest; @@ -88,6 +72,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.security.RefreshPolicy; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; @@ -106,14 +91,10 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.rankeval.RankEvalRequest; -import org.elasticsearch.protocol.xpack.XPackInfoRequest; -import org.elasticsearch.protocol.xpack.XPackUsageRequest; -import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; -import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; -import org.elasticsearch.protocol.xpack.license.PutLicenseRequest; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; -import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; +import org.elasticsearch.index.reindex.AbstractBulkByScrollRequest; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.mustache.MultiSearchTemplateRequest; import org.elasticsearch.script.mustache.SearchTemplateRequest; @@ -125,10 +106,8 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.Charset; -import java.util.EnumSet; import java.util.Locale; import java.util.StringJoiner; -import java.util.stream.Collectors; final class RequestConverters { static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON; @@ -137,17 +116,6 @@ final class RequestConverters { // Contains only status utility methods } - static Request cancelTasks(CancelTasksRequest cancelTasksRequest) { - Request request = new Request(HttpPost.METHOD_NAME, "/_tasks/_cancel"); - Params params = new Params(request); - params.withTimeout(cancelTasksRequest.getTimeout()) - .withTaskId(cancelTasksRequest.getTaskId()) - .withNodes(cancelTasksRequest.getNodes()) - .withParentTaskId(cancelTasksRequest.getParentTaskId()) - .withActions(cancelTasksRequest.getActions()); - return request; - } - static Request delete(DeleteRequest deleteRequest) { String endpoint = endpoint(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); @@ -718,104 +686,71 @@ final class RequestConverters { return request; } - static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest) throws IOException { - Request request = new Request(HttpPut.METHOD_NAME, "/_cluster/settings"); + static Request reindex(ReindexRequest reindexRequest) throws IOException { + String endpoint = new EndpointBuilder().addPathPart("_reindex").build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + Params params = new Params(request) + .withRefresh(reindexRequest.isRefresh()) + 
.withTimeout(reindexRequest.getTimeout()) + .withWaitForActiveShards(reindexRequest.getWaitForActiveShards()); - Params parameters = new Params(request); - parameters.withTimeout(clusterUpdateSettingsRequest.timeout()); - parameters.withMasterTimeout(clusterUpdateSettingsRequest.masterNodeTimeout()); - - request.setEntity(createEntity(clusterUpdateSettingsRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request clusterGetSettings(ClusterGetSettingsRequest clusterGetSettingsRequest) throws IOException { - Request request = new Request(HttpGet.METHOD_NAME, "/_cluster/settings"); - - Params parameters = new Params(request); - parameters.withLocal(clusterGetSettingsRequest.local()); - parameters.withIncludeDefaults(clusterGetSettingsRequest.includeDefaults()); - parameters.withMasterTimeout(clusterGetSettingsRequest.masterNodeTimeout()); - - return request; - } - - static Request getPipeline(GetPipelineRequest getPipelineRequest) { - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_ingest/pipeline") - .addCommaSeparatedPathParts(getPipelineRequest.getIds()) - .build(); - Request request = new Request(HttpGet.METHOD_NAME, endpoint); - - Params parameters = new Params(request); - parameters.withMasterTimeout(getPipelineRequest.masterNodeTimeout()); - return request; - } - - static Request putPipeline(PutPipelineRequest putPipelineRequest) throws IOException { - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_ingest/pipeline") - .addPathPart(putPipelineRequest.getId()) - .build(); - Request request = new Request(HttpPut.METHOD_NAME, endpoint); - - Params parameters = new Params(request); - parameters.withTimeout(putPipelineRequest.timeout()); - parameters.withMasterTimeout(putPipelineRequest.masterNodeTimeout()); - - request.setEntity(createEntity(putPipelineRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request deletePipeline(DeletePipelineRequest deletePipelineRequest) { - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_ingest/pipeline") - .addPathPart(deletePipelineRequest.getId()) - .build(); - Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - - Params parameters = new Params(request); - parameters.withTimeout(deletePipelineRequest.timeout()); - parameters.withMasterTimeout(deletePipelineRequest.masterNodeTimeout()); - - return request; - } - - static Request listTasks(ListTasksRequest listTaskRequest) { - if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) { - throw new IllegalArgumentException("TaskId cannot be used for list tasks request"); + if (reindexRequest.getScrollTime() != null) { + params.putParam("scroll", reindexRequest.getScrollTime()); } - Request request = new Request(HttpGet.METHOD_NAME, "/_tasks"); - Params params = new Params(request); - params.withTimeout(listTaskRequest.getTimeout()) - .withDetailed(listTaskRequest.getDetailed()) - .withWaitForCompletion(listTaskRequest.getWaitForCompletion()) - .withParentTaskId(listTaskRequest.getParentTaskId()) - .withNodes(listTaskRequest.getNodes()) - .withActions(listTaskRequest.getActions()) - .putParam("group_by", "none"); + request.setEntity(createEntity(reindexRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } - static Request clusterHealth(ClusterHealthRequest healthRequest) { - String[] indices = healthRequest.indices() == null ? 
Strings.EMPTY_ARRAY : healthRequest.indices(); - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_cluster/health") - .addCommaSeparatedPathParts(indices) - .build(); - Request request = new Request(HttpGet.METHOD_NAME, endpoint); + static Request updateByQuery(UpdateByQueryRequest updateByQueryRequest) throws IOException { + String endpoint = + endpoint(updateByQueryRequest.indices(), updateByQueryRequest.getDocTypes(), "_update_by_query"); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + Params params = new Params(request) + .withRouting(updateByQueryRequest.getRouting()) + .withPipeline(updateByQueryRequest.getPipeline()) + .withRefresh(updateByQueryRequest.isRefresh()) + .withTimeout(updateByQueryRequest.getTimeout()) + .withWaitForActiveShards(updateByQueryRequest.getWaitForActiveShards()) + .withIndicesOptions(updateByQueryRequest.indicesOptions()); + if (updateByQueryRequest.isAbortOnVersionConflict() == false) { + params.putParam("conflicts", "proceed"); + } + if (updateByQueryRequest.getBatchSize() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE) { + params.putParam("scroll_size", Integer.toString(updateByQueryRequest.getBatchSize())); + } + if (updateByQueryRequest.getScrollTime() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT) { + params.putParam("scroll", updateByQueryRequest.getScrollTime()); + } + if (updateByQueryRequest.getSize() > 0) { + params.putParam("size", Integer.toString(updateByQueryRequest.getSize())); + } + request.setEntity(createEntity(updateByQueryRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } - new Params(request) - .withWaitForStatus(healthRequest.waitForStatus()) - .withWaitForNoRelocatingShards(healthRequest.waitForNoRelocatingShards()) - .withWaitForNoInitializingShards(healthRequest.waitForNoInitializingShards()) - .withWaitForActiveShards(healthRequest.waitForActiveShards(), ActiveShardCount.NONE) - .withWaitForNodes(healthRequest.waitForNodes()) - .withWaitForEvents(healthRequest.waitForEvents()) - .withTimeout(healthRequest.timeout()) - .withMasterTimeout(healthRequest.masterNodeTimeout()) - .withLocal(healthRequest.local()) - .withLevel(healthRequest.level()); + static Request deleteByQuery(DeleteByQueryRequest deleteByQueryRequest) throws IOException { + String endpoint = + endpoint(deleteByQueryRequest.indices(), deleteByQueryRequest.getDocTypes(), "_delete_by_query"); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + Params params = new Params(request) + .withRouting(deleteByQueryRequest.getRouting()) + .withRefresh(deleteByQueryRequest.isRefresh()) + .withTimeout(deleteByQueryRequest.getTimeout()) + .withWaitForActiveShards(deleteByQueryRequest.getWaitForActiveShards()) + .withIndicesOptions(deleteByQueryRequest.indicesOptions()); + if (deleteByQueryRequest.isAbortOnVersionConflict() == false) { + params.putParam("conflicts", "proceed"); + } + if (deleteByQueryRequest.getBatchSize() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE) { + params.putParam("scroll_size", Integer.toString(deleteByQueryRequest.getBatchSize())); + } + if (deleteByQueryRequest.getScrollTime() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT) { + params.putParam("scroll", deleteByQueryRequest.getScrollTime()); + } + if (deleteByQueryRequest.getSize() > 0) { + params.putParam("size", Integer.toString(deleteByQueryRequest.getSize())); + } + request.setEntity(createEntity(deleteByQueryRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -898,126 +833,6 @@ final class RequestConverters { return 
request; } - static Request getRepositories(GetRepositoriesRequest getRepositoriesRequest) { - String[] repositories = getRepositoriesRequest.repositories() == null ? Strings.EMPTY_ARRAY : getRepositoriesRequest.repositories(); - String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot").addCommaSeparatedPathParts(repositories).build(); - Request request = new Request(HttpGet.METHOD_NAME, endpoint); - - Params parameters = new Params(request); - parameters.withMasterTimeout(getRepositoriesRequest.masterNodeTimeout()); - parameters.withLocal(getRepositoriesRequest.local()); - return request; - } - - static Request createRepository(PutRepositoryRequest putRepositoryRequest) throws IOException { - String endpoint = new EndpointBuilder().addPathPart("_snapshot").addPathPart(putRepositoryRequest.name()).build(); - Request request = new Request(HttpPut.METHOD_NAME, endpoint); - - Params parameters = new Params(request); - parameters.withMasterTimeout(putRepositoryRequest.masterNodeTimeout()); - parameters.withTimeout(putRepositoryRequest.timeout()); - parameters.withVerify(putRepositoryRequest.verify()); - - request.setEntity(createEntity(putRepositoryRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest) { - String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot").addPathPart(deleteRepositoryRequest.name()).build(); - Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - - Params parameters = new Params(request); - parameters.withMasterTimeout(deleteRepositoryRequest.masterNodeTimeout()); - parameters.withTimeout(deleteRepositoryRequest.timeout()); - return request; - } - - static Request verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest) { - String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot") - .addPathPart(verifyRepositoryRequest.name()) - .addPathPartAsIs("_verify") - .build(); - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - - Params parameters = new Params(request); - parameters.withMasterTimeout(verifyRepositoryRequest.masterNodeTimeout()); - parameters.withTimeout(verifyRepositoryRequest.timeout()); - return request; - } - - static Request createSnapshot(CreateSnapshotRequest createSnapshotRequest) throws IOException { - String endpoint = new EndpointBuilder().addPathPart("_snapshot") - .addPathPart(createSnapshotRequest.repository()) - .addPathPart(createSnapshotRequest.snapshot()) - .build(); - Request request = new Request(HttpPut.METHOD_NAME, endpoint); - Params params = new Params(request); - params.withMasterTimeout(createSnapshotRequest.masterNodeTimeout()); - params.withWaitForCompletion(createSnapshotRequest.waitForCompletion()); - request.setEntity(createEntity(createSnapshotRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request getSnapshots(GetSnapshotsRequest getSnapshotsRequest) { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPartAsIs("_snapshot") - .addPathPart(getSnapshotsRequest.repository()); - String endpoint; - if (getSnapshotsRequest.snapshots().length == 0) { - endpoint = endpointBuilder.addPathPart("_all").build(); - } else { - endpoint = endpointBuilder.addCommaSeparatedPathParts(getSnapshotsRequest.snapshots()).build(); - } - - Request request = new Request(HttpGet.METHOD_NAME, endpoint); - - Params parameters = new Params(request); - parameters.withMasterTimeout(getSnapshotsRequest.masterNodeTimeout()); - parameters.putParam("ignore_unavailable", 
Boolean.toString(getSnapshotsRequest.ignoreUnavailable())); - parameters.putParam("verbose", Boolean.toString(getSnapshotsRequest.verbose())); - - return request; - } - - static Request snapshotsStatus(SnapshotsStatusRequest snapshotsStatusRequest) { - String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot") - .addPathPart(snapshotsStatusRequest.repository()) - .addCommaSeparatedPathParts(snapshotsStatusRequest.snapshots()) - .addPathPartAsIs("_status") - .build(); - Request request = new Request(HttpGet.METHOD_NAME, endpoint); - - Params parameters = new Params(request); - parameters.withMasterTimeout(snapshotsStatusRequest.masterNodeTimeout()); - parameters.withIgnoreUnavailable(snapshotsStatusRequest.ignoreUnavailable()); - return request; - } - - static Request restoreSnapshot(RestoreSnapshotRequest restoreSnapshotRequest) throws IOException { - String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot") - .addPathPart(restoreSnapshotRequest.repository()) - .addPathPart(restoreSnapshotRequest.snapshot()) - .addPathPartAsIs("_restore") - .build(); - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params parameters = new Params(request); - parameters.withMasterTimeout(restoreSnapshotRequest.masterNodeTimeout()); - parameters.withWaitForCompletion(restoreSnapshotRequest.waitForCompletion()); - request.setEntity(createEntity(restoreSnapshotRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) { - String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot") - .addPathPart(deleteSnapshotRequest.repository()) - .addPathPart(deleteSnapshotRequest.snapshot()) - .build(); - Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - - Params parameters = new Params(request); - parameters.withMasterTimeout(deleteSnapshotRequest.masterNodeTimeout()); - return request; - } - static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException { String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); @@ -1047,20 +862,6 @@ final class RequestConverters { return request; } - static Request simulatePipeline(SimulatePipelineRequest simulatePipelineRequest) throws IOException { - EndpointBuilder builder = new EndpointBuilder().addPathPartAsIs("_ingest/pipeline"); - if (simulatePipelineRequest.getId() != null && !simulatePipelineRequest.getId().isEmpty()) { - builder.addPathPart(simulatePipelineRequest.getId()); - } - builder.addPathPartAsIs("_simulate"); - String endpoint = builder.build(); - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params params = new Params(request); - params.putParam("verbose", Boolean.toString(simulatePipelineRequest.isVerbose())); - request.setEntity(createEntity(simulatePipelineRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - static Request getAlias(GetAliasesRequest getAliasesRequest) { String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices(); String[] aliases = getAliasesRequest.aliases() == null ? 
Strings.EMPTY_ARRAY : getAliasesRequest.aliases(); @@ -1082,6 +883,19 @@ final class RequestConverters { return request; } + static Request putScript(PutStoredScriptRequest putStoredScriptRequest) throws IOException { + String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(putStoredScriptRequest.id()).build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + Params params = new Params(request); + params.withTimeout(putStoredScriptRequest.timeout()); + params.withMasterTimeout(putStoredScriptRequest.masterNodeTimeout()); + if (Strings.hasText(putStoredScriptRequest.context())) { + params.putParam("context", putStoredScriptRequest.context()); + } + request.setEntity(createEntity(putStoredScriptRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request analyze(AnalyzeRequest request) throws IOException { EndpointBuilder builder = new EndpointBuilder(); String index = request.index(); @@ -1111,103 +925,6 @@ final class RequestConverters { return request; } - static Request xPackInfo(XPackInfoRequest infoRequest) { - Request request = new Request(HttpGet.METHOD_NAME, "/_xpack"); - if (false == infoRequest.isVerbose()) { - request.addParameter("human", "false"); - } - if (false == infoRequest.getCategories().equals(EnumSet.allOf(XPackInfoRequest.Category.class))) { - request.addParameter("categories", infoRequest.getCategories().stream() - .map(c -> c.toString().toLowerCase(Locale.ROOT)) - .collect(Collectors.joining(","))); - } - return request; - } - - static Request xPackWatcherPutWatch(PutWatchRequest putWatchRequest) { - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("watcher") - .addPathPartAsIs("watch") - .addPathPart(putWatchRequest.getId()) - .build(); - - Request request = new Request(HttpPut.METHOD_NAME, endpoint); - Params params = new Params(request).withVersion(putWatchRequest.getVersion()); - if (putWatchRequest.isActive() == false) { - params.putParam("active", "false"); - } - ContentType contentType = createContentType(putWatchRequest.xContentType()); - BytesReference source = putWatchRequest.getSource(); - request.setEntity(new ByteArrayEntity(source.toBytesRef().bytes, 0, source.length(), contentType)); - return request; - } - - static Request xPackWatcherDeleteWatch(DeleteWatchRequest deleteWatchRequest) { - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("watcher") - .addPathPartAsIs("watch") - .addPathPart(deleteWatchRequest.getId()) - .build(); - - Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - return request; - } - - static Request xpackUsage(XPackUsageRequest usageRequest) { - Request request = new Request(HttpGet.METHOD_NAME, "/_xpack/usage"); - Params parameters = new Params(request); - parameters.withMasterTimeout(usageRequest.masterNodeTimeout()); - return request; - } - - static Request putLicense(PutLicenseRequest putLicenseRequest) { - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("license") - .build(); - Request request = new Request(HttpPut.METHOD_NAME, endpoint); - Params parameters = new Params(request); - parameters.withTimeout(putLicenseRequest.timeout()); - parameters.withMasterTimeout(putLicenseRequest.masterNodeTimeout()); - if (putLicenseRequest.isAcknowledge()) { - parameters.putParam("acknowledge", "true"); - } - request.setJsonEntity(putLicenseRequest.getLicenseDefinition()); - return request; - } - - static Request getLicense(GetLicenseRequest 
getLicenseRequest) { - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("license") - .build(); - Request request = new Request(HttpGet.METHOD_NAME, endpoint); - Params parameters = new Params(request); - parameters.withLocal(getLicenseRequest.local()); - return request; - } - - static Request deleteLicense(DeleteLicenseRequest deleteLicenseRequest) { - Request request = new Request(HttpDelete.METHOD_NAME, "/_xpack/license"); - Params parameters = new Params(request); - parameters.withTimeout(deleteLicenseRequest.timeout()); - parameters.withMasterTimeout(deleteLicenseRequest.masterNodeTimeout()); - return request; - } - - static Request getMigrationAssistance(IndexUpgradeInfoRequest indexUpgradeInfoRequest) { - EndpointBuilder endpointBuilder = new EndpointBuilder() - .addPathPartAsIs("_xpack/migration/assistance") - .addCommaSeparatedPathParts(indexUpgradeInfoRequest.indices()); - String endpoint = endpointBuilder.build(); - Request request = new Request(HttpGet.METHOD_NAME, endpoint); - Params parameters = new Params(request); - parameters.withIndicesOptions(indexUpgradeInfoRequest.indicesOptions()); - return request; - } - static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef(); return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); @@ -1329,11 +1046,16 @@ final class RequestConverters { Params withRefresh(boolean refresh) { if (refresh) { - return withRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + return withRefreshPolicy(RefreshPolicy.IMMEDIATE); } return this; } + /** + * @deprecated If creating a new HLRC ReST API call, use {@link RefreshPolicy} + * instead of {@link WriteRequest.RefreshPolicy} from the server project + */ + @Deprecated Params withRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) { if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) { return putParam("refresh", refreshPolicy.getValue()); @@ -1341,6 +1063,13 @@ final class RequestConverters { return this; } + Params withRefreshPolicy(RefreshPolicy refreshPolicy) { + if (refreshPolicy != RefreshPolicy.NONE) { + return putParam("refresh", refreshPolicy.getValue()); + } + return this; + } + Params withRetryOnConflict(int retryOnConflict) { if (retryOnConflict > 0) { return putParam("retry_on_conflict", String.valueOf(retryOnConflict)); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index e705ca12806..687290abe88 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -19,7 +19,6 @@ package org.elasticsearch.client; -import org.apache.http.Header; import org.apache.http.HttpEntity; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; @@ -29,6 +28,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import 
org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -65,6 +65,10 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.rankeval.RankEvalRequest; import org.elasticsearch.index.rankeval.RankEvalResponse; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.plugins.spi.NamedXContentProvider; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestStatus; @@ -117,38 +121,38 @@ import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms; import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongTerms; import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.ParsedAvg; -import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.cardinality.ParsedCardinality; -import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.geobounds.ParsedGeoBounds; -import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.geocentroid.ParsedGeoCentroid; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.max.ParsedMax; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.min.ParsedMin; -import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.ParsedHDRPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.ParsedHDRPercentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.ParsedTDigestPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.ParsedTDigestPercentiles; -import org.elasticsearch.search.aggregations.metrics.scripted.ParsedScriptedMetric; -import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.stats.ParsedStats; -import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.stats.extended.ParsedExtendedStats; -import org.elasticsearch.search.aggregations.metrics.sum.ParsedSum; -import 
org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.tophits.ParsedTopHits; -import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.valuecount.ParsedValueCount; -import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ParsedAvg; +import org.elasticsearch.search.aggregations.metrics.ParsedCardinality; +import org.elasticsearch.search.aggregations.metrics.ParsedExtendedStats; +import org.elasticsearch.search.aggregations.metrics.ParsedGeoBounds; +import org.elasticsearch.search.aggregations.metrics.ParsedGeoCentroid; +import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentiles; +import org.elasticsearch.search.aggregations.metrics.ParsedMax; +import org.elasticsearch.search.aggregations.metrics.ParsedMin; +import org.elasticsearch.search.aggregations.metrics.ParsedScriptedMetric; +import org.elasticsearch.search.aggregations.metrics.ParsedStats; +import org.elasticsearch.search.aggregations.metrics.ParsedSum; +import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentiles; +import org.elasticsearch.search.aggregations.metrics.ParsedTopHits; +import org.elasticsearch.search.aggregations.metrics.ParsedValueCount; +import org.elasticsearch.search.aggregations.metrics.ScriptedMetricAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.StatsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; @@ -177,6 +181,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.ServiceLoader; import java.util.Set; import java.util.function.Function; @@ -209,9 +214,11 @@ public class RestHighLevelClient implements Closeable { private final TasksClient tasksClient = new TasksClient(this); private final XPackClient xPackClient = new 
XPackClient(this); private final WatcherClient watcherClient = new WatcherClient(this); + private final GraphClient graphClient = new GraphClient(this); private final LicenseClient licenseClient = new LicenseClient(this); private final MigrationClient migrationClient = new MigrationClient(this); private final MachineLearningClient machineLearningClient = new MachineLearningClient(this); + private final SecurityClient securityClient = new SecurityClient(this); /** * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the @@ -325,6 +332,16 @@ public class RestHighLevelClient implements Closeable { */ public WatcherClient watcher() { return watcherClient; } + /** + * Provides methods for accessing the Elastic Licensed Graph explore API that + * is shipped with the default distribution of Elasticsearch. All of + * these APIs will 404 if run against the OSS distribution of Elasticsearch. + *
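Since GraphClient itself is not shown in this excerpt, the following usage sketch is speculative: it assumes the wrapper mirrors the other client accessors with an explore method, and that GraphExploreRequest and Hop behave as in the x-pack protocol classes:

    GraphExploreRequest exploreRequest = new GraphExploreRequest("index-with-connections");
    Hop hop = exploreRequest.createNextHop(QueryBuilders.matchAllQuery());  // seed the exploration
    hop.addVertexRequest("author");                                         // field whose terms become vertices
    GraphExploreResponse exploreResponse =
        client.graph().explore(exploreRequest, RequestOptions.DEFAULT);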

+ * See the + * Graph API on elastic.co for more information. + */ + public GraphClient graph() { return graphClient; } + /** * Provides methods for accessing the Elastic Licensed Licensing APIs that * are shipped with the default distribution of Elasticsearch. All of @@ -361,6 +378,20 @@ public class RestHighLevelClient implements Closeable { return machineLearningClient; } + /** + * Provides methods for accessing the Elastic Licensed Security APIs that + * are shipped with the Elastic Stack distribution of Elasticsearch. All of + * these APIs will 404 if run against the OSS distribution of Elasticsearch. + *
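Alongside these client accessors, the diff just below also adds reindex, update-by-query and delete-by-query entry points to RestHighLevelClient itself. As a hedged sketch of the delete-by-query call (the index name, the query, and `client` are illustrative assumptions, with query-builder imports elided):

    DeleteByQueryRequest deleteRequest = new DeleteByQueryRequest("logs-old");  // hypothetical index
    deleteRequest.setQuery(new TermQueryBuilder("level", "debug"));             // which documents to delete
    deleteRequest.setConflicts("proceed");                                      // do not abort on version conflicts
    BulkByScrollResponse deleteResponse = client.deleteByQuery(deleteRequest, RequestOptions.DEFAULT);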

+ * See the + * Security APIs on elastic.co for more information. + * + * @return the client wrapper for making Security API calls + */ + public SecurityClient security() { + return securityClient; + } + /** * Executes a bulk request using the Bulk API. * See Bulk API on elastic.co @@ -384,6 +415,91 @@ public class RestHighLevelClient implements Closeable { performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, listener, emptySet()); } + /** + * Executes a reindex request. + * See Reindex API on elastic.co + * @param reindexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final BulkByScrollResponse reindex(ReindexRequest reindexRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + reindexRequest, RequestConverters::reindex, options, BulkByScrollResponse::fromXContent, emptySet() + ); + } + + /** + * Asynchronously executes a reindex request. + * See Reindex API on elastic.co + * @param reindexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void reindexAsync(ReindexRequest reindexRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity( + reindexRequest, RequestConverters::reindex, options, BulkByScrollResponse::fromXContent, listener, emptySet() + ); + } + + /** + * Executes an update by query request. + * See + * Update By Query API on elastic.co + * @param updateByQueryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final BulkByScrollResponse updateByQuery(UpdateByQueryRequest updateByQueryRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, emptySet() + ); + } + + /** + * Asynchronously executes an update by query request. + * See + * Update By Query API on elastic.co + * @param updateByQueryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void updateByQueryAsync(UpdateByQueryRequest updateByQueryRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity( + updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet() + ); + } + + /** + * Executes a delete by query request. + * See + * Delete By Query API on elastic.co + * @param deleteByQueryRequest the request + * @param options the request options (e.g.
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final BulkByScrollResponse deleteByQuery(DeleteByQueryRequest deleteByQueryRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + deleteByQueryRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, emptySet() + ); + } + + /** + * Asynchronously executes a delete by query request. + * See + * Delete By Query API on elastic.co + * @param deleteByQueryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void deleteByQueryAsync(DeleteByQueryRequest deleteByQueryRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity( + deleteByQueryRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet() + ); + } + /** * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -935,6 +1051,35 @@ public class RestHighLevelClient implements Closeable { AcknowledgedResponse::fromXContent, listener, emptySet()); } + /** + * Puts a stored script using the Scripting API. + * See Scripting API + * on elastic.co + * @param putStoredScriptRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse putScript(PutStoredScriptRequest putStoredScriptRequest, + RequestOptions options) throws IOException { + return performRequestAndParseEntity(putStoredScriptRequest, RequestConverters::putScript, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously puts a stored script using the Scripting API. + * See Scripting API + * on elastic.co + * @param putStoredScriptRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putScriptAsync(PutStoredScriptRequest putStoredScriptRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(putStoredScriptRequest, RequestConverters::putScript, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + /** * Asynchronously executes a request using the Field Capabilities API. * See Field Capabilities API @@ -949,6 +1094,11 @@ public class RestHighLevelClient implements Closeable { FieldCapabilitiesResponse::fromXContent, listener, emptySet()); } + /** + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation + * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
+ */ + @Deprecated protected final Resp performRequestAndParseEntity(Req request, CheckedFunction requestConverter, RequestOptions options, @@ -958,15 +1108,58 @@ public class RestHighLevelClient implements Closeable { response -> parseEntity(response.getEntity(), entityParser), ignores); } + /** + * Defines a helper method for performing a request and then parsing the returned entity using the provided entityParser. + */ + protected final Resp performRequestAndParseEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + Set ignores) throws IOException { + return performRequest(request, requestConverter, options, + response -> parseEntity(response.getEntity(), entityParser), ignores); + } + + /** + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation + * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + */ + @Deprecated protected final Resp performRequest(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - Set ignores) throws IOException { + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + Set ignores) throws IOException { ActionRequestValidationException validationException = request.validate(); - if (validationException != null) { + if (validationException != null && validationException.validationErrors().isEmpty() == false) { throw validationException; } + return internalPerformRequest(request, requestConverter, options, responseConverter, ignores); + } + + /** + * Defines a helper method for performing a request. + */ + protected final Resp performRequest(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + Set ignores) throws IOException { + Optional validationException = request.validate(); + if (validationException != null && validationException.isPresent()) { + throw validationException.get(); + } + return internalPerformRequest(request, requestConverter, options, responseConverter, ignores); + } + + /** + * Provides common functionality for performing a request. + */ + private Resp internalPerformRequest(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + Set ignores) throws IOException { Request req = requestConverter.apply(request); req.setOptions(options); Response response; @@ -994,25 +1187,75 @@ public class RestHighLevelClient implements Closeable { } } + /** + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation + * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + */ + @Deprecated protected final void performRequestAsyncAndParseEntity(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - ActionListener listener, Set ignores) { + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + ActionListener listener, Set ignores) { performRequestAsync(request, requestConverter, options, response -> parseEntity(response.getEntity(), entityParser), listener, ignores); } + /** + * Defines a helper method for asynchronously performing a request. 
+ */ + protected final void performRequestAsyncAndParseEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + ActionListener listener, Set ignores) { + performRequestAsync(request, requestConverter, options, + response -> parseEntity(response.getEntity(), entityParser), listener, ignores); + } + + + /** + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation + * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + */ + @Deprecated protected final void performRequestAsync(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, Set ignores) { + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + ActionListener listener, Set ignores) { ActionRequestValidationException validationException = request.validate(); - if (validationException != null) { + if (validationException != null && validationException.validationErrors().isEmpty() == false) { listener.onFailure(validationException); return; } + internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); + } + + /** + * Defines a helper method for asynchronously performing a request. + */ + protected final void performRequestAsync(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + ActionListener listener, Set ignores) { + Optional validationException = request.validate(); + if (validationException != null && validationException.isPresent()) { + listener.onFailure(validationException.get()); + return; + } + internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); + } + + /** + * Provides common functionality for asynchronously performing a request. + */ + private void internalPerformRequestAsync(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + ActionListener listener, Set ignores) { Request req; try { req = requestConverter.apply(request); @@ -1026,6 +1269,7 @@ public class RestHighLevelClient implements Closeable { client.performRequestAsync(req, responseListener); } + final ResponseListener wrapResponseListener(CheckedFunction responseConverter, ActionListener actionListener, Set ignores) { return new ResponseListener() { @@ -1108,15 +1352,6 @@ public class RestHighLevelClient implements Closeable { } } - private static RequestOptions optionsForHeaders(Header[] headers) { - RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); - for (Header header : headers) { - Objects.requireNonNull(header, "header cannot be null"); - options.addHeader(header.getName(), header.getValue()); - } - return options.build(); - } - static boolean convertExistsResponse(Response response) { return response.getStatusLine().getStatusCode() == 200; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java new file mode 100644 index 00000000000..a4bc34004c2 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -0,0 +1,128 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.security.DisableUserRequest; +import org.elasticsearch.client.security.EnableUserRequest; +import org.elasticsearch.client.security.PutUserRequest; +import org.elasticsearch.client.security.PutUserResponse; +import org.elasticsearch.client.security.EmptyResponse; + +import java.io.IOException; + +import static java.util.Collections.emptySet; + +/** + * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Security APIs. + *

+ * See Security APIs on elastic.co + */ +public final class SecurityClient { + + private final RestHighLevelClient restHighLevelClient; + + SecurityClient(RestHighLevelClient restHighLevelClient) { + this.restHighLevelClient = restHighLevelClient; + } + + /** + * Create/update a user in the native realm synchronously. + * See + * the docs for more. + * @param request the request with the user's information + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response from the put user call + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public PutUserResponse putUser(PutUserRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::putUser, options, + PutUserResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously create/update a user in the native realm. + * See + * the docs for more. + * @param request the request with the user's information + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putUserAsync(PutUserRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::putUser, options, + PutUserResponse::fromXContent, listener, emptySet()); + } + + /** + * Enable a native realm or built-in user synchronously. + * See + * the docs for more. + * @param request the request with the user to enable + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response from the enable user call + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public EmptyResponse enableUser(EnableUserRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::enableUser, options, + EmptyResponse::fromXContent, emptySet()); + } + + /** + * Enable a native realm or built-in user asynchronously. + * See + * the docs for more. + * @param request the request with the user to enable + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void enableUserAsync(EnableUserRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::enableUser, options, + EmptyResponse::fromXContent, listener, emptySet()); + } + + /** + * Disable a native realm or built-in user synchronously. + * See + * the docs for more. + * @param request the request with the user to disable + * @param options the request options (e.g. 
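A rough sketch of typical usage, given a SecurityClient instance; the request construction is elided because the request classes live in the new org.elasticsearch.client.security package defined elsewhere in this PR, so treat their constructors as assumptions:

    // Sketch only: PutUserRequest/EnableUserRequest construction is elided.
    PutUserRequest putUserRequest = ...;       // user name, password, roles, refresh policy
    EnableUserRequest enableUserRequest = ...;
    PutUserResponse created = securityClient.putUser(putUserRequest, RequestOptions.DEFAULT);
    securityClient.enableUser(enableUserRequest, RequestOptions.DEFAULT);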
+    /**
+     * Disable a native realm or built-in user synchronously.
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html">
+     * the docs</a> for more.
+     * @param request the request with the user to disable
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response from the disable user call
+     * @throws IOException in case there is a problem sending the request or parsing back the response
+     */
+    public EmptyResponse disableUser(DisableUserRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::disableUser, options,
+            EmptyResponse::fromXContent, emptySet());
+    }
+
+    /**
+     * Disable a native realm or built-in user asynchronously.
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html">
+     * the docs</a> for more.
+     * @param request the request with the user to disable
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon request completion
+     */
+    public void disableUserAsync(DisableUserRequest request, RequestOptions options,
+                                 ActionListener<EmptyResponse> listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::disableUser, options,
+            EmptyResponse::fromXContent, listener, emptySet());
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java
new file mode 100644
index 00000000000..8533e0f1b4c
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.security.DisableUserRequest; +import org.elasticsearch.client.security.EnableUserRequest; +import org.elasticsearch.client.security.PutUserRequest; +import org.elasticsearch.client.security.SetUserEnabledRequest; + +import java.io.IOException; + +import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE; +import static org.elasticsearch.client.RequestConverters.createEntity; + +final class SecurityRequestConverters { + + private SecurityRequestConverters() {} + + static Request putUser(PutUserRequest putUserRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_xpack/security/user") + .addPathPart(putUserRequest.getUsername()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + request.setEntity(createEntity(putUserRequest, REQUEST_BODY_CONTENT_TYPE)); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withRefreshPolicy(putUserRequest.getRefreshPolicy()); + return request; + } + + static Request enableUser(EnableUserRequest enableUserRequest) { + return setUserEnabled(enableUserRequest); + } + + static Request disableUser(DisableUserRequest disableUserRequest) { + return setUserEnabled(disableUserRequest); + } + + private static Request setUserEnabled(SetUserEnabledRequest setUserEnabledRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_xpack/security/user") + .addPathPart(setUserEnabledRequest.getUsername()) + .addPathPart(setUserEnabledRequest.isEnabled() ? "_enable" : "_disable") + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withRefreshPolicy(setUserEnabledRequest.getRefreshPolicy()); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index 7df0df4836d..f3a49f06459 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -65,7 +65,7 @@ public final class SnapshotClient { */ public GetRepositoriesResponse getRepository(GetRepositoriesRequest getRepositoriesRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories, options, + return restHighLevelClient.performRequestAndParseEntity(getRepositoriesRequest, SnapshotRequestConverters::getRepositories, options, GetRepositoriesResponse::fromXContent, emptySet()); } @@ -80,7 +80,7 @@ public final class SnapshotClient { */ public void getRepositoryAsync(GetRepositoriesRequest getRepositoriesRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories, options, + restHighLevelClient.performRequestAsyncAndParseEntity(getRepositoriesRequest, SnapshotRequestConverters::getRepositories, options, GetRepositoriesResponse::fromXContent, listener, emptySet()); } @@ -94,7 +94,7 @@ public final class SnapshotClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public AcknowledgedResponse 
createRepository(PutRepositoryRequest putRepositoryRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, options, + return restHighLevelClient.performRequestAndParseEntity(putRepositoryRequest, SnapshotRequestConverters::createRepository, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -108,7 +108,7 @@ public final class SnapshotClient { */ public void createRepositoryAsync(PutRepositoryRequest putRepositoryRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, options, + restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, SnapshotRequestConverters::createRepository, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -123,8 +123,8 @@ public final class SnapshotClient { */ public AcknowledgedResponse deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, options, - AcknowledgedResponse::fromXContent, emptySet()); + return restHighLevelClient.performRequestAndParseEntity(deleteRepositoryRequest, SnapshotRequestConverters::deleteRepository, + options, AcknowledgedResponse::fromXContent, emptySet()); } /** @@ -137,7 +137,7 @@ public final class SnapshotClient { */ public void deleteRepositoryAsync(DeleteRepositoryRequest deleteRepositoryRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, options, + restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, SnapshotRequestConverters::deleteRepository, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -152,8 +152,8 @@ public final class SnapshotClient { */ public VerifyRepositoryResponse verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, options, - VerifyRepositoryResponse::fromXContent, emptySet()); + return restHighLevelClient.performRequestAndParseEntity(verifyRepositoryRequest, SnapshotRequestConverters::verifyRepository, + options, VerifyRepositoryResponse::fromXContent, emptySet()); } /** @@ -166,7 +166,7 @@ public final class SnapshotClient { */ public void verifyRepositoryAsync(VerifyRepositoryRequest verifyRepositoryRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, options, + restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, SnapshotRequestConverters::verifyRepository, options, VerifyRepositoryResponse::fromXContent, listener, emptySet()); } @@ -178,7 +178,7 @@ public final class SnapshotClient { */ public CreateSnapshotResponse create(CreateSnapshotRequest createSnapshotRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(createSnapshotRequest, RequestConverters::createSnapshot, options, + return restHighLevelClient.performRequestAndParseEntity(createSnapshotRequest, SnapshotRequestConverters::createSnapshot, 
options, CreateSnapshotResponse::fromXContent, emptySet()); } @@ -190,7 +190,7 @@ public final class SnapshotClient { */ public void createAsync(CreateSnapshotRequest createSnapshotRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(createSnapshotRequest, RequestConverters::createSnapshot, options, + restHighLevelClient.performRequestAsyncAndParseEntity(createSnapshotRequest, SnapshotRequestConverters::createSnapshot, options, CreateSnapshotResponse::fromXContent, listener, emptySet()); } @@ -205,7 +205,7 @@ public final class SnapshotClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public GetSnapshotsResponse get(GetSnapshotsRequest getSnapshotsRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getSnapshotsRequest, RequestConverters::getSnapshots, options, + return restHighLevelClient.performRequestAndParseEntity(getSnapshotsRequest, SnapshotRequestConverters::getSnapshots, options, GetSnapshotsResponse::fromXContent, emptySet()); } @@ -219,7 +219,7 @@ public final class SnapshotClient { * @param listener the listener to be notified upon request completion */ public void getAsync(GetSnapshotsRequest getSnapshotsRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getSnapshotsRequest, RequestConverters::getSnapshots, options, + restHighLevelClient.performRequestAsyncAndParseEntity(getSnapshotsRequest, SnapshotRequestConverters::getSnapshots, options, GetSnapshotsResponse::fromXContent, listener, emptySet()); } @@ -234,7 +234,7 @@ public final class SnapshotClient { */ public SnapshotsStatusResponse status(SnapshotsStatusRequest snapshotsStatusRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(snapshotsStatusRequest, RequestConverters::snapshotsStatus, options, + return restHighLevelClient.performRequestAndParseEntity(snapshotsStatusRequest, SnapshotRequestConverters::snapshotsStatus, options, SnapshotsStatusResponse::fromXContent, emptySet()); } @@ -248,7 +248,7 @@ public final class SnapshotClient { */ public void statusAsync(SnapshotsStatusRequest snapshotsStatusRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(snapshotsStatusRequest, RequestConverters::snapshotsStatus, options, + restHighLevelClient.performRequestAsyncAndParseEntity(snapshotsStatusRequest, SnapshotRequestConverters::snapshotsStatus, options, SnapshotsStatusResponse::fromXContent, listener, emptySet()); } @@ -263,7 +263,7 @@ public final class SnapshotClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public RestoreSnapshotResponse restore(RestoreSnapshotRequest restoreSnapshotRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(restoreSnapshotRequest, RequestConverters::restoreSnapshot, options, + return restHighLevelClient.performRequestAndParseEntity(restoreSnapshotRequest, SnapshotRequestConverters::restoreSnapshot, options, RestoreSnapshotResponse::fromXContent, emptySet()); } @@ -278,7 +278,7 @@ public final class SnapshotClient { */ public void restoreAsync(RestoreSnapshotRequest restoreSnapshotRequest, RequestOptions options, ActionListener listener) { - 
restHighLevelClient.performRequestAsyncAndParseEntity(restoreSnapshotRequest, RequestConverters::restoreSnapshot, options, + restHighLevelClient.performRequestAsyncAndParseEntity(restoreSnapshotRequest, SnapshotRequestConverters::restoreSnapshot, options, RestoreSnapshotResponse::fromXContent, listener, emptySet()); } @@ -293,7 +293,7 @@ public final class SnapshotClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public AcknowledgedResponse delete(DeleteSnapshotRequest deleteSnapshotRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(deleteSnapshotRequest, RequestConverters::deleteSnapshot, options, + return restHighLevelClient.performRequestAndParseEntity(deleteSnapshotRequest, SnapshotRequestConverters::deleteSnapshot, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -308,7 +308,7 @@ public final class SnapshotClient { */ public void deleteAsync(DeleteSnapshotRequest deleteSnapshotRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(deleteSnapshotRequest, RequestConverters::deleteSnapshot, options, + restHighLevelClient.performRequestAsyncAndParseEntity(deleteSnapshotRequest, SnapshotRequestConverters::deleteSnapshot, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java new file mode 100644 index 00000000000..7ddd0892585 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java @@ -0,0 +1,162 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; +import org.elasticsearch.common.Strings; + +import java.io.IOException; + +public class SnapshotRequestConverters { + + static Request getRepositories(GetRepositoriesRequest getRepositoriesRequest) { + String[] repositories = getRepositoriesRequest.repositories() == null ? Strings.EMPTY_ARRAY : getRepositoriesRequest.repositories(); + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_snapshot").addCommaSeparatedPathParts(repositories) + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withMasterTimeout(getRepositoriesRequest.masterNodeTimeout()); + parameters.withLocal(getRepositoriesRequest.local()); + return request; + } + + static Request createRepository(PutRepositoryRequest putRepositoryRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder().addPathPart("_snapshot").addPathPart(putRepositoryRequest.name()).build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withMasterTimeout(putRepositoryRequest.masterNodeTimeout()); + parameters.withTimeout(putRepositoryRequest.timeout()); + parameters.withVerify(putRepositoryRequest.verify()); + + request.setEntity(RequestConverters.createEntity(putRepositoryRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest) { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_snapshot").addPathPart(deleteRepositoryRequest.name()) + .build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withMasterTimeout(deleteRepositoryRequest.masterNodeTimeout()); + parameters.withTimeout(deleteRepositoryRequest.timeout()); + return request; + } + + static Request verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest) { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_snapshot") + .addPathPart(verifyRepositoryRequest.name()) + .addPathPartAsIs("_verify") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withMasterTimeout(verifyRepositoryRequest.masterNodeTimeout()); + 
parameters.withTimeout(verifyRepositoryRequest.timeout()); + return request; + } + + static Request createSnapshot(CreateSnapshotRequest createSnapshotRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder().addPathPart("_snapshot") + .addPathPart(createSnapshotRequest.repository()) + .addPathPart(createSnapshotRequest.snapshot()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(createSnapshotRequest.masterNodeTimeout()); + params.withWaitForCompletion(createSnapshotRequest.waitForCompletion()); + request.setEntity(RequestConverters.createEntity(createSnapshotRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request getSnapshots(GetSnapshotsRequest getSnapshotsRequest) { + RequestConverters.EndpointBuilder endpointBuilder = new RequestConverters.EndpointBuilder().addPathPartAsIs("_snapshot") + .addPathPart(getSnapshotsRequest.repository()); + String endpoint; + if (getSnapshotsRequest.snapshots().length == 0) { + endpoint = endpointBuilder.addPathPart("_all").build(); + } else { + endpoint = endpointBuilder.addCommaSeparatedPathParts(getSnapshotsRequest.snapshots()).build(); + } + + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withMasterTimeout(getSnapshotsRequest.masterNodeTimeout()); + parameters.putParam("ignore_unavailable", Boolean.toString(getSnapshotsRequest.ignoreUnavailable())); + parameters.putParam("verbose", Boolean.toString(getSnapshotsRequest.verbose())); + + return request; + } + + static Request snapshotsStatus(SnapshotsStatusRequest snapshotsStatusRequest) { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_snapshot") + .addPathPart(snapshotsStatusRequest.repository()) + .addCommaSeparatedPathParts(snapshotsStatusRequest.snapshots()) + .addPathPartAsIs("_status") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withMasterTimeout(snapshotsStatusRequest.masterNodeTimeout()); + parameters.withIgnoreUnavailable(snapshotsStatusRequest.ignoreUnavailable()); + return request; + } + + static Request restoreSnapshot(RestoreSnapshotRequest restoreSnapshotRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_snapshot") + .addPathPart(restoreSnapshotRequest.repository()) + .addPathPart(restoreSnapshotRequest.snapshot()) + .addPathPartAsIs("_restore") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withMasterTimeout(restoreSnapshotRequest.masterNodeTimeout()); + parameters.withWaitForCompletion(restoreSnapshotRequest.waitForCompletion()); + request.setEntity(RequestConverters.createEntity(restoreSnapshotRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_snapshot") + .addPathPart(deleteSnapshotRequest.repository()) + .addPathPart(deleteSnapshotRequest.snapshot()) + .build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + + RequestConverters.Params parameters 
= new RequestConverters.Params(request); + parameters.withMasterTimeout(deleteSnapshotRequest.masterNodeTimeout()); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java index ebba636b8fa..3b957b2defb 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java @@ -51,7 +51,7 @@ public final class TasksClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public ListTasksResponse list(ListTasksRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::listTasks, options, + return restHighLevelClient.performRequestAndParseEntity(request, TasksRequestConverters::listTasks, options, ListTasksResponse::fromXContent, emptySet()); } @@ -64,7 +64,7 @@ public final class TasksClient { * @param listener the listener to be notified upon request completion */ public void listAsync(ListTasksRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, options, + restHighLevelClient.performRequestAsyncAndParseEntity(request, TasksRequestConverters::listTasks, options, ListTasksResponse::fromXContent, listener, emptySet()); } @@ -82,7 +82,7 @@ public final class TasksClient { public CancelTasksResponse cancel(CancelTasksRequest cancelTasksRequest, RequestOptions options ) throws IOException { return restHighLevelClient.performRequestAndParseEntity( cancelTasksRequest, - RequestConverters::cancelTasks, + TasksRequestConverters::cancelTasks, options, CancelTasksResponse::fromXContent, emptySet() @@ -101,7 +101,7 @@ public final class TasksClient { public void cancelAsync(CancelTasksRequest cancelTasksRequest, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity( cancelTasksRequest, - RequestConverters::cancelTasks, + TasksRequestConverters::cancelTasks, options, CancelTasksResponse::fromXContent, listener, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java new file mode 100644 index 00000000000..93b407a82fe --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.client;
+
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
+import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
+
+public class TasksRequestConverters {
+
+    static Request cancelTasks(CancelTasksRequest cancelTasksRequest) {
+        Request request = new Request(HttpPost.METHOD_NAME, "/_tasks/_cancel");
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        params.withTimeout(cancelTasksRequest.getTimeout())
+            .withTaskId(cancelTasksRequest.getTaskId())
+            .withNodes(cancelTasksRequest.getNodes())
+            .withParentTaskId(cancelTasksRequest.getParentTaskId())
+            .withActions(cancelTasksRequest.getActions());
+        return request;
+    }
+
+    static Request listTasks(ListTasksRequest listTaskRequest) {
+        if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) {
+            throw new IllegalArgumentException("TaskId cannot be used for list tasks request");
+        }
+        Request request = new Request(HttpGet.METHOD_NAME, "/_tasks");
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        params.withTimeout(listTaskRequest.getTimeout())
+            .withDetailed(listTaskRequest.getDetailed())
+            .withWaitForCompletion(listTaskRequest.getWaitForCompletion())
+            .withParentTaskId(listTaskRequest.getParentTaskId())
+            .withNodes(listTaskRequest.getNodes())
+            .withActions(listTaskRequest.getActions())
+            .putParam("group_by", "none");
+        return request;
+    }
+}
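As a quick sanity-check sketch of what listTasks produces for a default request (from the same package, since the converter is package-private); group_by=none is always set because the high-level client parses the flat task list:

    ListTasksRequest listRequest = new ListTasksRequest();        // all defaults
    Request low = TasksRequestConverters.listTasks(listRequest);
    // low.getMethod()     -> "GET"
    // low.getEndpoint()   -> "/_tasks"
    // low.getParameters() -> {group_by=none}; other params appear only when set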
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java
new file mode 100644
index 00000000000..af8fbe3e72b
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client;
+
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * A base request for any requests that supply timeouts.
+ *
+ * Please note, any requests that use an ackTimeout should set timeout as they
+ * represent the same backing field on the server.
+ */
+public class TimedRequest implements Validatable {
+
+    private TimeValue timeout;
+    private TimeValue masterTimeout;
+
+    public void setTimeout(TimeValue timeout) {
+        this.timeout = timeout;
+    }
+
+    public void setMasterTimeout(TimeValue masterTimeout) {
+        this.masterTimeout = masterTimeout;
+    }
+
+    /**
+     * Returns the request timeout
+     */
+    public TimeValue timeout() {
+        return timeout;
+    }
+
+    /**
+     * Returns the timeout for the request to be completed on the master node
+     */
+    public TimeValue masterNodeTimeout() {
+        return masterTimeout;
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java
new file mode 100644
index 00000000000..fe4a1fc42cb
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client;
+
+import java.util.Optional;
+
+/**
+ * Defines a validation layer for Requests.
+ */
+public interface Validatable {
+    /**
+     * Perform validation. This method does not have to be overridden in the event that no validation needs to be done,
+     * or the validation was done during object construction time. A {@link ValidationException} that is not null is
+     * assumed to contain validation errors and will be thrown.
+     *
+     * @return An {@link Optional} {@link ValidationException} that contains a list of validation errors.
+     */
+    default Optional<ValidationException> validate() {
+        return Optional.empty();
+    }
+}
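To make the contract concrete, here is a minimal hypothetical request type that opts into validation; the class name and rule are illustrative only, but the Validatable/ValidationException usage mirrors the interfaces introduced above:

    // Hypothetical request class; fails validation when no name is supplied.
    public class RenameThingRequest implements Validatable {
        private final String name;

        public RenameThingRequest(String name) {
            this.name = name;
        }

        @Override
        public Optional<ValidationException> validate() {
            if (name == null || name.isEmpty()) {
                ValidationException e = new ValidationException();
                e.addValidationError("name is missing");
                return Optional.of(e);
            }
            return Optional.empty();
        }
    }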
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java
new file mode 100644
index 00000000000..6b5d738d675
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Encapsulates an accumulation of validation errors
+ */
+public class ValidationException extends IllegalArgumentException {
+    private final List<String> validationErrors = new ArrayList<>();
+
+    /**
+     * Add a new validation error to the accumulating validation errors
+     * @param error the error to add
+     */
+    public void addValidationError(String error) {
+        validationErrors.add(error);
+    }
+
+    /**
+     * Returns the validation errors accumulated
+     */
+    public final List<String> validationErrors() {
+        return validationErrors;
+    }
+
+    @Override
+    public final String getMessage() {
+        StringBuilder sb = new StringBuilder();
+        sb.append("Validation Failed: ");
+        int index = 0;
+        for (String error : validationErrors) {
+            sb.append(++index).append(": ").append(error).append(";");
+        }
+        return sb.toString();
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java
index 48487926f02..b1a3eb3f87b 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java
@@ -47,7 +47,7 @@ public final class WatcherClient {
      * @throws IOException in case there is a problem sending the request or parsing back the response
      */
     public PutWatchResponse putWatch(PutWatchRequest request, RequestOptions options) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::xPackWatcherPutWatch, options,
+        return restHighLevelClient.performRequestAndParseEntity(request, WatcherRequestConverters::putWatch, options,
             PutWatchResponse::fromXContent, emptySet());
     }

@@ -61,7 +61,7 @@ public final class WatcherClient {
      */
     public void putWatchAsync(PutWatchRequest request, RequestOptions options,
                               ActionListener<PutWatchResponse> listener) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xPackWatcherPutWatch, options,
+        restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::putWatch, options,
             PutWatchResponse::fromXContent, listener, emptySet());
     }

@@ -75,7 +75,7 @@ public final class WatcherClient {
      * @throws IOException in case there is a problem sending the request or parsing back the response
     */
     public DeleteWatchResponse deleteWatch(DeleteWatchRequest request, RequestOptions options) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::xPackWatcherDeleteWatch, options,
+        return restHighLevelClient.performRequestAndParseEntity(request, WatcherRequestConverters::deleteWatch, options,
             DeleteWatchResponse::fromXContent, singleton(404));
     }

@@ -88,7 +88,7 @@ public final class WatcherClient {
      * @param listener the listener to be notified upon request completion
      */
     public void deleteWatchAsync(DeleteWatchRequest request, RequestOptions options, ActionListener<DeleteWatchResponse> listener) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xPackWatcherDeleteWatch, options,
+        restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::deleteWatch, options,
             DeleteWatchResponse::fromXContent, listener, singleton(404));
     }
 }
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java
new
file mode 100644 index 00000000000..3b52d1c7b99 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; +import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; + +public class WatcherRequestConverters { + + static Request putWatch(PutWatchRequest putWatchRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("watcher") + .addPathPartAsIs("watch") + .addPathPart(putWatchRequest.getId()) + .build(); + + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request).withVersion(putWatchRequest.getVersion()); + if (putWatchRequest.isActive() == false) { + params.putParam("active", "false"); + } + ContentType contentType = RequestConverters.createContentType(putWatchRequest.xContentType()); + BytesReference source = putWatchRequest.getSource(); + request.setEntity(new ByteArrayEntity(source.toBytesRef().bytes, 0, source.length(), contentType)); + return request; + } + + static Request deleteWatch(DeleteWatchRequest deleteWatchRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("watcher") + .addPathPartAsIs("watch") + .addPathPart(deleteWatchRequest.getId()) + .build(); + + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java index 2af49ba1a1b..9cd8413fa79 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java @@ -56,7 +56,7 @@ public final class XPackClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public XPackInfoResponse info(XPackInfoRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::xPackInfo, options, + return restHighLevelClient.performRequestAndParseEntity(request, XPackRequestConverters::info, options, XPackInfoResponse::fromXContent, emptySet()); } @@ -70,7 +70,7 @@ public final class 
XPackClient { */ public void infoAsync(XPackInfoRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xPackInfo, options, + restHighLevelClient.performRequestAsyncAndParseEntity(request, XPackRequestConverters::info, options, XPackInfoResponse::fromXContent, listener, emptySet()); } @@ -81,7 +81,7 @@ public final class XPackClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public XPackUsageResponse usage(XPackUsageRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::xpackUsage, options, + return restHighLevelClient.performRequestAndParseEntity(request, XPackRequestConverters::usage, options, XPackUsageResponse::fromXContent, emptySet()); } @@ -91,7 +91,7 @@ public final class XPackClient { * @param listener the listener to be notified upon request completion */ public void usageAsync(XPackUsageRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xpackUsage, options, + restHighLevelClient.performRequestAsyncAndParseEntity(request, XPackRequestConverters::usage, options, XPackUsageResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackRequestConverters.java new file mode 100644 index 00000000000..1e2e15ad97c --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackRequestConverters.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; + +import java.util.EnumSet; +import java.util.Locale; +import java.util.stream.Collectors; + +public class XPackRequestConverters { + + static Request info(XPackInfoRequest infoRequest) { + Request request = new Request(HttpGet.METHOD_NAME, "/_xpack"); + if (false == infoRequest.isVerbose()) { + request.addParameter("human", "false"); + } + if (false == infoRequest.getCategories().equals(EnumSet.allOf(XPackInfoRequest.Category.class))) { + request.addParameter("categories", infoRequest.getCategories().stream() + .map(c -> c.toString().toLowerCase(Locale.ROOT)) + .collect(Collectors.joining(","))); + } + return request; + } + + static Request usage(XPackUsageRequest usageRequest) { + Request request = new Request(HttpGet.METHOD_NAME, "/_xpack/usage"); + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withMasterTimeout(usageRequest.masterNodeTimeout()); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/AbstractResultResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/AbstractResultResponse.java new file mode 100644 index 00000000000..1b609797dd6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/AbstractResultResponse.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Abstract class that provides a list of results and their count. 
+ */
+public abstract class AbstractResultResponse<T extends ToXContent> extends ActionResponse implements ToXContentObject {
+
+    public static final ParseField COUNT = new ParseField("count");
+
+    private final ParseField resultsField;
+    protected final List<T> results;
+    protected final long count;
+
+    AbstractResultResponse(ParseField resultsField, List<T> results, long count) {
+        this.resultsField = Objects.requireNonNull(resultsField,
+            "[results_field] must not be null");
+        this.results = Collections.unmodifiableList(results);
+        this.count = count;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(COUNT.getPreferredName(), count);
+        builder.field(resultsField.getPreferredName(), results);
+        builder.endObject();
+        return builder;
+    }
+
+    public long count() {
+        return count;
+    }
+}
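For readers following the ML response hierarchy, a hypothetical concrete subclass shows the intended shape; Thing (which must implement ToXContent) and ThingsResponse are placeholder names, and the class must live in the same org.elasticsearch.client.ml package because the superclass constructor is package-private:

    // Hypothetical result response carrying a named list of results plus a count.
    public class ThingsResponse extends AbstractResultResponse<Thing> {
        public static final ParseField THINGS = new ParseField("things");

        ThingsResponse(List<Thing> things, long count) {
            super(THINGS, things, count);
        }
    }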
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobRequest.java
new file mode 100644
index 00000000000..19f3df8e432
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobRequest.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.security.InvalidParameterException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Request to close Machine Learning Jobs
+ */
+public class CloseJobRequest extends ActionRequest implements ToXContentObject {
+
+    public static final ParseField JOB_ID = new ParseField("job_id");
+    public static final ParseField TIMEOUT = new ParseField("timeout");
+    public static final ParseField FORCE = new ParseField("force");
+    public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs");
+
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<CloseJobRequest, Void> PARSER = new ConstructingObjectParser<>(
+        "close_job_request",
+        true, a -> new CloseJobRequest((List<String>) a[0]));
+
+    static {
+        PARSER.declareField(ConstructingObjectParser.constructorArg(),
+            p -> Arrays.asList(Strings.commaDelimitedListToStringArray(p.text())),
+            JOB_ID, ObjectParser.ValueType.STRING_ARRAY);
+        PARSER.declareString((obj, val) -> obj.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT);
+        PARSER.declareBoolean(CloseJobRequest::setForce, FORCE);
+        PARSER.declareBoolean(CloseJobRequest::setAllowNoJobs, ALLOW_NO_JOBS);
+    }
+
+    private static final String ALL_JOBS = "_all";
+
+    private final List<String> jobIds;
+    private TimeValue timeout;
+    private Boolean force;
+    private Boolean allowNoJobs;
+
+    /**
+     * Explicitly close all jobs
+     *
+     * @return a {@link CloseJobRequest} for all existing jobs
+     */
+    public static CloseJobRequest closeAllJobsRequest() {
+        return new CloseJobRequest(ALL_JOBS);
+    }
+
+    CloseJobRequest(List<String> jobIds) {
+        if (jobIds.isEmpty()) {
+            throw new InvalidParameterException("jobIds must not be empty");
+        }
+        if (jobIds.stream().anyMatch(Objects::isNull)) {
+            throw new NullPointerException("jobIds must not contain null values");
+        }
+        this.jobIds = new ArrayList<>(jobIds);
+    }
+
+    /**
+     * Close the specified Jobs via their unique jobIds
+     *
+     * @param jobIds must be non-null and non-empty and each jobId must be non-null
+     */
+    public CloseJobRequest(String... jobIds) {
+        this(Arrays.asList(jobIds));
+    }
+
+    /**
+     * All the jobIds to be closed
+     */
+    public List<String> getJobIds() {
+        return jobIds;
+    }
+
+    public TimeValue getTimeout() {
+        return timeout;
+    }
+
+    /**
+     * How long to wait for the close request to complete before timing out.
+     *
+     * @param timeout Default value: 30 minutes
+     */
+    public void setTimeout(TimeValue timeout) {
+        this.timeout = timeout;
+    }
+
+    public Boolean isForce() {
+        return force;
+    }
+
+    /**
+     * Should the closing be forced.
+     *
+     * Use to close a failed job, or to forcefully close a job which has not responded to its initial close request.
+     *
+     * @param force When {@code true} forcefully close the job. Defaults to {@code false}
+     */
+    public void setForce(boolean force) {
+        this.force = force;
+    }
+
+    public Boolean isAllowNoJobs() {
+        return this.allowNoJobs;
+    }
+
+    /**
+     * Whether to ignore if a wildcard expression matches no jobs.
+     *
+     * This includes `_all` string or when no jobs have been specified
+     *
+     * @param allowNoJobs When {@code true} ignore if wildcard or `_all` matches no jobs. Defaults to {@code true}
+     */
+    public void setAllowNoJobs(boolean allowNoJobs) {
+        this.allowNoJobs = allowNoJobs;
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        return null;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(jobIds, timeout, force, allowNoJobs);
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || getClass() != other.getClass()) {
+            return false;
+        }
+
+        CloseJobRequest that = (CloseJobRequest) other;
+        return Objects.equals(jobIds, that.jobIds) &&
+            Objects.equals(timeout, that.timeout) &&
+            Objects.equals(force, that.force) &&
+            Objects.equals(allowNoJobs, that.allowNoJobs);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(JOB_ID.getPreferredName(), Strings.collectionToCommaDelimitedString(jobIds));
+        if (timeout != null) {
+            builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep());
+        }
+        if (force != null) {
+            builder.field(FORCE.getPreferredName(), force);
+        }
+        if (allowNoJobs != null) {
+            builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs);
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this);
+    }
+}
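Example usage, relying only on the constructors and setters shown above:

    CloseJobRequest closeTwo = new CloseJobRequest("job-1", "job-2");
    closeTwo.setTimeout(TimeValue.timeValueMinutes(10)); // wait up to 10 minutes
    closeTwo.setAllowNoJobs(true);                       // do not fail if a wildcard matches nothing

    CloseJobRequest closeAll = CloseJobRequest.closeAllJobsRequest(); // targets "_all"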
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Response indicating if the Job(s) closed or not + */ +public class CloseJobResponse extends ActionResponse implements ToXContentObject { + + private static final ParseField CLOSED = new ParseField("closed"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("close_job_response", true, (a) -> new CloseJobResponse((Boolean)a[0])); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), CLOSED); + } + + private boolean closed; + + public CloseJobResponse(boolean closed) { + this.closed = closed; + } + + public static CloseJobResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + /** + * Has the job closed or not + * @return boolean value indicating the job closed status + */ + public boolean isClosed() { + return closed; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + CloseJobResponse that = (CloseJobResponse) other; + return isClosed() == that.isClosed(); + } + + @Override + public int hashCode() { + return Objects.hash(isClosed()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(CLOSED.getPreferredName(), closed); + builder.endObject(); + return builder; + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteJobRequest.java similarity index 82% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteJobRequest.java index 1b7450de092..a355f7ec659 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteJobRequest.java @@ -16,13 +16,16 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import java.util.Objects; +/** + * Request to delete a Machine Learning Job via its ID + */ public class DeleteJobRequest extends ActionRequest { private String jobId; @@ -36,6 +39,10 @@ public class DeleteJobRequest extends ActionRequest { return jobId; } + /** + * The jobId which to delete + * @param jobId unique jobId to delete, must not be null + */ public void setJobId(String jobId) { this.jobId = Objects.requireNonNull(jobId, "[job_id] must not be null"); } @@ -44,6 +51,12 @@ public class DeleteJobRequest extends ActionRequest { return force; } + /** + * Used to forcefully delete an opened job. + * This method is quicker than closing and deleting the job. 
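+ * <p>
+ * A hypothetical usage sketch ({@code "my-job"} is an illustrative id, not part of this API):
+ * <pre>
+ * DeleteJobRequest request = new DeleteJobRequest("my-job");
+ * request.setForce(true); // remove the job even though it is open
+ * </pre>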
+ * + * @param force When {@code true} forcefully delete an opened job. Defaults to {@code false} + */ public void setForce(boolean force) { this.force = force; } diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteJobResponse.java similarity index 94% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteJobResponse.java index 0b4faa38f54..86cafd9e093 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteJobResponse.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.xcontent.XContentParser; @@ -24,6 +24,9 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; +/** + * Response acknowledging the Machine Learning Job request + */ public class DeleteJobResponse extends AcknowledgedResponse { public DeleteJobResponse(boolean acknowledged) { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobRequest.java new file mode 100644 index 00000000000..067851d4526 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobRequest.java @@ -0,0 +1,195 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Request object to flush a given Machine Learning job. 
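+ * <p>
+ * A hypothetical usage sketch; the job id and timestamp below are illustrative, not defaults of this API:
+ * <pre>
+ * FlushJobRequest request = new FlushJobRequest("my-job");
+ * request.setCalcInterim(true);
+ * request.setEnd("1541030400000"); // epoch millis
+ * </pre>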
+ */ +public class FlushJobRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField CALC_INTERIM = new ParseField("calc_interim"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField ADVANCE_TIME = new ParseField("advance_time"); + public static final ParseField SKIP_TIME = new ParseField("skip_time"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("flush_job_request", (a) -> new FlushJobRequest((String) a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareBoolean(FlushJobRequest::setCalcInterim, CALC_INTERIM); + PARSER.declareString(FlushJobRequest::setStart, START); + PARSER.declareString(FlushJobRequest::setEnd, END); + PARSER.declareString(FlushJobRequest::setAdvanceTime, ADVANCE_TIME); + PARSER.declareString(FlushJobRequest::setSkipTime, SKIP_TIME); + } + + private final String jobId; + private Boolean calcInterim; + private String start; + private String end; + private String advanceTime; + private String skipTime; + + /** + * Create new Flush job request + * + * @param jobId The job ID of the job to flush + */ + public FlushJobRequest(String jobId) { + this.jobId = jobId; + } + + public String getJobId() { + return jobId; + } + + public boolean getCalcInterim() { + return calcInterim; + } + + /** + * When {@code true} calculates the interim results for the most recent bucket or all buckets within the latency period. + * + * @param calcInterim defaults to {@code false}. + */ + public void setCalcInterim(boolean calcInterim) { + this.calcInterim = calcInterim; + } + + public String getStart() { + return start; + } + + /** + * When used in conjunction with {@link FlushJobRequest#calcInterim}, + * specifies the start of the range of buckets on which to calculate interim results. + * + * @param start the beginning of the range of buckets; may be an epoch seconds, epoch millis or an ISO string + */ + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + /** + * When used in conjunction with {@link FlushJobRequest#calcInterim}, specifies the end of the range + * of buckets on which to calculate interim results + * + * @param end the end of the range of buckets; may be an epoch seconds, epoch millis or an ISO string + */ + public void setEnd(String end) { + this.end = end; + } + + public String getAdvanceTime() { + return advanceTime; + } + + /** + * Specifies to advance to a particular time value. + * Results are generated and the model is updated for data from the specified time interval. + * + * @param advanceTime String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setAdvanceTime(String advanceTime) { + this.advanceTime = advanceTime; + } + + public String getSkipTime() { + return skipTime; + } + + /** + * Specifies to skip to a particular time value. + * Results are not generated and the model is not updated for data from the specified time interval. 
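+ * <p>
+ * For example, assuming an existing request and an ISO 8601 timestamp:
+ * <pre>
+ * FlushJobRequest flushRequest = new FlushJobRequest("my-job"); // illustrative id
+ * flushRequest.setSkipTime("2018-08-31T00:00:00Z");
+ * </pre>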
+ * + * @param skipTime String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setSkipTime(String skipTime) { + this.skipTime = skipTime; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, calcInterim, start, end, advanceTime, skipTime); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + FlushJobRequest other = (FlushJobRequest) obj; + return Objects.equals(jobId, other.jobId) && + calcInterim == other.calcInterim && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(advanceTime, other.advanceTime) && + Objects.equals(skipTime, other.skipTime); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (calcInterim != null) { + builder.field(CALC_INTERIM.getPreferredName(), calcInterim); + } + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (advanceTime != null) { + builder.field(ADVANCE_TIME.getPreferredName(), advanceTime); + } + if (skipTime != null) { + builder.field(SKIP_TIME.getPreferredName(), skipTime); + } + builder.endObject(); + return builder; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobResponse.java new file mode 100644 index 00000000000..048b07b504a --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobResponse.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +/** + * Response object containing flush acknowledgement and additional data + */ +public class FlushJobResponse extends ActionResponse implements ToXContentObject { + + public static final ParseField FLUSHED = new ParseField("flushed"); + public static final ParseField LAST_FINALIZED_BUCKET_END = new ParseField("last_finalized_bucket_end"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("flush_job_response", + true, + (a) -> { + boolean flushed = (boolean) a[0]; + Date date = a[1] == null ? null : new Date((long) a[1]); + return new FlushJobResponse(flushed, date); + }); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), FLUSHED); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), LAST_FINALIZED_BUCKET_END); + } + + public static FlushJobResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + private final boolean flushed; + private final Date lastFinalizedBucketEnd; + + public FlushJobResponse(boolean flushed, @Nullable Date lastFinalizedBucketEnd) { + this.flushed = flushed; + this.lastFinalizedBucketEnd = lastFinalizedBucketEnd; + } + + /** + * Was the job successfully flushed or not + */ + public boolean isFlushed() { + return flushed; + } + + /** + * Provides the timestamp (in milliseconds-since-the-epoch) of the end of the last bucket that was processed. + */ + @Nullable + public Date getLastFinalizedBucketEnd() { + return lastFinalizedBucketEnd; + } + + @Override + public int hashCode() { + return Objects.hash(flushed, lastFinalizedBucketEnd); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + FlushJobResponse that = (FlushJobResponse) other; + return that.flushed == flushed && Objects.equals(lastFinalizedBucketEnd, that.lastFinalizedBucketEnd); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(FLUSHED.getPreferredName(), flushed); + if (lastFinalizedBucketEnd != null) { + builder.timeField(LAST_FINALIZED_BUCKET_END.getPreferredName(), + LAST_FINALIZED_BUCKET_END.getPreferredName() + "_string", lastFinalizedBucketEnd.getTime()); + } + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/ForecastJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/ForecastJobRequest.java new file mode 100644 index 00000000000..67d290c37f0 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/ForecastJobRequest.java @@ -0,0 +1,140 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Pojo for forecasting an existing and open Machine Learning Job + */ +public class ForecastJobRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField DURATION = new ParseField("duration"); + public static final ParseField EXPIRES_IN = new ParseField("expires_in"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("forecast_job_request", (a) -> new ForecastJobRequest((String)a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareString( + (request, val) -> request.setDuration(TimeValue.parseTimeValue(val, DURATION.getPreferredName())), DURATION); + PARSER.declareString( + (request, val) -> request.setExpiresIn(TimeValue.parseTimeValue(val, EXPIRES_IN.getPreferredName())), EXPIRES_IN); + } + + private final String jobId; + private TimeValue duration; + private TimeValue expiresIn; + + /** + * A new forecast request + * + * @param jobId the non-null, existing, and opened jobId to forecast + */ + public ForecastJobRequest(String jobId) { + this.jobId = jobId; + } + + public String getJobId() { + return jobId; + } + + public TimeValue getDuration() { + return duration; + } + + /** + * Set the forecast duration + * + * A period of time that indicates how far into the future to forecast. + * The default value is 1 day. The forecast starts at the last record that was processed. + * + * @param duration TimeValue for the duration of the forecast + */ + public void setDuration(TimeValue duration) { + this.duration = duration; + } + + public TimeValue getExpiresIn() { + return expiresIn; + } + + /** + * Set the forecast expiration + * + * The period of time that forecast results are retained. + * After a forecast expires, the results are deleted. The default value is 14 days. + * If set to a value of 0, the forecast is never automatically deleted. 
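+ * <p>
+ * For example, retaining a forecast for an assumed two days:
+ * <pre>
+ * ForecastJobRequest forecastRequest = new ForecastJobRequest("my-job"); // illustrative id
+ * forecastRequest.setExpiresIn(TimeValue.timeValueHours(48));
+ * </pre>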
+ * + * @param expiresIn TimeValue for the forecast expiration + */ + public void setExpiresIn(TimeValue expiresIn) { + this.expiresIn = expiresIn; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, duration, expiresIn); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + ForecastJobRequest other = (ForecastJobRequest) obj; + return Objects.equals(jobId, other.jobId) + && Objects.equals(duration, other.duration) + && Objects.equals(expiresIn, other.expiresIn); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (duration != null) { + builder.field(DURATION.getPreferredName(), duration.getStringRep()); + } + if (expiresIn != null) { + builder.field(EXPIRES_IN.getPreferredName(), expiresIn.getStringRep()); + } + builder.endObject(); + return builder; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/ForecastJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/ForecastJobResponse.java new file mode 100644 index 00000000000..b45275c5e59 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/ForecastJobResponse.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Forecast response object + */ +public class ForecastJobResponse extends ActionResponse implements ToXContentObject { + + public static final ParseField ACKNOWLEDGED = new ParseField("acknowledged"); + public static final ParseField FORECAST_ID = new ParseField("forecast_id"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("forecast_job_response", + true, + (a) -> new ForecastJobResponse((Boolean)a[0], (String)a[1])); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ACKNOWLEDGED); + PARSER.declareString(ConstructingObjectParser.constructorArg(), FORECAST_ID); + } + + public static ForecastJobResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + private final boolean acknowledged; + private final String forecastId; + + public ForecastJobResponse(boolean acknowledged, String forecastId) { + this.acknowledged = acknowledged; + this.forecastId = forecastId; + } + + /** + * Forecast creating acknowledgement + * @return {@code true} indicates success, {@code false} otherwise + */ + public boolean isAcknowledged() { + return acknowledged; + } + + /** + * The created forecast ID + */ + public String getForecastId() { + return forecastId; + } + + @Override + public int hashCode() { + return Objects.hash(acknowledged, forecastId); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + ForecastJobResponse other = (ForecastJobResponse) obj; + return Objects.equals(acknowledged, other.acknowledged) + && Objects.equals(forecastId, other.forecastId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ACKNOWLEDGED.getPreferredName(), acknowledged); + builder.field(FORECAST_ID.getPreferredName(), forecastId); + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsRequest.java new file mode 100644 index 00000000000..927fd08c1ca --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsRequest.java @@ -0,0 +1,267 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.results.Result; +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * A request to retrieve buckets of a given job + */ +public class GetBucketsRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField EXPAND = new ParseField("expand"); + public static final ParseField EXCLUDE_INTERIM = new ParseField("exclude_interim"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField ANOMALY_SCORE = new ParseField("anomaly_score"); + public static final ParseField SORT = new ParseField("sort"); + public static final ParseField DESCENDING = new ParseField("desc"); + + public static final ObjectParser PARSER = new ObjectParser<>("get_buckets_request", GetBucketsRequest::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareString(GetBucketsRequest::setTimestamp, Result.TIMESTAMP); + PARSER.declareBoolean(GetBucketsRequest::setExpand, EXPAND); + PARSER.declareBoolean(GetBucketsRequest::setExcludeInterim, EXCLUDE_INTERIM); + PARSER.declareStringOrNull(GetBucketsRequest::setStart, START); + PARSER.declareStringOrNull(GetBucketsRequest::setEnd, END); + PARSER.declareObject(GetBucketsRequest::setPageParams, PageParams.PARSER, PageParams.PAGE); + PARSER.declareDouble(GetBucketsRequest::setAnomalyScore, ANOMALY_SCORE); + PARSER.declareString(GetBucketsRequest::setSort, SORT); + PARSER.declareBoolean(GetBucketsRequest::setDescending, DESCENDING); + } + + private String jobId; + private String timestamp; + private Boolean expand; + private Boolean excludeInterim; + private String start; + private String end; + private PageParams pageParams; + private Double anomalyScore; + private String sort; + private Boolean descending; + + private GetBucketsRequest() {} + + /** + * Constructs a request to retrieve buckets of a given job + * @param jobId id of the job to retrieve buckets of + */ + public GetBucketsRequest(String jobId) { + this.jobId = Objects.requireNonNull(jobId); + } + + public String getJobId() { + return jobId; + } + + /** + * Sets the timestamp of a specific bucket to be retrieved. + * @param timestamp String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setTimestamp(String timestamp) { + this.timestamp = timestamp; + } + + public String getTimestamp() { + return timestamp; + } + + public boolean isExpand() { + return expand; + } + + /** + * Sets the value of "expand". + * When {@code true}, buckets will be expanded to include their records. + * @param expand value of "expand" to be set + */ + public void setExpand(Boolean expand) { + this.expand = expand; + } + + public Boolean isExcludeInterim() { + return excludeInterim; + } + + /** + * Sets the value of "exclude_interim". 
+ * When {@code true}, interim buckets will be filtered out. + * @param excludeInterim value of "exclude_interim" to be set + */ + public void setExcludeInterim(Boolean excludeInterim) { + this.excludeInterim = excludeInterim; + } + + public String getStart() { + return start; + } + + /** + * Sets the value of "start" which is a timestamp. + * Only buckets whose timestamp is on or after the "start" value will be returned. + * @param start String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + /** + * Sets the value of "end" which is a timestamp. + * Only buckets whose timestamp is before the "end" value will be returned. + * @param end String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setEnd(String end) { + this.end = end; + } + + public PageParams getPageParams() { + return pageParams; + } + + /** + * Sets the paging parameters + * @param pageParams the paging parameters + */ + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + public Double getAnomalyScore() { + return anomalyScore; + } + + /** + * Sets the value of "anomaly_score". + * Only buckets with "anomaly_score" equal or greater will be returned. + * @param anomalyScore value of "anomaly_score". + */ + public void setAnomalyScore(Double anomalyScore) { + this.anomalyScore = anomalyScore; + } + + public String getSort() { + return sort; + } + + /** + * Sets the value of "sort". + * Specifies the bucket field to sort on. + * @param sort value of "sort". + */ + public void setSort(String sort) { + this.sort = sort; + } + + public Boolean isDescending() { + return descending; + } + + /** + * Sets the value of "desc". + * Specifies the sorting order. 
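+ * <p>
+ * For example, a sketch that sorts buckets by descending anomaly score ({@code "anomaly_score"} is this
+ * class's {@code ANOMALY_SCORE} field; the job id is illustrative):
+ * <pre>
+ * GetBucketsRequest request = new GetBucketsRequest("my-job");
+ * request.setSort("anomaly_score");
+ * request.setDescending(true);
+ * </pre>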
+ * @param descending value of "desc" + */ + public void setDescending(boolean descending) { + this.descending = descending; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (timestamp != null) { + builder.field(Result.TIMESTAMP.getPreferredName(), timestamp); + } + if (expand != null) { + builder.field(EXPAND.getPreferredName(), expand); + } + if (excludeInterim != null) { + builder.field(EXCLUDE_INTERIM.getPreferredName(), excludeInterim); + } + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + if (anomalyScore != null) { + builder.field(ANOMALY_SCORE.getPreferredName(), anomalyScore); + } + if (sort != null) { + builder.field(SORT.getPreferredName(), sort); + } + if (descending != null) { + builder.field(DESCENDING.getPreferredName(), descending); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, timestamp, expand, excludeInterim, anomalyScore, pageParams, start, end, sort, descending); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetBucketsRequest other = (GetBucketsRequest) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(timestamp, other.timestamp) && + Objects.equals(expand, other.expand) && + Objects.equals(excludeInterim, other.excludeInterim) && + Objects.equals(anomalyScore, other.anomalyScore) && + Objects.equals(pageParams, other.pageParams) && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(sort, other.sort) && + Objects.equals(descending, other.descending); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsResponse.java new file mode 100644 index 00000000000..de8736b86d9 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsResponse.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.results.Bucket;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * A response containing the requested buckets
+ */
+public class GetBucketsResponse extends AbstractResultResponse<Bucket> {
+
+    public static final ParseField BUCKETS = new ParseField("buckets");
+
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<GetBucketsResponse, Void> PARSER = new ConstructingObjectParser<>("get_buckets_response",
+        true, a -> new GetBucketsResponse((List<Bucket>) a[0], (long) a[1]));
+
+    static {
+        PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), Bucket.PARSER, BUCKETS);
+        PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT);
+    }
+
+    public static GetBucketsResponse fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    GetBucketsResponse(List<Bucket> buckets, long count) {
+        super(BUCKETS, buckets, count);
+    }
+
+    /**
+     * The retrieved buckets
+     * @return the retrieved buckets
+     */
+    public List<Bucket> buckets() {
+        return results;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(count, results);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+        if (getClass() != obj.getClass()) {
+            return false;
+        }
+        GetBucketsResponse other = (GetBucketsResponse) obj;
+        return count == other.count && Objects.equals(results, other.results);
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetInfluencersRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetInfluencersRequest.java
new file mode 100644
index 00000000000..f57d327db3a
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetInfluencersRequest.java
@@ -0,0 +1,227 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * A request to retrieve influencers of a given job + */ +public class GetInfluencersRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField EXCLUDE_INTERIM = new ParseField("exclude_interim"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField INFLUENCER_SCORE = new ParseField("influencer_score"); + public static final ParseField SORT = new ParseField("sort"); + public static final ParseField DESCENDING = new ParseField("desc"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_influencers_request", a -> new GetInfluencersRequest((String) a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareBoolean(GetInfluencersRequest::setExcludeInterim, EXCLUDE_INTERIM); + PARSER.declareStringOrNull(GetInfluencersRequest::setStart, START); + PARSER.declareStringOrNull(GetInfluencersRequest::setEnd, END); + PARSER.declareObject(GetInfluencersRequest::setPageParams, PageParams.PARSER, PageParams.PAGE); + PARSER.declareDouble(GetInfluencersRequest::setInfluencerScore, INFLUENCER_SCORE); + PARSER.declareString(GetInfluencersRequest::setSort, SORT); + PARSER.declareBoolean(GetInfluencersRequest::setDescending, DESCENDING); + } + + private final String jobId; + private Boolean excludeInterim; + private String start; + private String end; + private Double influencerScore; + private PageParams pageParams; + private String sort; + private Boolean descending; + + /** + * Constructs a request to retrieve influencers of a given job + * @param jobId id of the job to retrieve influencers of + */ + public GetInfluencersRequest(String jobId) { + this.jobId = Objects.requireNonNull(jobId); + } + + public String getJobId() { + return jobId; + } + + public Boolean isExcludeInterim() { + return excludeInterim; + } + + /** + * Sets the value of "exclude_interim". + * When {@code true}, interim influencers will be filtered out. + * @param excludeInterim value of "exclude_interim" to be set + */ + public void setExcludeInterim(Boolean excludeInterim) { + this.excludeInterim = excludeInterim; + } + + public String getStart() { + return start; + } + + /** + * Sets the value of "start" which is a timestamp. + * Only influencers whose timestamp is on or after the "start" value will be returned. + * @param start String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + /** + * Sets the value of "end" which is a timestamp. + * Only influencers whose timestamp is before the "end" value will be returned. 
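+ * <p>
+ * For example, bounding results to an assumed one-day window:
+ * <pre>
+ * GetInfluencersRequest request = new GetInfluencersRequest("my-job"); // illustrative id
+ * request.setStart("2018-08-30T00:00:00Z");
+ * request.setEnd("2018-08-31T00:00:00Z");
+ * </pre>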
+ * @param end String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setEnd(String end) { + this.end = end; + } + + public PageParams getPageParams() { + return pageParams; + } + + /** + * Sets the paging parameters + * @param pageParams The paging parameters + */ + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + public Double getInfluencerScore() { + return influencerScore; + } + + /** + * Sets the value of "influencer_score". + * Only influencers with "influencer_score" equal or greater will be returned. + * @param influencerScore value of "influencer_score". + */ + public void setInfluencerScore(Double influencerScore) { + this.influencerScore = influencerScore; + } + + public String getSort() { + return sort; + } + + /** + * Sets the value of "sort". + * Specifies the influencer field to sort on. + * @param sort value of "sort". + */ + public void setSort(String sort) { + this.sort = sort; + } + + public Boolean isDescending() { + return descending; + } + + /** + * Sets the value of "desc". + * Specifies the sorting order. + * @param descending value of "desc" + */ + public void setDescending(Boolean descending) { + this.descending = descending; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (excludeInterim != null) { + builder.field(EXCLUDE_INTERIM.getPreferredName(), excludeInterim); + } + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + if (influencerScore != null) { + builder.field(INFLUENCER_SCORE.getPreferredName(), influencerScore); + } + if (sort != null) { + builder.field(SORT.getPreferredName(), sort); + } + if (descending != null) { + builder.field(DESCENDING.getPreferredName(), descending); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, excludeInterim, influencerScore, pageParams, start, end, sort, descending); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetInfluencersRequest other = (GetInfluencersRequest) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(excludeInterim, other.excludeInterim) && + Objects.equals(influencerScore, other.influencerScore) && + Objects.equals(pageParams, other.pageParams) && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(sort, other.sort) && + Objects.equals(descending, other.descending); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetInfluencersResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetInfluencersResponse.java new file mode 100644 index 00000000000..113d960008c --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetInfluencersResponse.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.results.Influencer;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * A response containing the requested influencers
+ */
+public class GetInfluencersResponse extends AbstractResultResponse<Influencer> {
+
+    public static final ParseField INFLUENCERS = new ParseField("influencers");
+
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<GetInfluencersResponse, Void> PARSER = new ConstructingObjectParser<>(
+        "get_influencers_response", true, a -> new GetInfluencersResponse((List<Influencer>) a[0], (long) a[1]));
+
+    static {
+        PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), Influencer.PARSER, INFLUENCERS);
+        PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT);
+    }
+
+    public static GetInfluencersResponse fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    GetInfluencersResponse(List<Influencer> influencers, long count) {
+        super(INFLUENCERS, influencers, count);
+    }
+
+    /**
+     * The retrieved influencers
+     * @return the retrieved influencers
+     */
+    public List<Influencer> influencers() {
+        return results;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(count, results);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+        if (getClass() != obj.getClass()) {
+            return false;
+        }
+        GetInfluencersResponse other = (GetInfluencersResponse) obj;
+        return count == other.count && Objects.equals(results, other.results);
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobRequest.java
new file mode 100644
index 00000000000..3de7037e5c8
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobRequest.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * Request object to get {@link Job} objects with the matching `jobId`s or + * `groupName`s. + * + * `_all` explicitly gets all the jobs in the cluster + * An empty request (no `jobId`s) implicitly gets all the jobs in the cluster + */ +public class GetJobRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField JOB_IDS = new ParseField("job_ids"); + public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs"); + + private static final String ALL_JOBS = "_all"; + private final List jobIds; + private Boolean allowNoJobs; + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_job_request", + true, a -> new GetJobRequest(a[0] == null ? new ArrayList<>() : (List) a[0])); + + static { + PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), JOB_IDS); + PARSER.declareBoolean(GetJobRequest::setAllowNoJobs, ALLOW_NO_JOBS); + } + + /** + * Helper method to create a query that will get ALL jobs + * @return new {@link GetJobRequest} object searching for the jobId "_all" + */ + public static GetJobRequest getAllJobsRequest() { + return new GetJobRequest(ALL_JOBS); + } + + /** + * Get the specified {@link Job} configurations via their unique jobIds + * @param jobIds must not contain any null values + */ + public GetJobRequest(String... jobIds) { + this(Arrays.asList(jobIds)); + } + + GetJobRequest(List jobIds) { + if (jobIds.stream().anyMatch(Objects::isNull)) { + throw new NullPointerException("jobIds must not contain null values"); + } + this.jobIds = new ArrayList<>(jobIds); + } + + /** + * All the jobIds for which to get configuration information + */ + public List getJobIds() { + return jobIds; + } + + /** + * Whether to ignore if a wildcard expression matches no jobs. 
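+ * <p>
+ * A hypothetical example that tolerates unmatched wildcard expressions (the group name is illustrative):
+ * <pre>
+ * GetJobRequest request = new GetJobRequest("my-group-*");
+ * request.setAllowNoJobs(true);
+ * </pre>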
+ * + * @param allowNoJobs If this is {@code false}, then an error is returned when a wildcard (or `_all`) does not match any jobs + */ + public void setAllowNoJobs(boolean allowNoJobs) { + this.allowNoJobs = allowNoJobs; + } + + public Boolean isAllowNoJobs() { + return allowNoJobs; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return Objects.hash(jobIds, allowNoJobs); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || other.getClass() != getClass()) { + return false; + } + + GetJobRequest that = (GetJobRequest) other; + return Objects.equals(jobIds, that.jobIds) && + Objects.equals(allowNoJobs, that.allowNoJobs); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (jobIds.isEmpty() == false) { + builder.field(JOB_IDS.getPreferredName(), jobIds); + } + + if (allowNoJobs != null) { + builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs); + } + + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobResponse.java new file mode 100644 index 00000000000..0cdf08c6c24 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobResponse.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.config.Job;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * Contains a {@link List} of the found {@link Job} objects and the total count found
+ */
+public class GetJobResponse extends AbstractResultResponse<Job> {
+
+    public static final ParseField RESULTS_FIELD = new ParseField("jobs");
+
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<GetJobResponse, Void> PARSER =
+        new ConstructingObjectParser<>("jobs_response", true,
+            a -> new GetJobResponse((List<Job.Builder>) a[0], (long) a[1]));
+
+    static {
+        PARSER.declareObjectArray(constructorArg(), Job.PARSER, RESULTS_FIELD);
+        PARSER.declareLong(constructorArg(), AbstractResultResponse.COUNT);
+    }
+
+    GetJobResponse(List<Job.Builder> jobBuilders, long count) {
+        super(RESULTS_FIELD, jobBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()), count);
+    }
+
+    /**
+     * The collection of {@link Job} objects found in the query
+     */
+    public List<Job> jobs() {
+        return results;
+    }
+
+    public static GetJobResponse fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(results, count);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+
+        GetJobResponse other = (GetJobResponse) obj;
+        return Objects.equals(results, other.results) && count == other.count;
+    }
+
+    @Override
+    public final String toString() {
+        return Strings.toString(this);
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java
new file mode 100644
index 00000000000..d8eb350755d
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + + +/** + * Request object to get {@link org.elasticsearch.client.ml.job.stats.JobStats} by their respective jobIds + * + * `_all` explicitly gets all the jobs' statistics in the cluster + * An empty request (no `jobId`s) implicitly gets all the jobs' statistics in the cluster + */ +public class GetJobStatsRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_jobs_stats_request", a -> new GetJobStatsRequest((List) a[0])); + + static { + PARSER.declareField(ConstructingObjectParser.constructorArg(), + p -> Arrays.asList(Strings.commaDelimitedListToStringArray(p.text())), + Job.ID, ObjectParser.ValueType.STRING_ARRAY); + PARSER.declareBoolean(GetJobStatsRequest::setAllowNoJobs, ALLOW_NO_JOBS); + } + + private static final String ALL_JOBS = "_all"; + + private final List jobIds; + private Boolean allowNoJobs; + + /** + * Explicitly gets all jobs statistics + * + * @return a {@link GetJobStatsRequest} for all existing jobs + */ + public static GetJobStatsRequest getAllJobStatsRequest(){ + return new GetJobStatsRequest(ALL_JOBS); + } + + GetJobStatsRequest(List jobIds) { + if (jobIds.stream().anyMatch(Objects::isNull)) { + throw new NullPointerException("jobIds must not contain null values"); + } + this.jobIds = new ArrayList<>(jobIds); + } + + /** + * Get the specified Job's statistics via their unique jobIds + * + * @param jobIds must be non-null and each jobId must be non-null + */ + public GetJobStatsRequest(String... jobIds) { + this(Arrays.asList(jobIds)); + } + + /** + * All the jobIds for which to get statistics + */ + public List getJobIds() { + return jobIds; + } + + public Boolean isAllowNoJobs() { + return this.allowNoJobs; + } + + /** + * Whether to ignore if a wildcard expression matches no jobs. + * + * This includes `_all` string or when no jobs have been specified + * + * @param allowNoJobs When {@code true} ignore if wildcard or `_all` matches no jobs. 
Defaults to {@code true} + */ + public void setAllowNoJobs(boolean allowNoJobs) { + this.allowNoJobs = allowNoJobs; + } + + @Override + public int hashCode() { + return Objects.hash(jobIds, allowNoJobs); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + GetJobStatsRequest that = (GetJobStatsRequest) other; + return Objects.equals(jobIds, that.jobIds) && + Objects.equals(allowNoJobs, that.allowNoJobs); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), Strings.collectionToCommaDelimitedString(jobIds)); + if (allowNoJobs != null) { + builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs); + } + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsResponse.java new file mode 100644 index 00000000000..2e3ba113d19 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsResponse.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
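A usage sketch for the request class above, using only the constructors and setters it declares (the job ids are illustrative):

    // Stats for two explicit jobs plus anything matching a wildcard expression.
    GetJobStatsRequest request = new GetJobStatsRequest("job-1", "job-2", "metrics-*");
    request.setAllowNoJobs(true);   // don't error if the wildcard expands to no jobs

    // Equivalent to passing the `_all` string: statistics for every job in the cluster.
    GetJobStatsRequest all = GetJobStatsRequest.getAllJobStatsRequest();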
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.ml.job.stats.JobStats; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Contains a {@link List} of the found {@link JobStats} objects and the total count found + */ +public class GetJobStatsResponse extends AbstractResultResponse { + + public static final ParseField RESULTS_FIELD = new ParseField("jobs"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("jobs_stats_response", true, + a -> new GetJobStatsResponse((List) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(constructorArg(), JobStats.PARSER, RESULTS_FIELD); + PARSER.declareLong(constructorArg(), COUNT); + } + + GetJobStatsResponse(List jobStats, long count) { + super(RESULTS_FIELD, jobStats, count); + } + + /** + * The collection of {@link JobStats} objects found in the query + */ + public List jobStats() { + return results; + } + + public static GetJobStatsResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public int hashCode() { + return Objects.hash(results, count); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + GetJobStatsResponse other = (GetJobStatsResponse) obj; + return Objects.equals(results, other.results) && count == other.count; + } + + @Override + public final String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetOverallBucketsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetOverallBucketsRequest.java new file mode 100644 index 00000000000..97bde11d8c6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetOverallBucketsRequest.java @@ -0,0 +1,266 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
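And the matching response side, given an XContentParser positioned on the response body (as in the earlier parsing sketch); only members defined above are used:

    GetJobStatsResponse response = GetJobStatsResponse.fromXContent(parser);
    for (JobStats stats : response.jobStats()) {
        // one JobStats entry per job matched by the request
    }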
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.client.ml.job.config.Job;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * A request to retrieve overall buckets of a set of jobs
+ */
+public class GetOverallBucketsRequest extends ActionRequest implements ToXContentObject {
+
+    public static final ParseField TOP_N = new ParseField("top_n");
+    public static final ParseField BUCKET_SPAN = new ParseField("bucket_span");
+    public static final ParseField OVERALL_SCORE = new ParseField("overall_score");
+    public static final ParseField EXCLUDE_INTERIM = new ParseField("exclude_interim");
+    public static final ParseField START = new ParseField("start");
+    public static final ParseField END = new ParseField("end");
+    public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs");
+
+    private static final String ALL_JOBS = "_all";
+
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<GetOverallBucketsRequest, Void> PARSER = new ConstructingObjectParser<>(
+        "get_overall_buckets_request", a -> new GetOverallBucketsRequest((String) a[0]));
+
+    static {
+        PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
+        PARSER.declareInt(GetOverallBucketsRequest::setTopN, TOP_N);
+        PARSER.declareString(GetOverallBucketsRequest::setBucketSpan, BUCKET_SPAN);
+        PARSER.declareBoolean(GetOverallBucketsRequest::setExcludeInterim, EXCLUDE_INTERIM);
+        PARSER.declareDouble(GetOverallBucketsRequest::setOverallScore, OVERALL_SCORE);
+        PARSER.declareStringOrNull(GetOverallBucketsRequest::setStart, START);
+        PARSER.declareStringOrNull(GetOverallBucketsRequest::setEnd, END);
+        PARSER.declareBoolean(GetOverallBucketsRequest::setAllowNoJobs, ALLOW_NO_JOBS);
+    }
+
+    private final List<String> jobIds;
+    private Integer topN;
+    private TimeValue bucketSpan;
+    private Boolean excludeInterim;
+    private Double overallScore;
+    private String start;
+    private String end;
+    private Boolean allowNoJobs;
+
+    private GetOverallBucketsRequest(String jobId) {
+        this(Strings.tokenizeToStringArray(jobId, ","));
+    }
+
+    /**
+     * Constructs a request to retrieve overall buckets for a set of jobs
+     * @param jobIds The job identifiers. Each can be a job identifier, a group name, or a wildcard expression.
+     */
+    public GetOverallBucketsRequest(String... jobIds) {
+        this(Arrays.asList(jobIds));
+    }
+
+    /**
+     * Constructs a request to retrieve overall buckets for a set of jobs
+     * @param jobIds The job identifiers. Each can be a job identifier, a group name, or a wildcard expression.
+     */
+    public GetOverallBucketsRequest(List<String> jobIds) {
+        if (jobIds.stream().anyMatch(Objects::isNull)) {
+            throw new NullPointerException("jobIds must not contain null values");
+        }
+        if (jobIds.isEmpty()) {
+            this.jobIds = Collections.singletonList(ALL_JOBS);
+        } else {
+            this.jobIds = Collections.unmodifiableList(jobIds);
+        }
+    }
+
+    public List<String> getJobIds() {
+        return jobIds;
+    }
+
+    public Integer getTopN() {
+        return topN;
+    }
+
+    /**
+     * Sets the value of "top_n".
+     * @param topN The number of top job bucket scores to be used in the overall_score calculation. Defaults to 1.
+     */
+    public void setTopN(Integer topN) {
+        this.topN = topN;
+    }
+
+    public TimeValue getBucketSpan() {
+        return bucketSpan;
+    }
+
+    /**
+     * Sets the value of "bucket_span".
+     * @param bucketSpan The span of the overall buckets. Must be greater than or equal to the largest job’s bucket_span.
+     *                   Defaults to the largest job’s bucket_span.
+     */
+    public void setBucketSpan(TimeValue bucketSpan) {
+        this.bucketSpan = bucketSpan;
+    }
+
+    private void setBucketSpan(String bucketSpan) {
+        this.bucketSpan = TimeValue.parseTimeValue(bucketSpan, BUCKET_SPAN.getPreferredName());
+    }
+
+    public Boolean isExcludeInterim() {
+        return excludeInterim;
+    }
+
+    /**
+     * Sets the value of "exclude_interim".
+     * When {@code true}, interim overall buckets will be filtered out.
+     * Overall buckets are interim if any of the job buckets within the overall bucket interval are interim.
+     * @param excludeInterim value of "exclude_interim" to be set
+     */
+    public void setExcludeInterim(Boolean excludeInterim) {
+        this.excludeInterim = excludeInterim;
+    }
+
+    public String getStart() {
+        return start;
+    }
+
+    /**
+     * Sets the value of "start" which is a timestamp.
+     * Only overall buckets whose timestamp is on or after the "start" value will be returned.
+     * @param start String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string
+     */
+    public void setStart(String start) {
+        this.start = start;
+    }
+
+    public String getEnd() {
+        return end;
+    }
+
+    /**
+     * Sets the value of "end" which is a timestamp.
+     * Only overall buckets whose timestamp is before the "end" value will be returned.
+     * @param end String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string
+     */
+    public void setEnd(String end) {
+        this.end = end;
+    }
+
+    public Double getOverallScore() {
+        return overallScore;
+    }
+
+    /**
+     * Sets the value of "overall_score".
+     * Only buckets with an "overall_score" equal to or greater than this value will be returned.
+     * @param overallScore value of "overall_score".
+     */
+    public void setOverallScore(double overallScore) {
+        this.overallScore = overallScore;
+    }
+
+    /**
+     * See {@link GetJobRequest#isAllowNoJobs()}
+     * @param allowNoJobs value of "allow_no_jobs".
+     */
+    public void setAllowNoJobs(boolean allowNoJobs) {
+        this.allowNoJobs = allowNoJobs;
+    }
+
+    /**
+     * Whether to ignore if a wildcard expression matches no jobs.
+ * + * If this is `false`, then an error is returned when a wildcard (or `_all`) does not match any jobs + */ + public Boolean isAllowNoJobs() { + return allowNoJobs; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (jobIds.isEmpty() == false) { + builder.field(Job.ID.getPreferredName(), Strings.collectionToCommaDelimitedString(jobIds)); + } + if (topN != null) { + builder.field(TOP_N.getPreferredName(), topN); + } + if (bucketSpan != null) { + builder.field(BUCKET_SPAN.getPreferredName(), bucketSpan.getStringRep()); + } + if (excludeInterim != null) { + builder.field(EXCLUDE_INTERIM.getPreferredName(), excludeInterim); + } + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (overallScore != null) { + builder.field(OVERALL_SCORE.getPreferredName(), overallScore); + } + if (allowNoJobs != null) { + builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobIds, topN, bucketSpan, excludeInterim, overallScore, start, end, allowNoJobs); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetOverallBucketsRequest other = (GetOverallBucketsRequest) obj; + return Objects.equals(jobIds, other.jobIds) && + Objects.equals(topN, other.topN) && + Objects.equals(bucketSpan, other.bucketSpan) && + Objects.equals(excludeInterim, other.excludeInterim) && + Objects.equals(overallScore, other.overallScore) && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(allowNoJobs, other.allowNoJobs); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetOverallBucketsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetOverallBucketsResponse.java new file mode 100644 index 00000000000..8c9b7e29cb6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetOverallBucketsResponse.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
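A configuration sketch for the request above; the values are illustrative and every call is a setter declared in this class:

    GetOverallBucketsRequest request = new GetOverallBucketsRequest("job-1", "job-2");
    request.setTopN(2);                                  // combine the top 2 job bucket scores
    request.setBucketSpan(TimeValue.timeValueHours(1));  // must be >= the largest job's bucket_span
    request.setOverallScore(75.0);                       // keep buckets scoring at least 75
    request.setStart("2018-08-01T00:00:00Z");            // epoch seconds, epoch millis or ISO strings
    request.setEnd("2018-08-02T00:00:00Z");
    request.setExcludeInterim(true);                     // drop buckets built on interim results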
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.results.OverallBucket; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * A response containing the requested overall buckets + */ +public class GetOverallBucketsResponse extends AbstractResultResponse { + + public static final ParseField OVERALL_BUCKETS = new ParseField("overall_buckets"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_overall_buckets_response", true, a -> new GetOverallBucketsResponse((List) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), OverallBucket.PARSER, OVERALL_BUCKETS); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT); + } + + public static GetOverallBucketsResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + GetOverallBucketsResponse(List overallBuckets, long count) { + super(OVERALL_BUCKETS, overallBuckets, count); + } + + /** + * The retrieved overall buckets + * @return the retrieved overall buckets + */ + public List overallBuckets() { + return results; + } + + @Override + public int hashCode() { + return Objects.hash(count, results); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetOverallBucketsResponse other = (GetOverallBucketsResponse) obj; + return count == other.count && Objects.equals(results, other.results); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsRequest.java new file mode 100644 index 00000000000..3c11cbd2c10 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsRequest.java @@ -0,0 +1,222 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
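Reading the result back, given a parser over the response body; note that `getOverallScore()` is assumed here from the server-side OverallBucket result class of the same name:

    GetOverallBucketsResponse response = GetOverallBucketsResponse.fromXContent(parser);
    double maxScore = response.overallBuckets().stream()
        .mapToDouble(OverallBucket::getOverallScore)   // assumed getter
        .max().orElse(0.0);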
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * A request to retrieve records of a given job + */ +public class GetRecordsRequest implements ToXContentObject, Validatable { + + public static final ParseField EXCLUDE_INTERIM = new ParseField("exclude_interim"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField RECORD_SCORE = new ParseField("record_score"); + public static final ParseField SORT = new ParseField("sort"); + public static final ParseField DESCENDING = new ParseField("desc"); + + public static final ObjectParser PARSER = new ObjectParser<>("get_records_request", GetRecordsRequest::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareBoolean(GetRecordsRequest::setExcludeInterim, EXCLUDE_INTERIM); + PARSER.declareStringOrNull(GetRecordsRequest::setStart, START); + PARSER.declareStringOrNull(GetRecordsRequest::setEnd, END); + PARSER.declareObject(GetRecordsRequest::setPageParams, PageParams.PARSER, PageParams.PAGE); + PARSER.declareDouble(GetRecordsRequest::setRecordScore, RECORD_SCORE); + PARSER.declareString(GetRecordsRequest::setSort, SORT); + PARSER.declareBoolean(GetRecordsRequest::setDescending, DESCENDING); + } + + private String jobId; + private Boolean excludeInterim; + private String start; + private String end; + private PageParams pageParams; + private Double recordScore; + private String sort; + private Boolean descending; + + private GetRecordsRequest() {} + + /** + * Constructs a request to retrieve records of a given job + * @param jobId id of the job to retrieve records of + */ + public GetRecordsRequest(String jobId) { + this.jobId = Objects.requireNonNull(jobId); + } + + public String getJobId() { + return jobId; + } + + public Boolean isExcludeInterim() { + return excludeInterim; + } + + /** + * Sets the value of "exclude_interim". + * When {@code true}, interim records will be filtered out. + * @param excludeInterim value of "exclude_interim" to be set + */ + public void setExcludeInterim(Boolean excludeInterim) { + this.excludeInterim = excludeInterim; + } + + public String getStart() { + return start; + } + + /** + * Sets the value of "start" which is a timestamp. + * Only records whose timestamp is on or after the "start" value will be returned. + * @param start String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + /** + * Sets the value of "end" which is a timestamp. + * Only records whose timestamp is before the "end" value will be returned. 
+ * @param end String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setEnd(String end) { + this.end = end; + } + + public PageParams getPageParams() { + return pageParams; + } + + /** + * Sets the paging parameters + * @param pageParams The paging parameters + */ + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + public Double getRecordScore() { + return recordScore; + } + + /** + * Sets the value of "record_score". + * Only records with "record_score" equal or greater will be returned. + * @param recordScore value of "record_score". + */ + public void setRecordScore(Double recordScore) { + this.recordScore = recordScore; + } + + public String getSort() { + return sort; + } + + /** + * Sets the value of "sort". + * Specifies the record field to sort on. + * @param sort value of "sort". + */ + public void setSort(String sort) { + this.sort = sort; + } + + public Boolean isDescending() { + return descending; + } + + /** + * Sets the value of "desc". + * Specifies the sorting order. + * @param descending value of "desc" + */ + public void setDescending(Boolean descending) { + this.descending = descending; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (excludeInterim != null) { + builder.field(EXCLUDE_INTERIM.getPreferredName(), excludeInterim); + } + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + if (recordScore != null) { + builder.field(RECORD_SCORE.getPreferredName(), recordScore); + } + if (sort != null) { + builder.field(SORT.getPreferredName(), sort); + } + if (descending != null) { + builder.field(DESCENDING.getPreferredName(), descending); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, excludeInterim, recordScore, pageParams, start, end, sort, descending); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetRecordsRequest other = (GetRecordsRequest) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(excludeInterim, other.excludeInterim) && + Objects.equals(recordScore, other.recordScore) && + Objects.equals(pageParams, other.pageParams) && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(sort, other.sort) && + Objects.equals(descending, other.descending); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsResponse.java new file mode 100644 index 00000000000..0d8efd5c6ea --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsResponse.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
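A sketch of a typical paged, sorted records query built from the setters above; the (from, size) constructor on PageParams is an assumption of this example:

    GetRecordsRequest request = new GetRecordsRequest("my-job");
    request.setExcludeInterim(true);
    request.setRecordScore(90.0);                    // only records scoring at least 90
    request.setSort("record_score");                 // a record field name, passed through to the endpoint
    request.setDescending(true);
    request.setPageParams(new PageParams(0, 50));    // assumed (from, size) constructor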
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.results.AnomalyRecord; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * A response containing the requested records + */ +public class GetRecordsResponse extends AbstractResultResponse { + + public static final ParseField RECORDS = new ParseField("records"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("get_records_response", + true, a -> new GetRecordsResponse((List) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), AnomalyRecord.PARSER, RECORDS); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT); + } + + public static GetRecordsResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + GetRecordsResponse(List records, long count) { + super(RECORDS, records, count); + } + + /** + * The retrieved records + * @return the retrieved records + */ + public List records() { + return results; + } + + @Override + public int hashCode() { + return Objects.hash(count, results); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetRecordsResponse other = (GetRecordsResponse) obj; + return count == other.count && Objects.equals(results, other.results); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/NodeAttributes.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/NodeAttributes.java new file mode 100644 index 00000000000..892df340abd --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/NodeAttributes.java @@ -0,0 +1,150 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
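The response side is symmetrical with the other result classes in this package; given a parser over the body:

    GetRecordsResponse response = GetRecordsResponse.fromXContent(parser);
    List<AnomalyRecord> records = response.records();   // already sorted and paged by the server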
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * A POJO containing the attributes of an Elasticsearch node
+ */
+public class NodeAttributes implements ToXContentObject {
+
+    public static final ParseField ID = new ParseField("id");
+    public static final ParseField NAME = new ParseField("name");
+    public static final ParseField EPHEMERAL_ID = new ParseField("ephemeral_id");
+    public static final ParseField TRANSPORT_ADDRESS = new ParseField("transport_address");
+    public static final ParseField ATTRIBUTES = new ParseField("attributes");
+
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<NodeAttributes, Void> PARSER =
+        new ConstructingObjectParser<>("node", true,
+            (a) -> {
+                int i = 0;
+                String id = (String) a[i++];
+                String name = (String) a[i++];
+                String ephemeralId = (String) a[i++];
+                String transportAddress = (String) a[i++];
+                Map<String, String> attributes = (Map<String, String>) a[i];
+                return new NodeAttributes(id, name, ephemeralId, transportAddress, attributes);
+            });
+
+    static {
+        PARSER.declareString(ConstructingObjectParser.constructorArg(), ID);
+        PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME);
+        PARSER.declareString(ConstructingObjectParser.constructorArg(), EPHEMERAL_ID);
+        PARSER.declareString(ConstructingObjectParser.constructorArg(), TRANSPORT_ADDRESS);
+        PARSER.declareField(ConstructingObjectParser.constructorArg(),
+            (p, c) -> p.mapStrings(),
+            ATTRIBUTES,
+            ObjectParser.ValueType.OBJECT);
+    }
+
+    private final String id;
+    private final String name;
+    private final String ephemeralId;
+    private final String transportAddress;
+    private final Map<String, String> attributes;
+
+    public NodeAttributes(String id, String name, String ephemeralId, String transportAddress, Map<String, String> attributes) {
+        this.id = id;
+        this.name = name;
+        this.ephemeralId = ephemeralId;
+        this.transportAddress = transportAddress;
+        this.attributes = Collections.unmodifiableMap(attributes);
+    }
+
+    /**
+     * The unique identifier of the node.
+     */
+    public String getId() {
+        return id;
+    }
+
+    /**
+     * The node name.
+     */
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * The ephemeral id of the node.
+     */
+    public String getEphemeralId() {
+        return ephemeralId;
+    }
+
+    /**
+     * The host and port where transport connections are accepted.
+     */
+    public String getTransportAddress() {
+        return transportAddress;
+    }
+
+    /**
+     * Additional attributes related to this node e.g., {"ml.max_open_jobs": "10"}.
+ */
+    public Map<String, String> getAttributes() {
+        return attributes;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(ID.getPreferredName(), id);
+        builder.field(NAME.getPreferredName(), name);
+        builder.field(EPHEMERAL_ID.getPreferredName(), ephemeralId);
+        builder.field(TRANSPORT_ADDRESS.getPreferredName(), transportAddress);
+        builder.field(ATTRIBUTES.getPreferredName(), attributes);
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id, name, ephemeralId, transportAddress, attributes);
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || getClass() != other.getClass()) {
+            return false;
+        }
+
+        NodeAttributes that = (NodeAttributes) other;
+        return Objects.equals(id, that.id) &&
+            Objects.equals(name, that.name) &&
+            Objects.equals(ephemeralId, that.ephemeralId) &&
+            Objects.equals(transportAddress, that.transportAddress) &&
+            Objects.equals(attributes, that.attributes);
+    }
+}
diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/OpenJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/OpenJobRequest.java
similarity index 85%
rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/OpenJobRequest.java
rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/OpenJobRequest.java
index a18a18bb55a..5b8e68cd72d 100644
--- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/OpenJobRequest.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/OpenJobRequest.java
@@ -16,23 +16,25 @@
  * specific language governing permissions and limitations
  * under the License.
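Although NodeAttributes is normally produced by the parser above, its public constructor makes a self-contained sketch easy (values are illustrative):

    Map<String, String> attrs = Collections.singletonMap("ml.max_open_jobs", "20");
    NodeAttributes node = new NodeAttributes("node-1", "ml-node", "ephemeral-1", "127.0.0.1:9300", attrs);
    String maxOpenJobs = node.getAttributes().get("ml.max_open_jobs");   // "20"
    // getAttributes() is an unmodifiable view; mutating it throws UnsupportedOperationException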
*/ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.util.Objects; +/** + * Request to open a Machine Learning Job + */ public class OpenJobRequest extends ActionRequest implements ToXContentObject { public static final ParseField TIMEOUT = new ParseField("timeout"); @@ -51,6 +53,11 @@ public class OpenJobRequest extends ActionRequest implements ToXContentObject { private String jobId; private TimeValue timeout; + /** + * Create a new request with the desired jobId + * + * @param jobId unique jobId, must not be null + */ public OpenJobRequest(String jobId) { this.jobId = Objects.requireNonNull(jobId, "[job_id] must not be null"); } @@ -59,6 +66,11 @@ public class OpenJobRequest extends ActionRequest implements ToXContentObject { return jobId; } + /** + * The jobId to open + * + * @param jobId unique jobId, must not be null + */ public void setJobId(String jobId) { this.jobId = Objects.requireNonNull(jobId, "[job_id] must not be null"); } @@ -67,6 +79,11 @@ public class OpenJobRequest extends ActionRequest implements ToXContentObject { return timeout; } + /** + * How long to wait for job to open before timing out the request + * + * @param timeout default value of 30 minutes + */ public void setTimeout(TimeValue timeout) { this.timeout = timeout; } @@ -77,7 +94,7 @@ public class OpenJobRequest extends ActionRequest implements ToXContentObject { } @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(Job.ID.getPreferredName(), jobId); if (timeout != null) { diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/OpenJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/OpenJobResponse.java similarity index 78% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/OpenJobResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/OpenJobResponse.java index d8850ddbbe3..2536aeeaf78 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/OpenJobResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/OpenJobResponse.java @@ -16,11 +16,11 @@ * specific language governing permissions and limitations * under the License. 
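A usage sketch for the request as documented above, using only its own constructor and setter:

    OpenJobRequest request = new OpenJobRequest("my-job");    // job_id must be non-null
    request.setTimeout(TimeValue.timeValueMinutes(10));       // overrides the 30 minute default wait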
*/ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -28,22 +28,23 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; +/** + * Response indicating if the Machine Learning Job is now opened or not + */ public class OpenJobResponse extends ActionResponse implements ToXContentObject { private static final ParseField OPENED = new ParseField("opened"); - public static final ObjectParser PARSER = new ObjectParser<>("open_job_response", true, OpenJobResponse::new); + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("open_job_response", true, (a) -> new OpenJobResponse((Boolean)a[0])); static { - PARSER.declareBoolean(OpenJobResponse::setOpened, OPENED); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), OPENED); } private boolean opened; - OpenJobResponse() { - } - - public OpenJobResponse(boolean opened) { + OpenJobResponse(boolean opened) { this.opened = opened; } @@ -51,14 +52,15 @@ public class OpenJobResponse extends ActionResponse implements ToXContentObject return PARSER.parse(parser, null); } + /** + * Has the job opened or not + * + * @return boolean value indicating the job opened status + */ public boolean isOpened() { return opened; } - public void setOpened(boolean opened) { - this.opened = opened; - } - @Override public boolean equals(Object other) { if (this == other) { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostDataRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostDataRequest.java new file mode 100644 index 00000000000..cc015fc4837 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostDataRequest.java @@ -0,0 +1,229 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * POJO for posting data to a Machine Learning job + */ +public class PostDataRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField RESET_START = new ParseField("reset_start"); + public static final ParseField RESET_END = new ParseField("reset_end"); + public static final ParseField CONTENT_TYPE = new ParseField("content_type"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("post_data_request", + (a) -> new PostDataRequest((String)a[0], XContentType.fromMediaTypeOrFormat((String)a[1]), new byte[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), CONTENT_TYPE); + PARSER.declareStringOrNull(PostDataRequest::setResetEnd, RESET_END); + PARSER.declareStringOrNull(PostDataRequest::setResetStart, RESET_START); + } + + private final String jobId; + private final XContentType xContentType; + private final BytesReference content; + private String resetStart; + private String resetEnd; + + /** + * Create a new PostDataRequest object + * + * @param jobId non-null jobId of the job to post data to + * @param xContentType content type of the data to post. Only {@link XContentType#JSON} or {@link XContentType#SMILE} are supported + * @param content bulk serialized content in the format of the passed {@link XContentType} + */ + public PostDataRequest(String jobId, XContentType xContentType, BytesReference content) { + this.jobId = Objects.requireNonNull(jobId, "job_id must not be null"); + this.xContentType = Objects.requireNonNull(xContentType, "content_type must not be null"); + this.content = Objects.requireNonNull(content, "content must not be null"); + } + + /** + * Create a new PostDataRequest object referencing the passed {@code byte[]} content + * + * @param jobId non-null jobId of the job to post data to + * @param xContentType content type of the data to post. 
Only {@link XContentType#JSON} or {@link XContentType#SMILE} are supported + * @param content bulk serialized content in the format of the passed {@link XContentType} + */ + public PostDataRequest(String jobId, XContentType xContentType, byte[] content) { + this(jobId, xContentType, new BytesArray(content)); + } + + /** + * Create a new PostDataRequest object referencing the passed {@link JsonBuilder} object + * + * @param jobId non-null jobId of the job to post data to + * @param builder {@link JsonBuilder} object containing documents to be serialized and sent in {@link XContentType#JSON} format + */ + public PostDataRequest(String jobId, JsonBuilder builder) { + this(jobId, XContentType.JSON, builder.build()); + } + + public String getJobId() { + return jobId; + } + + public String getResetStart() { + return resetStart; + } + + /** + * Specifies the start of the bucket resetting range + * + * @param resetStart String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO 8601 string + */ + public void setResetStart(String resetStart) { + this.resetStart = resetStart; + } + + public String getResetEnd() { + return resetEnd; + } + + /** + * Specifies the end of the bucket resetting range + * + * @param resetEnd String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO 8601 string + */ + public void setResetEnd(String resetEnd) { + this.resetEnd = resetEnd; + } + + public BytesReference getContent() { + return content; + } + + public XContentType getXContentType() { + return xContentType; + } + + @Override + public int hashCode() { + //We leave out the content for server side parity + return Objects.hash(jobId, resetStart, resetEnd, xContentType); + } + + @Override + public boolean equals(Object obj) { + if(obj == this) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + //We leave out the content for server side parity + PostDataRequest other = (PostDataRequest) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(resetStart, other.resetStart) && + Objects.equals(resetEnd, other.resetEnd) && + Objects.equals(xContentType, other.xContentType); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(CONTENT_TYPE.getPreferredName(), xContentType.mediaType()); + if (resetEnd != null) { + builder.field(RESET_END.getPreferredName(), resetEnd); + } + if (resetStart != null) { + builder.field(RESET_START.getPreferredName(), resetStart); + } + builder.endObject(); + return builder; + } + + /** + * Class for incrementally building a bulk document request in {@link XContentType#JSON} format + */ + public static class JsonBuilder { + + private final List bytes = new ArrayList<>(); + + /** + * Add a document via a {@code byte[]} array + * + * @param doc {@code byte[]} array of a serialized JSON object + */ + public JsonBuilder addDoc(byte[] doc) { + bytes.add(ByteBuffer.wrap(doc)); + return this; + } + + /** + * Add a document via a serialized JSON String + * + * @param doc a serialized JSON String + */ + public JsonBuilder addDoc(String doc) { + bytes.add(ByteBuffer.wrap(doc.getBytes(StandardCharsets.UTF_8))); + return this; + } + + /** + * Add a document via an object map + * + * @param doc document object to add to bulk request + * @throws IOException 
on parsing/serialization errors + */ + public JsonBuilder addDoc(Map doc) throws IOException { + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.map(doc); + bytes.add(ByteBuffer.wrap(BytesReference.toBytes(BytesReference.bytes(builder)))); + } + return this; + } + + private BytesReference build() { + ByteBuffer[] buffers = bytes.toArray(new ByteBuffer[bytes.size()]); + return BytesReference.fromByteBuffers(buffers); + } + + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostDataResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostDataResponse.java new file mode 100644 index 00000000000..ce99316e90c --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostDataResponse.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ml.job.process.DataCounts; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Response object when posting data to a Machine Learning Job + */ +public class PostDataResponse extends ActionResponse implements ToXContentObject { + + private DataCounts dataCounts; + + public static PostDataResponse fromXContent(XContentParser parser) throws IOException { + return new PostDataResponse(DataCounts.PARSER.parse(parser, null)); + } + + public PostDataResponse(DataCounts counts) { + this.dataCounts = counts; + } + + public DataCounts getDataCounts() { + return dataCounts; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + return dataCounts.toXContent(builder, params); + } + + @Override + public int hashCode() { + return Objects.hashCode(dataCounts); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + PostDataResponse other = (PostDataResponse) obj; + return Objects.equals(dataCounts, other.dataCounts); + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutJobRequest.java similarity index 87% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutJobRequest.java index 
2cdf1993fcc..de8529de6bb 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutJobRequest.java @@ -16,22 +16,30 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.util.Objects; +/** + * Request to create a new Machine Learning Job given a {@link Job} configuration + */ public class PutJobRequest extends ActionRequest implements ToXContentObject { private final Job job; + /** + * Construct a new PutJobRequest + * + * @param job a {@link Job} configuration to create + */ public PutJobRequest(Job job) { this.job = job; } diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutJobResponse.java similarity index 84% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutJobResponse.java index b37bd35d6b1..6e6cce52e58 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutJobResponse.java @@ -16,17 +16,19 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.util.Objects; +/** + * Response containing the newly created {@link Job} + */ public class PutJobResponse implements ToXContentObject { private Job job; @@ -35,19 +37,16 @@ public class PutJobResponse implements ToXContentObject { return new PutJobResponse(Job.PARSER.parse(parser, null).build()); } - public PutJobResponse(Job job) { + PutJobResponse(Job job) { this.job = job; } - public PutJobResponse() { - } - public Job getResponse() { return job; } @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { job.toXContent(builder, params); return builder; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateJobRequest.java new file mode 100644 index 00000000000..6e050f8adcf --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateJobRequest.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.JobUpdate; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Updates a {@link org.elasticsearch.client.ml.job.config.Job} with the passed {@link JobUpdate} + * settings + */ +public class UpdateJobRequest extends ActionRequest implements ToXContentObject { + + private final JobUpdate update; + + public UpdateJobRequest(JobUpdate update) { + this.update = update; + } + + public JobUpdate getJobUpdate() { + return update; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return update.toXContent(builder, params); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + UpdateJobRequest that = (UpdateJobRequest) o; + return Objects.equals(update, that.update); + } + + @Override + public int hashCode() { + return Objects.hash(update); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/ChunkingConfig.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/ChunkingConfig.java index 0b9d9f12046..10e7b3f9749 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/ChunkingConfig.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
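Tying together the PostDataRequest pieces defined earlier, a sketch of building a JSON bulk payload with the nested JsonBuilder (document contents are illustrative):

    static PostDataRequest buildPostDataRequest() throws IOException {
        PostDataRequest.JsonBuilder docs = new PostDataRequest.JsonBuilder();
        docs.addDoc("{\"time\":1534680000000,\"value\":42.0}");        // a pre-serialized JSON document
        docs.addDoc(Collections.singletonMap("value", 7.3));           // maps are serialized via XContentBuilder
        PostDataRequest request = new PostDataRequest("my-job", docs); // the JsonBuilder overload implies JSON
        request.setResetStart("2018-08-19T00:00:00Z");                 // optional bucket-reset window start
        return request;
    }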
*/ -package org.elasticsearch.protocol.xpack.ml.datafeed; +package org.elasticsearch.client.ml.datafeed; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java index 929d4dacb90..752752b1038 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.datafeed; +package org.elasticsearch.client.ml.datafeed; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -27,7 +28,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdate.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdate.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java index 787bdf06e5e..184d5d51481 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdate.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.datafeed; +package org.elasticsearch.client.ml.datafeed; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -26,7 +27,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java index 7baaae52a8b..9b759599dda 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.unit.TimeValue; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisLimits.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisLimits.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisLimits.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisLimits.java index f69b9ccbf9f..22d26f06fd8 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisLimits.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisLimits.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/CategorizationAnalyzerConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/CategorizationAnalyzerConfig.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/CategorizationAnalyzerConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/CategorizationAnalyzerConfig.java index dc7f047b804..3a2243d6548 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/CategorizationAnalyzerConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/CategorizationAnalyzerConfig.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DataDescription.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DataDescription.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DataDescription.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DataDescription.java index a3f8c2563b2..636b8c6ad50 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DataDescription.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DataDescription.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DefaultDetectorDescription.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DefaultDetectorDescription.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DefaultDetectorDescription.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DefaultDetectorDescription.java index 081e685fc74..25b4fbbb2a7 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DefaultDetectorDescription.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DefaultDetectorDescription.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.Strings; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRule.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectionRule.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRule.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectionRule.java index 9a73afe885b..bcba8a7d74a 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRule.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectionRule.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Detector.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Detector.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Detector.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Detector.java index 042d48b7006..e1af60269b5 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Detector.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Detector.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorFunction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectorFunction.java similarity index 97% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorFunction.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectorFunction.java index 5d9a06948d0..932782101ba 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorFunction.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectorFunction.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import java.util.Arrays; import java.util.Collections; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRef.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/FilterRef.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRef.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/FilterRef.java index 9afbdf4876f..b686ad92ae5 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRef.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/FilterRef.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Job.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Job.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java index 59840cfec2a..aff74271f1c 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Job.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; +import org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; @@ -25,7 +26,6 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.util.TimeUtil; import java.io.IOException; import java.util.Collections; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobState.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobState.java new file mode 100644 index 00000000000..32684bd7e62 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobState.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml.job.config;
+
+import java.util.Locale;
+
+/**
+ * A job, whether running or complete, is in one of these states.
+ * When a job is created it is initialised in the CLOSED state,
+ * i.e. it is not running.
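+ * <p>
+ * A minimal usage sketch (values are illustrative only):
+ * <pre>{@code
+ * JobState state = JobState.fromString("opened"); // parses, case-insensitively, to JobState.OPENED
+ * String wireName = state.value();                // "opened", the lower-case REST form
+ * }</pre>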
+ */
+public enum JobState {
+
+    CLOSING, CLOSED, OPENED, FAILED, OPENING;
+
+    public static JobState fromString(String name) {
+        return valueOf(name.trim().toUpperCase(Locale.ROOT));
+    }
+
+    public String value() {
+        return name().toLowerCase(Locale.ROOT);
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobUpdate.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobUpdate.java
new file mode 100644
index 00000000000..15499a65043
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobUpdate.java
@@ -0,0 +1,454 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml.job.config;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * POJO for updating an existing Machine Learning {@link Job}.
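+ * <p>
+ * Only the fields that are explicitly set are serialized, so an update acts as
+ * a partial, sparse edit of the job. A sketch (values are illustrative only):
+ * <pre>{@code
+ * JobUpdate update = new JobUpdate.Builder("my-job")
+ *     .setGroups(Arrays.asList("production"))
+ *     .setResultsRetentionDays(30L)
+ *     .build();
+ * // serializes roughly as:
+ * // {"job_id":"my-job","groups":["production"],"results_retention_days":30}
+ * }</pre>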
+ */
+public class JobUpdate implements ToXContentObject {
+    public static final ParseField DETECTORS = new ParseField("detectors");
+
+    public static final ConstructingObjectParser<Builder, Void> PARSER = new ConstructingObjectParser<>(
+        "job_update", true, args -> new Builder((String) args[0]));
+
+    static {
+        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), Job.ID);
+        PARSER.declareStringArray(Builder::setGroups, Job.GROUPS);
+        PARSER.declareStringOrNull(Builder::setDescription, Job.DESCRIPTION);
+        PARSER.declareObjectArray(Builder::setDetectorUpdates, DetectorUpdate.PARSER, DETECTORS);
+        PARSER.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.PARSER, Job.MODEL_PLOT_CONFIG);
+        PARSER.declareObject(Builder::setAnalysisLimits, AnalysisLimits.PARSER, Job.ANALYSIS_LIMITS);
+        PARSER.declareString((builder, val) -> builder.setBackgroundPersistInterval(
+            TimeValue.parseTimeValue(val, Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName())), Job.BACKGROUND_PERSIST_INTERVAL);
+        PARSER.declareLong(Builder::setRenormalizationWindowDays, Job.RENORMALIZATION_WINDOW_DAYS);
+        PARSER.declareLong(Builder::setResultsRetentionDays, Job.RESULTS_RETENTION_DAYS);
+        PARSER.declareLong(Builder::setModelSnapshotRetentionDays, Job.MODEL_SNAPSHOT_RETENTION_DAYS);
+        PARSER.declareStringArray(Builder::setCategorizationFilters, AnalysisConfig.CATEGORIZATION_FILTERS);
+        PARSER.declareField(Builder::setCustomSettings, (p, c) -> p.map(), Job.CUSTOM_SETTINGS, ObjectParser.ValueType.OBJECT);
+    }
+
+    private final String jobId;
+    private final List<String> groups;
+    private final String description;
+    private final List<DetectorUpdate> detectorUpdates;
+    private final ModelPlotConfig modelPlotConfig;
+    private final AnalysisLimits analysisLimits;
+    private final Long renormalizationWindowDays;
+    private final TimeValue backgroundPersistInterval;
+    private final Long modelSnapshotRetentionDays;
+    private final Long resultsRetentionDays;
+    private final List<String> categorizationFilters;
+    private final Map<String, Object> customSettings;
+
+    private JobUpdate(String jobId, @Nullable List<String> groups, @Nullable String description,
+                      @Nullable List<DetectorUpdate> detectorUpdates, @Nullable ModelPlotConfig modelPlotConfig,
+                      @Nullable AnalysisLimits analysisLimits, @Nullable TimeValue backgroundPersistInterval,
+                      @Nullable Long renormalizationWindowDays, @Nullable Long resultsRetentionDays,
+                      @Nullable Long modelSnapshotRetentionDays, @Nullable List<String> categorizationFilters,
+                      @Nullable Map<String, Object> customSettings) {
+        this.jobId = jobId;
+        this.groups = groups;
+        this.description = description;
+        this.detectorUpdates = detectorUpdates;
+        this.modelPlotConfig = modelPlotConfig;
+        this.analysisLimits = analysisLimits;
+        this.renormalizationWindowDays = renormalizationWindowDays;
+        this.backgroundPersistInterval = backgroundPersistInterval;
+        this.modelSnapshotRetentionDays = modelSnapshotRetentionDays;
+        this.resultsRetentionDays = resultsRetentionDays;
+        this.categorizationFilters = categorizationFilters;
+        this.customSettings = customSettings;
+    }
+
+    public String getJobId() {
+        return jobId;
+    }
+
+    public List<String> getGroups() {
+        return groups;
+    }
+
+    public String getDescription() {
+        return description;
+    }
+
+    public List<DetectorUpdate> getDetectorUpdates() {
+        return detectorUpdates;
+    }
+
+    public ModelPlotConfig getModelPlotConfig() {
+        return modelPlotConfig;
+    }
+
+    public AnalysisLimits getAnalysisLimits() {
+        return analysisLimits;
+    }
+
+    public Long getRenormalizationWindowDays() {
+        return renormalizationWindowDays;
+    }
+
+    public TimeValue getBackgroundPersistInterval() {
+        return backgroundPersistInterval;
+    }
+
+    public Long getModelSnapshotRetentionDays() {
+        return modelSnapshotRetentionDays;
+    }
+
+    public Long getResultsRetentionDays() {
+        return resultsRetentionDays;
+    }
+
+    public List<String> getCategorizationFilters() {
+        return categorizationFilters;
+    }
+
+    public Map<String, Object> getCustomSettings() {
+        return customSettings;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(Job.ID.getPreferredName(), jobId);
+        if (groups != null) {
+            builder.field(Job.GROUPS.getPreferredName(), groups);
+        }
+        if (description != null) {
+            builder.field(Job.DESCRIPTION.getPreferredName(), description);
+        }
+        if (detectorUpdates != null) {
+            builder.field(DETECTORS.getPreferredName(), detectorUpdates);
+        }
+        if (modelPlotConfig != null) {
+            builder.field(Job.MODEL_PLOT_CONFIG.getPreferredName(), modelPlotConfig);
+        }
+        if (analysisLimits != null) {
+            builder.field(Job.ANALYSIS_LIMITS.getPreferredName(), analysisLimits);
+        }
+        if (renormalizationWindowDays != null) {
+            builder.field(Job.RENORMALIZATION_WINDOW_DAYS.getPreferredName(), renormalizationWindowDays);
+        }
+        if (backgroundPersistInterval != null) {
+            builder.field(Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName(), backgroundPersistInterval);
+        }
+        if (modelSnapshotRetentionDays != null) {
+            builder.field(Job.MODEL_SNAPSHOT_RETENTION_DAYS.getPreferredName(), modelSnapshotRetentionDays);
+        }
+        if (resultsRetentionDays != null) {
+            builder.field(Job.RESULTS_RETENTION_DAYS.getPreferredName(), resultsRetentionDays);
+        }
+        if (categorizationFilters != null) {
+            builder.field(AnalysisConfig.CATEGORIZATION_FILTERS.getPreferredName(), categorizationFilters);
+        }
+        if (customSettings != null) {
+            builder.field(Job.CUSTOM_SETTINGS.getPreferredName(), customSettings);
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || getClass() != other.getClass()) {
+            return false;
+        }
+
+        JobUpdate that = (JobUpdate) other;
+
+        return Objects.equals(this.jobId, that.jobId)
+            && Objects.equals(this.groups, that.groups)
+            && Objects.equals(this.description, that.description)
+            && Objects.equals(this.detectorUpdates, that.detectorUpdates)
+            && Objects.equals(this.modelPlotConfig, that.modelPlotConfig)
+            && Objects.equals(this.analysisLimits, that.analysisLimits)
+            && Objects.equals(this.renormalizationWindowDays, that.renormalizationWindowDays)
+            && Objects.equals(this.backgroundPersistInterval, that.backgroundPersistInterval)
+            && Objects.equals(this.modelSnapshotRetentionDays, that.modelSnapshotRetentionDays)
+            && Objects.equals(this.resultsRetentionDays, that.resultsRetentionDays)
+            && Objects.equals(this.categorizationFilters, that.categorizationFilters)
+            && Objects.equals(this.customSettings, that.customSettings);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, renormalizationWindowDays,
+            backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, categorizationFilters, customSettings);
+    }
+
+    public static class DetectorUpdate implements ToXContentObject {
+        @SuppressWarnings("unchecked")
+        public static final ConstructingObjectParser<DetectorUpdate, Void> PARSER =
+            new ConstructingObjectParser<>("detector_update", true, a -> new DetectorUpdate((int) a[0], (String) a[1],
+                (List<DetectionRule>) a[2]));
+
+        static {
+            PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), Detector.DETECTOR_INDEX);
+            PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), Job.DESCRIPTION);
+            PARSER.declareObjectArray(ConstructingObjectParser.optionalConstructorArg(), (parser, parseFieldMatcher) ->
+                DetectionRule.PARSER.apply(parser, parseFieldMatcher).build(), Detector.CUSTOM_RULES_FIELD);
+        }
+
+        private final int detectorIndex;
+        private final String description;
+        private final List<DetectionRule> rules;
+
+        /**
+         * A detector update to apply to the Machine Learning Job
+         *
+         * @param detectorIndex The identifier of the detector to update.
+         * @param description The new description for the detector.
+         * @param rules The new list of rules for the detector.
+         */
+        public DetectorUpdate(int detectorIndex, String description, List<DetectionRule> rules) {
+            this.detectorIndex = detectorIndex;
+            this.description = description;
+            this.rules = rules;
+        }
+
+        public int getDetectorIndex() {
+            return detectorIndex;
+        }
+
+        public String getDescription() {
+            return description;
+        }
+
+        public List<DetectionRule> getRules() {
+            return rules;
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
+
+            builder.field(Detector.DETECTOR_INDEX.getPreferredName(), detectorIndex);
+            if (description != null) {
+                builder.field(Job.DESCRIPTION.getPreferredName(), description);
+            }
+            if (rules != null) {
+                builder.field(Detector.CUSTOM_RULES_FIELD.getPreferredName(), rules);
+            }
+            builder.endObject();
+
+            return builder;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(detectorIndex, description, rules);
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (this == other) {
+                return true;
+            }
+
+            if (other == null || getClass() != other.getClass()) {
+                return false;
+            }
+
+            DetectorUpdate that = (DetectorUpdate) other;
+            return this.detectorIndex == that.detectorIndex && Objects.equals(this.description, that.description)
+                && Objects.equals(this.rules, that.rules);
+        }
+    }
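+
+    // Illustrative only: a detector-level update that renames detector 0 and leaves
+    // its custom rules untouched (null fields are simply omitted from the request body):
+    //   new JobUpdate.DetectorUpdate(0, "Renamed detector", null)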
+
+    public static class Builder {
+
+        private final String jobId;
+        private List<String> groups;
+        private String description;
+        private List<DetectorUpdate> detectorUpdates;
+        private ModelPlotConfig modelPlotConfig;
+        private AnalysisLimits analysisLimits;
+        private Long renormalizationWindowDays;
+        private TimeValue backgroundPersistInterval;
+        private Long modelSnapshotRetentionDays;
+        private Long resultsRetentionDays;
+        private List<String> categorizationFilters;
+        private Map<String, Object> customSettings;
+
+        /**
+         * New {@link JobUpdate.Builder} object for the existing job
+         *
+         * @param jobId non-null `jobId` for referencing an existing {@link Job}
+         */
+        public Builder(String jobId) {
+            this.jobId = jobId;
+        }
+
+        /**
+         * Set the job groups
+         *
+         * Updates the {@link Job#groups} setting
+         *
+         * @param groups A list of group names
+         */
+        public Builder setGroups(List<String> groups) {
+            this.groups = groups;
+            return this;
+        }
+
+        /**
+         * Set the job description
+         *
+         * Updates the {@link Job#description} setting
+         *
+         * @param description the desired Machine Learning job description
+         */
+        public Builder setDescription(String description) {
+            this.description = description;
+            return this;
+        }
+
+        /**
+         * The detector updates to apply to the job
+         *
+         * Updates the {@link AnalysisConfig#detectors} setting
+         *
+         * @param detectorUpdates list of {@link JobUpdate.DetectorUpdate} objects
+         */
+        public Builder setDetectorUpdates(List<DetectorUpdate> detectorUpdates) {
+            this.detectorUpdates = detectorUpdates;
+            return this;
+        }
+
+        /**
+         * Enables/disables the model plot config setting through {@link ModelPlotConfig#enabled}
+         *
+         * Updates the {@link Job#modelPlotConfig} setting
+         *
+         * @param modelPlotConfig {@link ModelPlotConfig} object with updated fields
+         */
+        public Builder setModelPlotConfig(ModelPlotConfig modelPlotConfig) {
+            this.modelPlotConfig = modelPlotConfig;
+            return this;
+        }
+
+        /**
+         * Sets new {@link AnalysisLimits} for the {@link Job}
+         *
+         * Updates the {@link Job#analysisLimits} setting
+         *
+         * @param analysisLimits Updates to {@link AnalysisLimits}
+         */
+        public Builder setAnalysisLimits(AnalysisLimits analysisLimits) {
+            this.analysisLimits = analysisLimits;
+            return this;
+        }
+
+        /**
+         * Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen
+         *
+         * Updates the {@link Job#renormalizationWindowDays} setting
+         *
+         * @param renormalizationWindowDays number of renormalization window days
+         */
+        public Builder setRenormalizationWindowDays(Long renormalizationWindowDays) {
+            this.renormalizationWindowDays = renormalizationWindowDays;
+            return this;
+        }
+
+        /**
+         * Advanced configuration option. The time between each periodic persistence of the model
+         *
+         * Updates the {@link Job#backgroundPersistInterval} setting
+         *
+         * @param backgroundPersistInterval the time between background persistence
+         */
+        public Builder setBackgroundPersistInterval(TimeValue backgroundPersistInterval) {
+            this.backgroundPersistInterval = backgroundPersistInterval;
+            return this;
+        }
+
+        /**
+         * The time in days that model snapshots are retained for the job.
+         *
+         * Updates the {@link Job#modelSnapshotRetentionDays} setting
+         *
+         * @param modelSnapshotRetentionDays number of days to keep a model snapshot
+         */
+        public Builder setModelSnapshotRetentionDays(Long modelSnapshotRetentionDays) {
+            this.modelSnapshotRetentionDays = modelSnapshotRetentionDays;
+            return this;
+        }
+
+        /**
+         * Advanced configuration option. The number of days for which job results are retained
+         *
+         * Updates the {@link Job#resultsRetentionDays} setting
+         *
+         * @param resultsRetentionDays number of days to keep results.
+         */
+        public Builder setResultsRetentionDays(Long resultsRetentionDays) {
+            this.resultsRetentionDays = resultsRetentionDays;
+            return this;
+        }
+
+        /**
+         * Sets the categorization filters on the {@link Job}
+         *
+         * Updates the {@link AnalysisConfig#categorizationFilters} setting.
+         * Requires {@link AnalysisConfig#categorizationFieldName} to have been set on the existing Job.
+         *
+         * @param categorizationFilters list of categorization filters for the Job's {@link AnalysisConfig}
+         */
+        public Builder setCategorizationFilters(List<String> categorizationFilters) {
+            this.categorizationFilters = categorizationFilters;
+            return this;
+        }
+
+        /**
+         * Contains custom metadata about the job.
+         *
+         * Updates the {@link Job#customSettings} setting
+         *
+         * @param customSettings custom settings map for the job
+         */
+        public Builder setCustomSettings(Map<String, Object> customSettings) {
+            this.customSettings = customSettings;
+            return this;
+        }
+
+        public JobUpdate build() {
+            return new JobUpdate(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, backgroundPersistInterval,
+                renormalizationWindowDays, resultsRetentionDays, modelSnapshotRetentionDays, categorizationFilters, customSettings);
+        }
+    }
+}
diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilter.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java
similarity index 98%
rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilter.java
rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java
index bcbc0c295c2..e0d1bd0849b 100644
--- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilter.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.xpack.ml.job.config;
+package org.elasticsearch.client.ml.job.config;
 
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/ModelPlotConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/ModelPlotConfig.java
similarity index 98%
rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/ModelPlotConfig.java
rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/ModelPlotConfig.java
index 59b0252a766..b39db054b30 100644
--- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/ModelPlotConfig.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/ModelPlotConfig.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.xpack.ml.job.config;
+package org.elasticsearch.client.ml.job.config;
 
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Operator.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Operator.java
similarity index 97%
rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Operator.java
rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Operator.java
index c3dc52e5a3c..37d62752035 100644
--- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Operator.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Operator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleAction.java similarity index 95% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleAction.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleAction.java index 9e2364b4fd9..05b6ef6e197 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleAction.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleAction.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import java.util.Locale; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleCondition.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleCondition.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleCondition.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleCondition.java index ec19547fe13..14389809bd2 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleCondition.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleCondition.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScope.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleScope.java similarity index 89% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScope.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleScope.java index aa12d5ea2a2..8b6886d5825 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScope.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleScope.java @@ -16,11 +16,11 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ContextParser; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -50,7 +50,7 @@ public class RuleScope implements ToXContentObject { Map value = (Map) entry.getValue(); builder.map(value); try (XContentParser scopeParser = XContentFactory.xContent(builder.contentType()).createParser( - NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, Strings.toString(builder))) { + NamedXContentRegistry.EMPTY, DEPRECATION_HANDLER, Strings.toString(builder))) { scope.put(entry.getKey(), FilterRef.PARSER.parse(scopeParser, null)); } } @@ -59,6 +59,15 @@ public class RuleScope implements ToXContentObject { }; } + private static final DeprecationHandler DEPRECATION_HANDLER = new DeprecationHandler() { + + @Override + public void usedDeprecatedName(String usedName, String modernName) {} + + @Override + public void usedDeprecatedField(String usedName, String replacedWith) {} + }; + private final Map scope; public RuleScope() { diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/DataCounts.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/DataCounts.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java index e07312d12e1..7afef0785fe 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/DataCounts.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java @@ -16,15 +16,15 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; -import org.elasticsearch.protocol.xpack.ml.job.util.TimeUtil; import java.io.IOException; import java.util.Date; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSizeStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java similarity index 97% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSizeStats.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java index 50f655b4dd7..c9a34fe5c98 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSizeStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java @@ -16,16 +16,16 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.results.Result; +import org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; -import org.elasticsearch.protocol.xpack.ml.job.results.Result; -import org.elasticsearch.protocol.xpack.ml.job.util.TimeUtil; import java.io.IOException; import java.util.Date; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshot.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java similarity index 97% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshot.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java index 2b9957f9bc7..603bff0d906 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshot.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java @@ -16,16 +16,16 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; import org.elasticsearch.Version; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; -import org.elasticsearch.protocol.xpack.ml.job.util.TimeUtil; import java.io.IOException; import java.util.Date; @@ -221,10 +221,8 @@ public class ModelSnapshot implements ToXContentObject { public static class Builder { private String jobId; - // Stored snapshot documents created prior to 6.3.0 will have no - // value for min_version. We default it to 5.5.0 as there were - // no model changes between 5.5.0 and 6.3.0. - private Version minVersion = Version.V_5_5_0; + // Stored snapshot documents created prior to 6.3.0 will have no value for min_version. + private Version minVersion = Version.V_6_3_0; private Date timestamp; private String description; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/Quantiles.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/Quantiles.java similarity index 96% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/Quantiles.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/Quantiles.java index 1c047d6c302..795028847a0 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/Quantiles.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/Quantiles.java @@ -16,14 +16,14 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.util.Date; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyCause.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyCause.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyCause.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyCause.java index 7ad57b24fcb..4fbe5ac1ff3 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyCause.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyCause.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyRecord.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyRecord.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyRecord.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyRecord.java index 4747f3a48bd..db4483fef4b 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyRecord.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyRecord.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -25,7 +26,6 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser.Token; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.time.format.DateTimeFormatter; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Bucket.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Bucket.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Bucket.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Bucket.java index cbaf83abbad..2dfed4c3834 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Bucket.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Bucket.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -25,7 +26,6 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser.Token; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.time.format.DateTimeFormatter; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketInfluencer.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/BucketInfluencer.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketInfluencer.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/BucketInfluencer.java index 29d8447cd6a..6fc2a9b8b2d 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketInfluencer.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/BucketInfluencer.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -25,7 +26,6 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser.Token; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.time.format.DateTimeFormatter; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/CategoryDefinition.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/CategoryDefinition.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/CategoryDefinition.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/CategoryDefinition.java index 59b59006b33..dd65899e67e 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/CategoryDefinition.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/CategoryDefinition.java @@ -16,13 +16,13 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.util.ArrayList; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Influence.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influence.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Influence.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influence.java index 53607479d66..bfcc545362d 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Influence.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influence.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Influencer.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influencer.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Influencer.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influencer.java index 51c88883608..28ceb243bf6 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Influencer.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influencer.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -25,7 +26,6 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser.Token; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.time.format.DateTimeFormatter; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/OverallBucket.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/OverallBucket.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/OverallBucket.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/OverallBucket.java index 4f13b4b2664..eaf050f8be9 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/OverallBucket.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/OverallBucket.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -25,7 +26,6 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.time.format.DateTimeFormatter; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Result.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Result.java similarity index 95% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Result.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Result.java index cce5fa65ebb..a7f8933a0a1 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Result.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Result.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.ParseField; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/ForecastStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/ForecastStats.java new file mode 100644 index 00000000000..a6b41beca83 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/ForecastStats.java @@ -0,0 +1,174 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.job.stats; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * A class to hold statistics about forecasts. 
+ */
+public class ForecastStats implements ToXContentObject {
+
+    public static final ParseField TOTAL = new ParseField("total");
+    public static final ParseField FORECASTED_JOBS = new ParseField("forecasted_jobs");
+    public static final ParseField MEMORY_BYTES = new ParseField("memory_bytes");
+    public static final ParseField PROCESSING_TIME_MS = new ParseField("processing_time_ms");
+    public static final ParseField RECORDS = new ParseField("records");
+    public static final ParseField STATUS = new ParseField("status");
+
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<ForecastStats, Void> PARSER =
+        new ConstructingObjectParser<>("forecast_stats",
+            true,
+            (a) -> {
+                int i = 0;
+                long total = (long) a[i++];
+                SimpleStats memoryStats = (SimpleStats) a[i++];
+                SimpleStats recordStats = (SimpleStats) a[i++];
+                SimpleStats runtimeStats = (SimpleStats) a[i++];
+                Map<String, Long> statusCounts = (Map<String, Long>) a[i];
+                return new ForecastStats(total, memoryStats, recordStats, runtimeStats, statusCounts);
+            });
+
+    static {
+        PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL);
+        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SimpleStats.PARSER, MEMORY_BYTES);
+        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SimpleStats.PARSER, RECORDS);
+        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SimpleStats.PARSER, PROCESSING_TIME_MS);
+        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
+            p -> {
+                Map<String, Long> counts = new HashMap<>();
+                p.map().forEach((key, value) -> counts.put(key, ((Number) value).longValue()));
+                return counts;
+            }, STATUS, ObjectParser.ValueType.OBJECT);
+    }
+
+    private final long total;
+    private final long forecastedJobs;
+    private SimpleStats memoryStats;
+    private SimpleStats recordStats;
+    private SimpleStats runtimeStats;
+    private Map<String, Long> statusCounts;
+
+    public ForecastStats(long total,
+                         SimpleStats memoryStats,
+                         SimpleStats recordStats,
+                         SimpleStats runtimeStats,
+                         Map<String, Long> statusCounts) {
+        this.total = total;
+        this.forecastedJobs = total > 0 ? 1 : 0;
+        if (total > 0) {
+            this.memoryStats = Objects.requireNonNull(memoryStats);
+            this.recordStats = Objects.requireNonNull(recordStats);
+            this.runtimeStats = Objects.requireNonNull(runtimeStats);
+            this.statusCounts = Collections.unmodifiableMap(statusCounts);
+        }
+    }
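+
+    // Illustrative note on the constructor above: parsing {"total":0} yields getTotal() == 0
+    // and getForecastedJobs() == 0, while the per-forecast statistics (memory, records,
+    // runtime and status counts) are left null.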
+     */
+    public Map<String, Long> getStatusCounts() {
+        return statusCounts;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(TOTAL.getPreferredName(), total);
+        builder.field(FORECASTED_JOBS.getPreferredName(), forecastedJobs);
+
+        if (total > 0) {
+            builder.field(MEMORY_BYTES.getPreferredName(), memoryStats);
+            builder.field(RECORDS.getPreferredName(), recordStats);
+            builder.field(PROCESSING_TIME_MS.getPreferredName(), runtimeStats);
+            builder.field(STATUS.getPreferredName(), statusCounts);
+        }
+        return builder.endObject();
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(total, forecastedJobs, memoryStats, recordStats, runtimeStats, statusCounts);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+
+        ForecastStats other = (ForecastStats) obj;
+        return Objects.equals(total, other.total) &&
+            Objects.equals(forecastedJobs, other.forecastedJobs) &&
+            Objects.equals(memoryStats, other.memoryStats) &&
+            Objects.equals(recordStats, other.recordStats) &&
+            Objects.equals(runtimeStats, other.runtimeStats) &&
+            Objects.equals(statusCounts, other.statusCounts);
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/JobStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/JobStats.java
new file mode 100644
index 00000000000..df5be4aa4c5
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/JobStats.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml.job.stats;
+
+import org.elasticsearch.client.ml.job.config.Job;
+import org.elasticsearch.client.ml.job.config.JobState;
+import org.elasticsearch.client.ml.job.process.DataCounts;
+import org.elasticsearch.client.ml.job.process.ModelSizeStats;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.client.ml.NodeAttributes;
+
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * Class containing the statistics for a Machine Learning job.
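+ * The job id, data counts and state are always present; model size stats,
+ * forecast stats, node attributes, assignment explanation and open time are
+ * optional and may be {@code null}, matching the optional constructor
+ * arguments declared in the parser below.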
+ * + */ +public class JobStats implements ToXContentObject { + + private static final ParseField DATA_COUNTS = new ParseField("data_counts"); + private static final ParseField MODEL_SIZE_STATS = new ParseField("model_size_stats"); + private static final ParseField FORECASTS_STATS = new ParseField("forecasts_stats"); + private static final ParseField STATE = new ParseField("state"); + private static final ParseField NODE = new ParseField("node"); + private static final ParseField OPEN_TIME = new ParseField("open_time"); + private static final ParseField ASSIGNMENT_EXPLANATION = new ParseField("assignment_explanation"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("job_stats", + true, + (a) -> { + int i = 0; + String jobId = (String) a[i++]; + DataCounts dataCounts = (DataCounts) a[i++]; + JobState jobState = (JobState) a[i++]; + ModelSizeStats.Builder modelSizeStatsBuilder = (ModelSizeStats.Builder) a[i++]; + ModelSizeStats modelSizeStats = modelSizeStatsBuilder == null ? null : modelSizeStatsBuilder.build(); + ForecastStats forecastStats = (ForecastStats) a[i++]; + NodeAttributes node = (NodeAttributes) a[i++]; + String assignmentExplanation = (String) a[i++]; + TimeValue openTime = (TimeValue) a[i]; + return new JobStats(jobId, + dataCounts, + jobState, + modelSizeStats, + forecastStats, + node, + assignmentExplanation, + openTime); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), DataCounts.PARSER, DATA_COUNTS); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p) -> JobState.fromString(p.text()), + STATE, + ObjectParser.ValueType.VALUE); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ModelSizeStats.PARSER, MODEL_SIZE_STATS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ForecastStats.PARSER, FORECASTS_STATS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), NodeAttributes.PARSER, NODE); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), ASSIGNMENT_EXPLANATION); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.textOrNull(), OPEN_TIME.getPreferredName()), + OPEN_TIME, + ObjectParser.ValueType.STRING_OR_NULL); + } + + + private final String jobId; + private final DataCounts dataCounts; + private final JobState state; + private final ModelSizeStats modelSizeStats; + private final ForecastStats forecastStats; + private final NodeAttributes node; + private final String assignmentExplanation; + private final TimeValue openTime; + + JobStats(String jobId, DataCounts dataCounts, JobState state, @Nullable ModelSizeStats modelSizeStats, + @Nullable ForecastStats forecastStats, @Nullable NodeAttributes node, + @Nullable String assignmentExplanation, @Nullable TimeValue opentime) { + this.jobId = Objects.requireNonNull(jobId); + this.dataCounts = Objects.requireNonNull(dataCounts); + this.state = Objects.requireNonNull(state); + this.modelSizeStats = modelSizeStats; + this.forecastStats = forecastStats; + this.node = node; + this.assignmentExplanation = assignmentExplanation; + this.openTime = opentime; + } + + /** + * The jobId referencing the job for these statistics + */ + public String getJobId() { + return jobId; + } + + /** + * An object that describes the number of records processed and any related error counts + * See {@link DataCounts} + */ + public 
DataCounts getDataCounts() { + return dataCounts; + } + + /** + * An object that provides information about the size and contents of the model. + * See {@link ModelSizeStats} + */ + public ModelSizeStats getModelSizeStats() { + return modelSizeStats; + } + + /** + * An object that provides statistical information about forecasts of this job. + * See {@link ForecastStats} + */ + public ForecastStats getForecastStats() { + return forecastStats; + } + + /** + * The status of the job + * See {@link JobState} + */ + public JobState getState() { + return state; + } + + /** + * For open jobs only, contains information about the node where the job runs + * See {@link NodeAttributes} + */ + public NodeAttributes getNode() { + return node; + } + + /** + * For open jobs only, contains messages relating to the selection of a node to run the job. + */ + public String getAssignmentExplanation() { + return assignmentExplanation; + } + + /** + * For open jobs only, the elapsed time for which the job has been open + */ + public TimeValue getOpenTime() { + return openTime; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(DATA_COUNTS.getPreferredName(), dataCounts); + builder.field(STATE.getPreferredName(), state.toString()); + if (modelSizeStats != null) { + builder.field(MODEL_SIZE_STATS.getPreferredName(), modelSizeStats); + } + if (forecastStats != null) { + builder.field(FORECASTS_STATS.getPreferredName(), forecastStats); + } + if (node != null) { + builder.field(NODE.getPreferredName(), node); + } + if (assignmentExplanation != null) { + builder.field(ASSIGNMENT_EXPLANATION.getPreferredName(), assignmentExplanation); + } + if (openTime != null) { + builder.field(OPEN_TIME.getPreferredName(), openTime.getStringRep()); + } + return builder.endObject(); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, dataCounts, modelSizeStats, forecastStats, state, node, assignmentExplanation, openTime); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + JobStats other = (JobStats) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(this.dataCounts, other.dataCounts) && + Objects.equals(this.modelSizeStats, other.modelSizeStats) && + Objects.equals(this.forecastStats, other.forecastStats) && + Objects.equals(this.state, other.state) && + Objects.equals(this.node, other.node) && + Objects.equals(this.assignmentExplanation, other.assignmentExplanation) && + Objects.equals(this.openTime, other.openTime); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/SimpleStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/SimpleStats.java new file mode 100644 index 00000000000..f4c8aa0fa3b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/SimpleStats.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.job.stats; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Helper class for min, max, avg and total statistics for a quantity + */ +public class SimpleStats implements ToXContentObject { + + public static final ParseField MIN = new ParseField("min"); + public static final ParseField MAX = new ParseField("max"); + public static final ParseField AVG = new ParseField("avg"); + public static final ParseField TOTAL = new ParseField("total"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("simple_stats", true, + (a) -> { + int i = 0; + double total = (double)a[i++]; + double min = (double)a[i++]; + double max = (double)a[i++]; + double avg = (double)a[i++]; + return new SimpleStats(total, min, max, avg); + }); + + static { + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), TOTAL); + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), MIN); + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), MAX); + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), AVG); + } + + private final double total; + private final double min; + private final double max; + private final double avg; + + SimpleStats(double total, double min, double max, double avg) { + this.total = total; + this.min = min; + this.max = max; + this.avg = avg; + } + + public double getMin() { + return min; + } + + public double getMax() { + return max; + } + + public double getAvg() { + return avg; + } + + public double getTotal() { + return total; + } + + @Override + public int hashCode() { + return Objects.hash(total, min, max, avg); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + SimpleStats other = (SimpleStats) obj; + return Objects.equals(total, other.total) && + Objects.equals(min, other.min) && + Objects.equals(avg, other.avg) && + Objects.equals(max, other.max); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MIN.getPreferredName(), min); + builder.field(MAX.getPreferredName(), max); + builder.field(AVG.getPreferredName(), avg); + builder.field(TOTAL.getPreferredName(), total); + builder.endObject(); + return builder; + } +} + diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/PageParams.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/PageParams.java new file mode 100644 index 00000000000..52d54188f70 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/PageParams.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml.job.util;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * Paging parameters for GET requests
+ */
+public class PageParams implements ToXContentObject {
+
+    public static final ParseField PAGE = new ParseField("page");
+    public static final ParseField FROM = new ParseField("from");
+    public static final ParseField SIZE = new ParseField("size");
+
+    public static final ConstructingObjectParser<PageParams, Void> PARSER = new ConstructingObjectParser<>(PAGE.getPreferredName(),
+            a -> new PageParams((Integer) a[0], (Integer) a[1]));
+
+    static {
+        PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), FROM);
+        PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), SIZE);
+    }
+
+    private final Integer from;
+    private final Integer size;
+
+    /**
+     * Constructs paging parameters
+     * @param from skips the specified number of items. When {@code null} the default value will be used.
+     * @param size specifies the maximum number of items to obtain. When {@code null} the default value will be used.
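+     *
+     * <p>A minimal usage sketch (the values are hypothetical examples):
+     * <pre>{@code
+     * PageParams page = new PageParams(100, 20);        // skip 100 items, return up to 20
+     * PageParams defaults = new PageParams(null, null);  // use server-side defaults
+     * }</pre>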
+ */ + public PageParams(@Nullable Integer from, @Nullable Integer size) { + this.from = from; + this.size = size; + } + + public int getFrom() { + return from; + } + + public int getSize() { + return size; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (from != null) { + builder.field(FROM.getPreferredName(), from); + } + if (size != null) { + builder.field(SIZE.getPreferredName(), size); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(from, size); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PageParams other = (PageParams) obj; + return Objects.equals(from, other.from) && + Objects.equals(size, other.size); + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/util/TimeUtil.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/TimeUtil.java similarity index 97% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/util/TimeUtil.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/TimeUtil.java index 549b1969491..4c21ffb2175 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/util/TimeUtil.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/TimeUtil.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.util; +package org.elasticsearch.client.ml.job.util; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/DisableUserRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/DisableUserRequest.java new file mode 100644 index 00000000000..dc5411f3be7 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/DisableUserRequest.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +/** + * Request object to disable a native realm or built-in user. 
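+ * A usage sketch, with a hypothetical username:
+ * <pre>{@code
+ * DisableUserRequest request = new DisableUserRequest("jane", RefreshPolicy.getDefault());
+ * }</pre>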
+ */ +public final class DisableUserRequest extends SetUserEnabledRequest { + + public DisableUserRequest(String username, RefreshPolicy refreshPolicy) { + super(false, username, refreshPolicy); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/EmptyResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/EmptyResponse.java new file mode 100644 index 00000000000..62fea88e523 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/EmptyResponse.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Response for a request which simply returns an empty object. + */ +public final class EmptyResponse { + + private static final ObjectParser PARSER = new ObjectParser<>("empty_response", false, EmptyResponse::new); + + public static EmptyResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/EnableUserRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/EnableUserRequest.java new file mode 100644 index 00000000000..851cb683e05 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/EnableUserRequest.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +/** + * Request object to enable a native realm or built-in user. 
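+ * The counterpart of {@link DisableUserRequest}; a usage sketch, with a
+ * hypothetical username:
+ * <pre>{@code
+ * EnableUserRequest request = new EnableUserRequest("jane", RefreshPolicy.IMMEDIATE);
+ * }</pre>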
+ */ +public final class EnableUserRequest extends SetUserEnabledRequest { + + public EnableUserRequest(String username, RefreshPolicy refreshPolicy) { + super(true, username, refreshPolicy); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserRequest.java new file mode 100644 index 00000000000..f8c30a25aed --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserRequest.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.CharArrays; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +/** + * Request object to create or update a user in the native realm. + */ +public final class PutUserRequest implements Validatable, Closeable, ToXContentObject { + + private final String username; + private final List roles; + private final String fullName; + private final String email; + private final Map metadata; + private final char[] password; + private final boolean enabled; + private final RefreshPolicy refreshPolicy; + + public PutUserRequest(String username, char[] password, List roles, String fullName, String email, boolean enabled, + Map metadata, RefreshPolicy refreshPolicy) { + this.username = Objects.requireNonNull(username, "username is required"); + this.password = password; + this.roles = Collections.unmodifiableList(Objects.requireNonNull(roles, "roles must be specified")); + this.fullName = fullName; + this.email = email; + this.enabled = enabled; + this.metadata = metadata == null ? Collections.emptyMap() : Collections.unmodifiableMap(metadata); + this.refreshPolicy = refreshPolicy == null ? 
RefreshPolicy.getDefault() : refreshPolicy; + } + + public String getUsername() { + return username; + } + + public List getRoles() { + return roles; + } + + public String getFullName() { + return fullName; + } + + public String getEmail() { + return email; + } + + public Map getMetadata() { + return metadata; + } + + public char[] getPassword() { + return password; + } + + public boolean isEnabled() { + return enabled; + } + + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PutUserRequest that = (PutUserRequest) o; + return enabled == that.enabled && + Objects.equals(username, that.username) && + Objects.equals(roles, that.roles) && + Objects.equals(fullName, that.fullName) && + Objects.equals(email, that.email) && + Objects.equals(metadata, that.metadata) && + Arrays.equals(password, that.password) && + refreshPolicy == that.refreshPolicy; + } + + @Override + public int hashCode() { + int result = Objects.hash(username, roles, fullName, email, metadata, enabled, refreshPolicy); + result = 31 * result + Arrays.hashCode(password); + return result; + } + + @Override + public void close() { + if (password != null) { + Arrays.fill(password, (char) 0); + } + } + + @Override + public Optional validate() { + if (metadata != null && metadata.keySet().stream().anyMatch(s -> s.startsWith("_"))) { + ValidationException validationException = new ValidationException(); + validationException.addValidationError("metadata keys may not start with [_]"); + return Optional.of(validationException); + } + return Optional.empty(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("username", username); + if (password != null) { + byte[] charBytes = CharArrays.toUtf8Bytes(password); + builder.field("password").utf8Value(charBytes, 0, charBytes.length); + } + if (roles != null) { + builder.field("roles", roles); + } + if (fullName != null) { + builder.field("full_name", fullName); + } + if (email != null) { + builder.field("email", email); + } + if (metadata != null) { + builder.field("metadata", metadata); + } + return builder.endObject(); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserResponse.java new file mode 100644 index 00000000000..73b57fb57ec --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserResponse.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.client.security;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * Response when adding a user to the native realm. Returns a
+ * single boolean field for whether the user was created or updated.
+ */
+public final class PutUserResponse {
+
+    private final boolean created;
+
+    public PutUserResponse(boolean created) {
+        this.created = created;
+    }
+
+    public boolean isCreated() {
+        return created;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        PutUserResponse that = (PutUserResponse) o;
+        return created == that.created;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(created);
+    }
+
+    private static final ConstructingObjectParser<PutUserResponse, Void> PARSER = new ConstructingObjectParser<>("put_user_response",
+        true, args -> new PutUserResponse((boolean) args[0]));
+
+    static {
+        PARSER.declareBoolean(constructorArg(), new ParseField("created"));
+        PARSER.declareObject((a, b) -> {}, (parser, context) -> null, new ParseField("user")); // ignore the user field!
+    }
+
+    public static PutUserResponse fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java
new file mode 100644
index 00000000000..8b72f704edf
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.security;
+
+/**
+ * Enumeration of values that control the refresh policy for a request that
+ * supports specifying a refresh policy.
+ */
+public enum RefreshPolicy {
+
+    /**
+     * Don't refresh after this request. The default.
+     */
+    NONE("false"),
+    /**
+     * Force a refresh as part of this request. This refresh policy does not scale for high indexing or search throughput but is
+     * useful to present a consistent view for indices with very low traffic. And it is wonderful for tests!
+     */
+    IMMEDIATE("true"),
+    /**
+     * Leave this request open until a refresh has made the contents of this request visible to search. This refresh policy is
+     * compatible with high indexing and search throughput but it causes the request to wait to reply until a refresh occurs.
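+     * Serialized as the {@code wait_for} value (see {@link #getValue()}).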
+ */ + WAIT_UNTIL("wait_for"); + + private final String value; + + RefreshPolicy(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + /** + * Get the default refresh policy, which is NONE + */ + public static RefreshPolicy getDefault() { + return RefreshPolicy.NONE; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/SetUserEnabledRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/SetUserEnabledRequest.java new file mode 100644 index 00000000000..ab61f7d879d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/SetUserEnabledRequest.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.Validatable; + +import java.util.Objects; + +/** + * Abstract request object to enable or disable a built-in or native user. + */ +public abstract class SetUserEnabledRequest implements Validatable { + + private final boolean enabled; + private final String username; + private final RefreshPolicy refreshPolicy; + + SetUserEnabledRequest(boolean enabled, String username, RefreshPolicy refreshPolicy) { + this.enabled = enabled; + this.username = Objects.requireNonNull(username, "username is required"); + this.refreshPolicy = refreshPolicy == null ? 
RefreshPolicy.getDefault() : refreshPolicy; + } + + public boolean isEnabled() { + return enabled; + } + + public String getUsername() { + return username; + } + + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } +} diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt index 33e136a66f4..cc179e12e31 100644 --- a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt +++ b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt @@ -20,5 +20,14 @@ org.apache.http.entity.ContentType#create(java.lang.String,java.lang.String) org.apache.http.entity.ContentType#create(java.lang.String,java.nio.charset.Charset) org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameValuePair[]) +@defaultMessage ES's logging infrastructure uses log4j2 which we don't want to force on high level rest client users +org.elasticsearch.common.logging.DeprecationLogger +org.elasticsearch.common.logging.ESLoggerFactory +org.elasticsearch.common.logging.LogConfigurator +org.elasticsearch.common.logging.LoggerMessageFormat +org.elasticsearch.common.logging.Loggers +org.elasticsearch.common.logging.NodeNamePatternConverter +org.elasticsearch.common.logging.PrefixLogger + @defaultMessage We can't rely on log4j2 being on the classpath so don't log deprecations! org.elasticsearch.common.xcontent.LoggingDeprecationHandler diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 58b4b268788..a914008376a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.client; +import org.apache.http.util.EntityUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; @@ -34,6 +35,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.client.Request; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.rest.RestStatus; @@ -174,6 +176,8 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase { request.timeout("5s"); ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync); + logger.info("Shard stats\n{}", EntityUtils.toString( + client().performRequest(new Request("GET", "/_cat/shards")).getEntity())); assertYellowShards(response); assertThat(response.getIndices().size(), equalTo(0)); } @@ -186,6 +190,8 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase { request.level(ClusterHealthRequest.Level.INDICES); ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync); + logger.info("Shard stats\n{}", EntityUtils.toString( + client().performRequest(new Request("GET", "/_cat/shards")).getEntity())); assertYellowShards(response); assertThat(response.getIndices().size(), equalTo(2)); 
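         // With level=INDICES the response carries per-index health; each entry is checked in the loop below.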
for (Map.Entry entry : response.getIndices().entrySet()) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterRequestConvertersTests.java new file mode 100644 index 00000000000..9a7596957d0 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterRequestConvertersTests.java @@ -0,0 +1,150 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.common.Priority; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.CoreMatchers; +import org.junit.Assert; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class ClusterRequestConvertersTests extends ESTestCase { + + public void testClusterPutSettings() throws IOException { + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomMasterTimeout(request, expectedParams); + RequestConvertersTests.setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request expectedRequest = ClusterRequestConverters.clusterPutSettings(request); + Assert.assertEquals("/_cluster/settings", expectedRequest.getEndpoint()); + Assert.assertEquals(HttpPut.METHOD_NAME, expectedRequest.getMethod()); + Assert.assertEquals(expectedParams, expectedRequest.getParameters()); + } + + public void testClusterGetSettings() throws IOException { + ClusterGetSettingsRequest request = new ClusterGetSettingsRequest(); + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomMasterTimeout(request, expectedParams); + request.includeDefaults(ESTestCase.randomBoolean()); + if (request.includeDefaults()) { + expectedParams.put("include_defaults", String.valueOf(true)); + } + + Request expectedRequest = ClusterRequestConverters.clusterGetSettings(request); + Assert.assertEquals("/_cluster/settings", expectedRequest.getEndpoint()); + Assert.assertEquals(HttpGet.METHOD_NAME, 
expectedRequest.getMethod()); + Assert.assertEquals(expectedParams, expectedRequest.getParameters()); + } + + public void testClusterHealth() { + ClusterHealthRequest healthRequest = new ClusterHealthRequest(); + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomLocal(healthRequest, expectedParams); + String timeoutType = ESTestCase.randomFrom("timeout", "masterTimeout", "both", "none"); + String timeout = ESTestCase.randomTimeValue(); + String masterTimeout = ESTestCase.randomTimeValue(); + switch (timeoutType) { + case "timeout": + healthRequest.timeout(timeout); + expectedParams.put("timeout", timeout); + // If Master Timeout wasn't set it uses the same value as Timeout + expectedParams.put("master_timeout", timeout); + break; + case "masterTimeout": + expectedParams.put("timeout", "30s"); + healthRequest.masterNodeTimeout(masterTimeout); + expectedParams.put("master_timeout", masterTimeout); + break; + case "both": + healthRequest.timeout(timeout); + expectedParams.put("timeout", timeout); + healthRequest.masterNodeTimeout(timeout); + expectedParams.put("master_timeout", timeout); + break; + case "none": + expectedParams.put("timeout", "30s"); + expectedParams.put("master_timeout", "30s"); + break; + default: + throw new UnsupportedOperationException(); + } + RequestConvertersTests.setRandomWaitForActiveShards(healthRequest::waitForActiveShards, ActiveShardCount.NONE, expectedParams); + if (ESTestCase.randomBoolean()) { + ClusterHealthRequest.Level level = ESTestCase.randomFrom(ClusterHealthRequest.Level.values()); + healthRequest.level(level); + expectedParams.put("level", level.name().toLowerCase(Locale.ROOT)); + } else { + expectedParams.put("level", "cluster"); + } + if (ESTestCase.randomBoolean()) { + Priority priority = ESTestCase.randomFrom(Priority.values()); + healthRequest.waitForEvents(priority); + expectedParams.put("wait_for_events", priority.name().toLowerCase(Locale.ROOT)); + } + if (ESTestCase.randomBoolean()) { + ClusterHealthStatus status = ESTestCase.randomFrom(ClusterHealthStatus.values()); + healthRequest.waitForStatus(status); + expectedParams.put("wait_for_status", status.name().toLowerCase(Locale.ROOT)); + } + if (ESTestCase.randomBoolean()) { + boolean waitForNoInitializingShards = ESTestCase.randomBoolean(); + healthRequest.waitForNoInitializingShards(waitForNoInitializingShards); + if (waitForNoInitializingShards) { + expectedParams.put("wait_for_no_initializing_shards", Boolean.TRUE.toString()); + } + } + if (ESTestCase.randomBoolean()) { + boolean waitForNoRelocatingShards = ESTestCase.randomBoolean(); + healthRequest.waitForNoRelocatingShards(waitForNoRelocatingShards); + if (waitForNoRelocatingShards) { + expectedParams.put("wait_for_no_relocating_shards", Boolean.TRUE.toString()); + } + } + String[] indices = ESTestCase.randomBoolean() ? 
null : RequestConvertersTests.randomIndicesNames(0, 5); + healthRequest.indices(indices); + + Request request = ClusterRequestConverters.clusterHealth(healthRequest); + Assert.assertThat(request, CoreMatchers.notNullValue()); + Assert.assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); + Assert.assertThat(request.getEntity(), nullValue()); + if (indices != null && indices.length > 0) { + Assert.assertThat(request.getEndpoint(), equalTo("/_cluster/health/" + String.join(",", indices))); + } else { + Assert.assertThat(request.getEndpoint(), equalTo("/_cluster/health")); + } + Assert.assertThat(request.getParameters(), equalTo(expectedParams)); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index 89f357477fa..feb57bed9c4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -36,17 +36,24 @@ import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.query.IdsQueryBuilder; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -624,6 +631,181 @@ public class CrudIT extends ESRestHighLevelClientTestCase { validateBulkResponses(nbItems, errors, bulkResponse, bulkRequest); } + public void testReindex() throws IOException { + final String sourceIndex = "source1"; + final String destinationIndex = "dest"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + createIndex(destinationIndex, settings); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(sourceIndex, "type", "1") + .source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + .add(new IndexRequest(sourceIndex, "type", "2") + .source(Collections.singletonMap("foo2", "bar2"), XContentType.JSON)) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + // test1: create one doc in dest + ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices(sourceIndex); + reindexRequest.setDestIndex(destinationIndex); + reindexRequest.setSourceQuery(new IdsQueryBuilder().addIds("1").types("type")); + 
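// Refresh the destination index when the reindex completes so the
+            // copied documents are immediately searchable.
+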
reindexRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = execute(reindexRequest, highLevelClient()::reindex, highLevelClient()::reindexAsync); + assertEquals(1, bulkResponse.getCreated()); + assertEquals(1, bulkResponse.getTotal()); + assertEquals(0, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + } + { + // test2: create 1 and update 1 + ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices(sourceIndex); + reindexRequest.setDestIndex(destinationIndex); + BulkByScrollResponse bulkResponse = execute(reindexRequest, highLevelClient()::reindex, highLevelClient()::reindexAsync); + assertEquals(1, bulkResponse.getCreated()); + assertEquals(2, bulkResponse.getTotal()); + assertEquals(1, bulkResponse.getUpdated()); + assertEquals(0, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + } + } + + public void testUpdateByQuery() throws IOException { + final String sourceIndex = "source1"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(sourceIndex, "type", "1") + .source(Collections.singletonMap("foo", 1), XContentType.JSON)) + .add(new IndexRequest(sourceIndex, "type", "2") + .source(Collections.singletonMap("foo", 2), XContentType.JSON)) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + // test1: create one doc in dest + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(sourceIndex); + updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1").types("type")); + updateByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); + assertEquals(1, bulkResponse.getTotal()); + assertEquals(1, bulkResponse.getUpdated()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + } + { + // test2: update using script + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(sourceIndex); + updateByQueryRequest.setScript(new Script("if (ctx._source.foo == 2) ctx._source.foo++;")); + updateByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); + assertEquals(2, 
bulkResponse.getTotal()); + assertEquals(2, bulkResponse.getUpdated()); + assertEquals(0, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + assertEquals( + 3, + (int) (highLevelClient().get(new GetRequest(sourceIndex, "type", "2"), RequestOptions.DEFAULT) + .getSourceAsMap().get("foo")) + ); + } + } + + public void testDeleteByQuery() throws IOException { + final String sourceIndex = "source1"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(sourceIndex, "type", "1") + .source(Collections.singletonMap("foo", 1), XContentType.JSON)) + .add(new IndexRequest(sourceIndex, "type", "2") + .source(Collections.singletonMap("foo", 2), XContentType.JSON)) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + // test1: delete one doc + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(); + deleteByQueryRequest.indices(sourceIndex); + deleteByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1").types("type")); + deleteByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(deleteByQueryRequest, highLevelClient()::deleteByQuery, highLevelClient()::deleteByQueryAsync); + assertEquals(1, bulkResponse.getTotal()); + assertEquals(1, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + assertEquals( + 1, + highLevelClient().search(new SearchRequest(sourceIndex), RequestOptions.DEFAULT).getHits().totalHits + ); + } + } + public void testBulkProcessorIntegration() throws IOException { int nbItems = randomIntBetween(10, 100); boolean[] errors = new boolean[nbItems]; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index e05fa9fa79b..9217b0b4e55 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.ingest.Pipeline; @@ -33,7 +34,10 @@ import org.junit.AfterClass; import org.junit.Before; import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; 
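+// Base64 and StandardCharsets are used in restClientSettings() below to
+// build the Basic Authorization header for the secured test cluster.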
import java.util.Collections; +import java.util.Objects; public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase { @@ -126,7 +130,8 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase { } protected static void createPipeline(PutPipelineRequest putPipelineRequest) throws IOException { - assertOK(client().performRequest(RequestConverters.putPipeline(putPipelineRequest))); + assertTrue(execute( + putPipelineRequest, highLevelClient().ingest()::putPipeline, highLevelClient().ingest()::putPipelineAsync).isAcknowledged()); } protected static void clusterUpdateSettings(Settings persistentSettings, @@ -134,6 +139,18 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase { ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); request.persistentSettings(persistentSettings); request.transientSettings(transientSettings); - assertOK(client().performRequest(RequestConverters.clusterPutSettings(request))); + assertTrue(execute( + request, highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync).isAcknowledged()); + } + + @Override + protected Settings restClientSettings() { + final String user = Objects.requireNonNull(System.getProperty("tests.rest.cluster.username")); + final String pass = Objects.requireNonNull(System.getProperty("tests.rest.cluster.password")); + final String token = "Basic " + Base64.getEncoder().encodeToString((user + ":" + pass).getBytes(StandardCharsets.UTF_8)); + return Settings.builder() + .put(super.restClientSettings()) + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/GrapRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/GrapRequestConvertersTests.java new file mode 100644 index 00000000000..6598800d76e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/GrapRequestConvertersTests.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.test.ESTestCase; +import org.junit.Assert; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class GraphRequestConvertersTests extends ESTestCase { + + public void testGraphExplore() throws Exception { + Map<String, String> expectedParams = new HashMap<>(); + + GraphExploreRequest graphExploreRequest = new GraphExploreRequest(); + graphExploreRequest.sampleDiversityField("diversity"); + graphExploreRequest.indices("index1", "index2"); + graphExploreRequest.types("type1", "type2"); + int timeout = ESTestCase.randomIntBetween(10000, 20000); + graphExploreRequest.timeout(TimeValue.timeValueMillis(timeout)); + graphExploreRequest.useSignificance(ESTestCase.randomBoolean()); + int numHops = ESTestCase.randomIntBetween(1, 5); + for (int i = 0; i < numHops; i++) { + int hopNumber = i + 1; + QueryBuilder guidingQuery = null; + if (ESTestCase.randomBoolean()) { + guidingQuery = new TermQueryBuilder("field" + hopNumber, "value" + hopNumber); + } + Hop hop = graphExploreRequest.createNextHop(guidingQuery); + hop.addVertexRequest("field" + hopNumber); + hop.getVertexRequest(0).addInclude("value" + hopNumber, hopNumber); + } + Request request = GraphRequestConverters.explore(graphExploreRequest); + Assert.assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + Assert.assertEquals("/index1,index2/type1,type2/_xpack/graph/_explore", request.getEndpoint()); + Assert.assertEquals(expectedParams, request.getParameters()); + Assert.assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters())); + RequestConvertersTests.assertToXContentBody(graphExploreRequest, request.getEntity()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java new file mode 100644 index 00000000000..4376b47d737 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java @@ -0,0 +1,139 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.Vertex; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.hamcrest.Matchers; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +public class GraphIT extends ESRestHighLevelClientTestCase { + + @Before + public void indexDocuments() throws IOException { + // Create chain of doc IDs across indices 1->2->3 + Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/type/1"); + doc1.setJsonEntity("{ \"num\":[1], \"const\":\"start\"}"); + client().performRequest(doc1); + + Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/type/1"); + doc2.setJsonEntity("{\"num\":[1,2], \"const\":\"foo\"}"); + client().performRequest(doc2); + + Request doc3 = new Request(HttpPut.METHOD_NAME, "/index2/type/2"); + doc3.setJsonEntity("{\"num\":[2,3], \"const\":\"foo\"}"); + client().performRequest(doc3); + + Request doc4 = new Request(HttpPut.METHOD_NAME, "/index_no_field_data/type/2"); + doc4.setJsonEntity("{\"num\":\"string\", \"const\":\"foo\"}"); + client().performRequest(doc4); + + Request doc5 = new Request(HttpPut.METHOD_NAME, "/index_no_field_data/type/2"); + doc5.setJsonEntity("{\"num\":[2,4], \"const\":\"foo\"}"); + client().performRequest(doc5); + + + client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh")); + } + + public void testCleanExplore() throws Exception { + GraphExploreRequest graphExploreRequest = new GraphExploreRequest(); + graphExploreRequest.indices("index1", "index2"); + graphExploreRequest.useSignificance(false); + int numHops = 3; + for (int i = 0; i < numHops; i++) { + QueryBuilder guidingQuery = null; + if (i == 0) { + guidingQuery = new TermQueryBuilder("const.keyword", "start"); + } else if (randomBoolean()) { + guidingQuery = new TermQueryBuilder("const.keyword", "foo"); + } + Hop hop = graphExploreRequest.createNextHop(guidingQuery); + VertexRequest vr = hop.addVertexRequest("num"); + vr.minDocCount(1); + } + Map<String, Integer> expectedTermsAndDepths = new HashMap<>(); + expectedTermsAndDepths.put("1", 0); + expectedTermsAndDepths.put("2", 1); + expectedTermsAndDepths.put("3", 2); + + GraphExploreResponse exploreResponse = highLevelClient().graph().explore(graphExploreRequest, RequestOptions.DEFAULT); + Map<String, Integer> actualTermsAndDepths = new HashMap<>(); + Collection<Vertex> v = exploreResponse.getVertices(); + for (Vertex vertex : v) { + actualTermsAndDepths.put(vertex.getTerm(), vertex.getHopDepth()); + } + assertEquals(expectedTermsAndDepths, actualTermsAndDepths); + assertThat(exploreResponse.isTimedOut(), Matchers.is(false)); + ShardOperationFailedException[] failures = exploreResponse.getShardFailures(); + assertThat(failures.length, Matchers.equalTo(0)); + + } + + public void testBadExplore() throws Exception { + // Explore indices where lack of fielddata=true on one index leads to partial failures + GraphExploreRequest graphExploreRequest = new GraphExploreRequest(); + graphExploreRequest.indices("index1", "index2",
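+                // "index_no_field_data" (populated in indexDocuments() with a text "num" value) has no fielddata, which triggers the shard failure asserted below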
"index_no_field_data"); + graphExploreRequest.useSignificance(false); + int numHops = 3; + for (int i = 0; i < numHops; i++) { + QueryBuilder guidingQuery = null; + if (i == 0) { + guidingQuery = new TermQueryBuilder("const.keyword", "start"); + } else if (randomBoolean()){ + guidingQuery = new TermQueryBuilder("const.keyword", "foo"); + } + Hop hop = graphExploreRequest.createNextHop(guidingQuery); + VertexRequest vr = hop.addVertexRequest("num"); + vr.minDocCount(1); + } + Map expectedTermsAndDepths = new HashMap<>(); + expectedTermsAndDepths.put("1", 0); + expectedTermsAndDepths.put("2", 1); + expectedTermsAndDepths.put("3", 2); + + GraphExploreResponse exploreResponse = highLevelClient().graph().explore(graphExploreRequest, RequestOptions.DEFAULT); + Map actualTermsAndDepths = new HashMap<>(); + Collection v = exploreResponse.getVertices(); + for (Vertex vertex : v) { + actualTermsAndDepths.put(vertex.getTerm(), vertex.getHopDepth()); + } + assertEquals(expectedTermsAndDepths, actualTermsAndDepths); + assertThat(exploreResponse.isTimedOut(), Matchers.is(false)); + ShardOperationFailedException[] failures = exploreResponse.getShardFailures(); + assertThat(failures.length, Matchers.equalTo(1)); + assertTrue(failures[0].reason().contains("Fielddata is disabled")); + + } + + +} \ No newline at end of file diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestRequestConvertersTests.java new file mode 100644 index 00000000000..a615757fa22 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestRequestConvertersTests.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.GetPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.SimulatePipelineRequest; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.junit.Assert; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import java.util.StringJoiner; + +public class IngestRequestConvertersTests extends ESTestCase { + + public void testPutPipeline() throws IOException { + String pipelineId = "some_pipeline_id"; + PutPipelineRequest request = new PutPipelineRequest( + "some_pipeline_id", + new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), + XContentType.JSON + ); + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomMasterTimeout(request, expectedParams); + RequestConvertersTests.setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request expectedRequest = IngestRequestConverters.putPipeline(request); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + endpoint.add("_ingest/pipeline"); + endpoint.add(pipelineId); + Assert.assertEquals(endpoint.toString(), expectedRequest.getEndpoint()); + Assert.assertEquals(HttpPut.METHOD_NAME, expectedRequest.getMethod()); + Assert.assertEquals(expectedParams, expectedRequest.getParameters()); + } + + public void testGetPipeline() { + String pipelineId = "some_pipeline_id"; + Map expectedParams = new HashMap<>(); + GetPipelineRequest request = new GetPipelineRequest("some_pipeline_id"); + RequestConvertersTests.setRandomMasterTimeout(request, expectedParams); + Request expectedRequest = IngestRequestConverters.getPipeline(request); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + endpoint.add("_ingest/pipeline"); + endpoint.add(pipelineId); + Assert.assertEquals(endpoint.toString(), expectedRequest.getEndpoint()); + Assert.assertEquals(HttpGet.METHOD_NAME, expectedRequest.getMethod()); + Assert.assertEquals(expectedParams, expectedRequest.getParameters()); + } + + public void testDeletePipeline() { + String pipelineId = "some_pipeline_id"; + Map expectedParams = new HashMap<>(); + DeletePipelineRequest request = new DeletePipelineRequest(pipelineId); + RequestConvertersTests.setRandomMasterTimeout(request, expectedParams); + RequestConvertersTests.setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + Request expectedRequest = IngestRequestConverters.deletePipeline(request); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + endpoint.add("_ingest/pipeline"); + endpoint.add(pipelineId); + Assert.assertEquals(endpoint.toString(), expectedRequest.getEndpoint()); + Assert.assertEquals(HttpDelete.METHOD_NAME, expectedRequest.getMethod()); + Assert.assertEquals(expectedParams, expectedRequest.getParameters()); + } + + public void testSimulatePipeline() throws IOException { + String pipelineId = ESTestCase.randomBoolean() ? 
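+            // pipelineId is randomly null so that both the "/_ingest/pipeline/{id}/_simulate" and the id-less "/_ingest/pipeline/_simulate" endpoints get exercised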
"some_pipeline_id" : null; + boolean verbose = ESTestCase.randomBoolean(); + String json = "{\"pipeline\":{" + + "\"description\":\"_description\"," + + "\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]}," + + "\"docs\":[{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"rab\"}}]}"; + SimulatePipelineRequest request = new SimulatePipelineRequest( + new BytesArray(json.getBytes(StandardCharsets.UTF_8)), + XContentType.JSON + ); + request.setId(pipelineId); + request.setVerbose(verbose); + Map expectedParams = new HashMap<>(); + expectedParams.put("verbose", Boolean.toString(verbose)); + + Request expectedRequest = IngestRequestConverters.simulatePipeline(request); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + endpoint.add("_ingest/pipeline"); + if (pipelineId != null && !pipelineId.isEmpty()) + endpoint.add(pipelineId); + endpoint.add("_simulate"); + Assert.assertEquals(endpoint.toString(), expectedRequest.getEndpoint()); + Assert.assertEquals(HttpPost.METHOD_NAME, expectedRequest.getMethod()); + Assert.assertEquals(expectedParams, expectedRequest.getParameters()); + RequestConvertersTests.assertToXContentBody(request, expectedRequest.getEntity()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index 43a41960e00..26e6251af48 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -20,21 +20,40 @@ package org.elasticsearch.client; import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.DeleteJobRequest; +import org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.ForecastJobRequest; +import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetInfluencersRequest; +import org.elasticsearch.client.ml.GetJobRequest; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetOverallBucketsRequest; +import org.elasticsearch.client.ml.GetRecordsRequest; +import org.elasticsearch.client.ml.OpenJobRequest; +import org.elasticsearch.client.ml.PostDataRequest; +import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.job.config.AnalysisConfig; +import org.elasticsearch.client.ml.job.config.Detector; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.config.JobUpdate; +import org.elasticsearch.client.ml.job.config.JobUpdateTests; +import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; -import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; -import org.elasticsearch.protocol.xpack.ml.PutJobRequest; -import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig; -import 
org.elasticsearch.protocol.xpack.ml.job.config.Detector; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import org.elasticsearch.test.ESTestCase; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -46,6 +65,7 @@ public class MLRequestConvertersTests extends ESTestCase { Request request = MLRequestConverters.putJob(putJobRequest); + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); assertThat(request.getEndpoint(), equalTo("/_xpack/ml/anomaly_detectors/foo")); try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { Job parsedJob = Job.PARSER.apply(parser, null).build(); @@ -53,6 +73,41 @@ public class MLRequestConvertersTests extends ESTestCase { } } + public void testGetJob() { + GetJobRequest getJobRequest = new GetJobRequest(); + + Request request = MLRequestConverters.getJob(getJobRequest); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors", request.getEndpoint()); + assertFalse(request.getParameters().containsKey("allow_no_jobs")); + + getJobRequest = new GetJobRequest("job1", "jobs*"); + getJobRequest.setAllowNoJobs(true); + request = MLRequestConverters.getJob(getJobRequest); + + assertEquals("/_xpack/ml/anomaly_detectors/job1,jobs*", request.getEndpoint()); + assertEquals(Boolean.toString(true), request.getParameters().get("allow_no_jobs")); + } + + public void testGetJobStats() { + GetJobStatsRequest getJobStatsRequestRequest = new GetJobStatsRequest(); + + Request request = MLRequestConverters.getJobStats(getJobStatsRequestRequest); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/_stats", request.getEndpoint()); + assertFalse(request.getParameters().containsKey("allow_no_jobs")); + + getJobStatsRequestRequest = new GetJobStatsRequest("job1", "jobs*"); + getJobStatsRequestRequest.setAllowNoJobs(true); + request = MLRequestConverters.getJobStats(getJobStatsRequestRequest); + + assertEquals("/_xpack/ml/anomaly_detectors/job1,jobs*/_stats", request.getEndpoint()); + assertEquals(Boolean.toString(true), request.getParameters().get("allow_no_jobs")); + } + + public void testOpenJob() throws Exception { String jobId = "some-job-id"; OpenJobRequest openJobRequest = new OpenJobRequest(jobId); @@ -61,9 +116,27 @@ public class MLRequestConvertersTests extends ESTestCase { Request request = MLRequestConverters.openJob(openJobRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_open", request.getEndpoint()); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - request.getEntity().writeTo(bos); - assertEquals(bos.toString("UTF-8"), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}"); + assertEquals(requestEntityToString(request), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}"); + } + + public void testCloseJob() throws Exception { + String jobId = "somejobid"; + CloseJobRequest closeJobRequest = new CloseJobRequest(jobId); + + Request request = MLRequestConverters.closeJob(closeJobRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_close", request.getEndpoint()); + assertEquals("{\"job_id\":\"somejobid\"}", requestEntityToString(request)); + + closeJobRequest = new CloseJobRequest(jobId, "otherjobs*"); + 
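+        // the second request exercises the optional close parameters (timeout, force, allow_no_jobs), all serialized into the body asserted below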
closeJobRequest.setForce(true); + closeJobRequest.setAllowNoJobs(false); + closeJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); + request = MLRequestConverters.closeJob(closeJobRequest); + + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + ",otherjobs*/_close", request.getEndpoint()); + assertEquals("{\"job_id\":\"somejobid,otherjobs*\",\"timeout\":\"10m\",\"force\":true,\"allow_no_jobs\":false}", + requestEntityToString(request)); } public void testDeleteJob() { @@ -80,6 +153,159 @@ public class MLRequestConvertersTests extends ESTestCase { assertEquals(Boolean.toString(true), request.getParameters().get("force")); } + public void testFlushJob() throws Exception { + String jobId = randomAlphaOfLength(10); + FlushJobRequest flushJobRequest = new FlushJobRequest(jobId); + + Request request = MLRequestConverters.flushJob(flushJobRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_flush", request.getEndpoint()); + assertEquals("{\"job_id\":\"" + jobId + "\"}", requestEntityToString(request)); + + flushJobRequest.setSkipTime("1000"); + flushJobRequest.setStart("105"); + flushJobRequest.setEnd("200"); + flushJobRequest.setAdvanceTime("100"); + flushJobRequest.setCalcInterim(true); + request = MLRequestConverters.flushJob(flushJobRequest); + assertEquals( + "{\"job_id\":\"" + jobId + "\",\"calc_interim\":true,\"start\":\"105\"," + + "\"end\":\"200\",\"advance_time\":\"100\",\"skip_time\":\"1000\"}", + requestEntityToString(request)); + } + + public void testForecastJob() throws Exception { + String jobId = randomAlphaOfLength(10); + ForecastJobRequest forecastJobRequest = new ForecastJobRequest(jobId); + + forecastJobRequest.setDuration(TimeValue.timeValueHours(10)); + forecastJobRequest.setExpiresIn(TimeValue.timeValueHours(12)); + Request request = MLRequestConverters.forecastJob(forecastJobRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_forecast", request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + ForecastJobRequest parsedRequest = ForecastJobRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(forecastJobRequest)); + } + } + + public void testUpdateJob() throws Exception { + String jobId = randomAlphaOfLength(10); + JobUpdate updates = JobUpdateTests.createRandom(jobId); + UpdateJobRequest updateJobRequest = new UpdateJobRequest(updates); + + Request request = MLRequestConverters.updateJob(updateJobRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_update", request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + JobUpdate.Builder parsedRequest = JobUpdate.PARSER.apply(parser, null); + assertThat(parsedRequest.build(), equalTo(updates)); + } + } + + public void testGetBuckets() throws IOException { + String jobId = randomAlphaOfLength(10); + GetBucketsRequest getBucketsRequest = new GetBucketsRequest(jobId); + getBucketsRequest.setPageParams(new PageParams(100, 300)); + getBucketsRequest.setAnomalyScore(75.0); + getBucketsRequest.setSort("anomaly_score"); + getBucketsRequest.setDescending(true); + + Request request = MLRequestConverters.getBuckets(getBucketsRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + 
jobId + "/results/buckets", request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + GetBucketsRequest parsedRequest = GetBucketsRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(getBucketsRequest)); + } + } + + public void testGetOverallBuckets() throws IOException { + String jobId = randomAlphaOfLength(10); + GetOverallBucketsRequest getOverallBucketsRequest = new GetOverallBucketsRequest(jobId); + getOverallBucketsRequest.setBucketSpan(TimeValue.timeValueHours(3)); + getOverallBucketsRequest.setTopN(3); + getOverallBucketsRequest.setStart("2018-08-08T00:00:00Z"); + getOverallBucketsRequest.setEnd("2018-09-08T00:00:00Z"); + getOverallBucketsRequest.setExcludeInterim(true); + + Request request = MLRequestConverters.getOverallBuckets(getOverallBucketsRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/results/overall_buckets", request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + GetOverallBucketsRequest parsedRequest = GetOverallBucketsRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(getOverallBucketsRequest)); + } + } + + public void testGetRecords() throws IOException { + String jobId = randomAlphaOfLength(10); + GetRecordsRequest getRecordsRequest = new GetRecordsRequest(jobId); + getRecordsRequest.setStart("2018-08-08T00:00:00Z"); + getRecordsRequest.setEnd("2018-09-08T00:00:00Z"); + getRecordsRequest.setPageParams(new PageParams(100, 300)); + getRecordsRequest.setRecordScore(75.0); + getRecordsRequest.setSort("anomaly_score"); + getRecordsRequest.setDescending(true); + getRecordsRequest.setExcludeInterim(true); + + Request request = MLRequestConverters.getRecords(getRecordsRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/results/records", request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + GetRecordsRequest parsedRequest = GetRecordsRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(getRecordsRequest)); + } + } + + public void testPostData() throws Exception { + String jobId = randomAlphaOfLength(10); + PostDataRequest.JsonBuilder jsonBuilder = new PostDataRequest.JsonBuilder(); + Map obj = new HashMap<>(); + obj.put("foo", "bar"); + jsonBuilder.addDoc(obj); + + PostDataRequest postDataRequest = new PostDataRequest(jobId, jsonBuilder); + Request request = MLRequestConverters.postData(postDataRequest); + + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_data", request.getEndpoint()); + assertEquals("{\"foo\":\"bar\"}", requestEntityToString(request)); + assertEquals(postDataRequest.getXContentType().mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertFalse(request.getParameters().containsKey(PostDataRequest.RESET_END.getPreferredName())); + assertFalse(request.getParameters().containsKey(PostDataRequest.RESET_START.getPreferredName())); + + PostDataRequest postDataRequest2 = new PostDataRequest(jobId, XContentType.SMILE, new byte[0]); + postDataRequest2.setResetStart("2018-08-08T00:00:00Z"); + postDataRequest2.setResetEnd("2018-09-08T00:00:00Z"); + + request = MLRequestConverters.postData(postDataRequest2); + + 
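+        // reset_start and reset_end are sent as URL parameters rather than in the body, and the SMILE content type is preserved on the entity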
assertEquals(postDataRequest2.getXContentType().mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals("2018-09-08T00:00:00Z", request.getParameters().get(PostDataRequest.RESET_END.getPreferredName())); + assertEquals("2018-08-08T00:00:00Z", request.getParameters().get(PostDataRequest.RESET_START.getPreferredName())); + } + + public void testGetInfluencers() throws IOException { + String jobId = randomAlphaOfLength(10); + GetInfluencersRequest getInfluencersRequest = new GetInfluencersRequest(jobId); + getInfluencersRequest.setStart("2018-08-08T00:00:00Z"); + getInfluencersRequest.setEnd("2018-09-08T00:00:00Z"); + getInfluencersRequest.setPageParams(new PageParams(100, 300)); + getInfluencersRequest.setInfluencerScore(75.0); + getInfluencersRequest.setSort("anomaly_score"); + getInfluencersRequest.setDescending(true); + getInfluencersRequest.setExcludeInterim(true); + + Request request = MLRequestConverters.getInfluencers(getInfluencersRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/results/influencers", request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + GetInfluencersRequest parsedRequest = GetInfluencersRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(getInfluencersRequest)); + } + } + private static Job createValidJob(String jobId) { AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList( Detector.builder().setFunction("count").build())); @@ -87,4 +313,10 @@ public class MLRequestConvertersTests extends ESTestCase { jobBuilder.setAnalysisConfig(analysisConfig); return jobBuilder.build(); } -} \ No newline at end of file + + private static String requestEntityToString(Request request) throws Exception { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + request.getEntity().writeTo(bos); + return bos.toString("UTF-8"); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java new file mode 100644 index 00000000000..40d8596d1ba --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java @@ -0,0 +1,530 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client; + +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetInfluencersRequest; +import org.elasticsearch.client.ml.GetInfluencersResponse; +import org.elasticsearch.client.ml.GetOverallBucketsRequest; +import org.elasticsearch.client.ml.GetOverallBucketsResponse; +import org.elasticsearch.client.ml.GetRecordsRequest; +import org.elasticsearch.client.ml.GetRecordsResponse; +import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.job.config.AnalysisConfig; +import org.elasticsearch.client.ml.job.config.DataDescription; +import org.elasticsearch.client.ml.job.config.Detector; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.results.AnomalyRecord; +import org.elasticsearch.client.ml.job.results.Bucket; +import org.elasticsearch.client.ml.job.results.Influencer; +import org.elasticsearch.client.ml.job.results.OverallBucket; +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase { + + private static final String RESULTS_INDEX = ".ml-anomalies-shared"; + private static final String DOC = "doc"; + + private static final String JOB_ID = "get-results-it-job"; + + // 2018-08-01T00:00:00Z + private static final long START_TIME_EPOCH_MS = 1533081600000L; + + private Stats bucketStats = new Stats(); + private Stats recordStats = new Stats(); + + @Before + public void createJobAndIndexResults() throws IOException { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + Job job = buildJob(JOB_ID); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + long time = START_TIME_EPOCH_MS; + long endTime = time + 3600000L * 24 * 10; // 10 days of hourly buckets + while (time < endTime) { + addBucketIndexRequest(time, false, bulkRequest); + addRecordIndexRequests(time, false, bulkRequest); + time += 3600000L; + } + + // Also index an interim bucket + addBucketIndexRequest(time, true, bulkRequest); + addRecordIndexRequest(time, true, bulkRequest); + + highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); + } + + private void addBucketIndexRequest(long timestamp, boolean isInterim, BulkRequest bulkRequest) { + IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC); + double bucketScore = randomDoubleBetween(0.0, 100.0, true); + bucketStats.report(bucketScore); + indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"bucket\", \"timestamp\": " + timestamp + "," + + "\"bucket_span\": 
3600,\"is_interim\": " + isInterim + ", \"anomaly_score\": " + bucketScore + + ", \"bucket_influencers\":[{\"job_id\": \"" + JOB_ID + "\", \"result_type\":\"bucket_influencer\", " + + "\"influencer_field_name\": \"bucket_time\", \"timestamp\": " + timestamp + ", \"bucket_span\": 3600, " + + "\"is_interim\": " + isInterim + "}]}", XContentType.JSON); + bulkRequest.add(indexRequest); + } + + private void addRecordIndexRequests(long timestamp, boolean isInterim, BulkRequest bulkRequest) { + if (randomBoolean()) { + return; + } + int recordCount = randomIntBetween(1, 3); + for (int i = 0; i < recordCount; ++i) { + addRecordIndexRequest(timestamp, isInterim, bulkRequest); + } + } + + private void addRecordIndexRequest(long timestamp, boolean isInterim, BulkRequest bulkRequest) { + IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC); + double recordScore = randomDoubleBetween(0.0, 100.0, true); + recordStats.report(recordScore); + double p = randomDoubleBetween(0.0, 0.05, false); + indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"record\", \"timestamp\": " + timestamp + "," + + "\"bucket_span\": 3600,\"is_interim\": " + isInterim + ", \"record_score\": " + recordScore + ", \"probability\": " + + p + "}", XContentType.JSON); + bulkRequest.add(indexRequest); + } + + @After + public void deleteJob() throws IOException { + new MlRestTestStateCleaner(logger, client()).clearMlMetadata(); + } + + public void testGetBuckets() throws IOException { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(241L)); + assertThat(response.buckets().size(), equalTo(100)); + assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setTimestamp("1533081600000"); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(1L)); + assertThat(response.buckets().size(), equalTo(1)); + assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setAnomalyScore(75.0); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(bucketStats.criticalCount)); + assertThat(response.buckets().size(), equalTo((int) Math.min(100, bucketStats.criticalCount))); + assertThat(response.buckets().stream().anyMatch(b -> b.getAnomalyScore() < 75.0), is(false)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setExcludeInterim(true); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(240L)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setStart("1533081600000"); + request.setEnd("1533092400000"); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(3L)); + 
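+            // start is inclusive and end exclusive, so this three-hour window holds exactly the three hourly buckets asserted below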
assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS)); + assertThat(response.buckets().get(1).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 3600000L)); + assertThat(response.buckets().get(2).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 2 * + 3600000L)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setPageParams(new PageParams(3, 3)); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.buckets().size(), equalTo(3)); + assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 3 * 3600000L)); + assertThat(response.buckets().get(1).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 4 * 3600000L)); + assertThat(response.buckets().get(2).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 5 * 3600000L)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setSort("anomaly_score"); + request.setDescending(true); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + double previousScore = 100.0; + for (Bucket bucket : response.buckets()) { + assertThat(bucket.getAnomalyScore(), lessThanOrEqualTo(previousScore)); + previousScore = bucket.getAnomalyScore(); + } + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + // Make sure we get all buckets + request.setPageParams(new PageParams(0, 10000)); + request.setExpand(true); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.buckets().stream().anyMatch(b -> b.getRecords().size() > 0), is(true)); + } + } + + public void testGetOverallBuckets() throws IOException { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + GetBucketsRequest getBucketsRequest = new GetBucketsRequest(JOB_ID); + getBucketsRequest.setPageParams(new PageParams(0, 3)); + List firstBuckets = machineLearningClient.getBuckets(getBucketsRequest, RequestOptions.DEFAULT).buckets(); + + String anotherJobId = "test-get-overall-buckets-job"; + Job anotherJob = buildJob(anotherJobId); + machineLearningClient.putJob(new PutJobRequest(anotherJob), RequestOptions.DEFAULT); + + // Let's index matching buckets with the score being 10.0 higher + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (Bucket bucket : firstBuckets) { + IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC); + indexRequest.source("{\"job_id\":\"" + anotherJobId + "\", \"result_type\":\"bucket\", \"timestamp\": " + + bucket.getTimestamp().getTime() + "," + "\"bucket_span\": 3600,\"is_interim\": " + bucket.isInterim() + + ", \"anomaly_score\": " + String.valueOf(bucket.getAnomalyScore() + 10.0) + "}", XContentType.JSON); + bulkRequest.add(indexRequest); + } + highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); + + { + GetOverallBucketsRequest request = new GetOverallBucketsRequest(JOB_ID, anotherJobId); + + GetOverallBucketsResponse response = execute(request, machineLearningClient::getOverallBuckets, + machineLearningClient::getOverallBucketsAsync); + + assertThat(response.count(), equalTo(241L)); + List overallBuckets = response.overallBuckets(); + assertThat(overallBuckets.size(), equalTo(241)); + assertThat(overallBuckets.stream().allMatch(b -> 
b.getBucketSpan() == 3600L), is(true)); + assertThat(overallBuckets.get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS)); + assertThat(overallBuckets.get(240).isInterim(), is(true)); + } + { + GetOverallBucketsRequest request = new GetOverallBucketsRequest(JOB_ID, anotherJobId); + request.setBucketSpan(TimeValue.timeValueHours(2)); + + GetOverallBucketsResponse response = execute(request, machineLearningClient::getOverallBuckets, + machineLearningClient::getOverallBucketsAsync); + + assertThat(response.count(), equalTo(121L)); + } + { + long end = START_TIME_EPOCH_MS + 10 * 3600000L; + GetOverallBucketsRequest request = new GetOverallBucketsRequest(JOB_ID, anotherJobId); + request.setEnd(String.valueOf(end)); + + GetOverallBucketsResponse response = execute(request, machineLearningClient::getOverallBuckets, + machineLearningClient::getOverallBucketsAsync); + + assertThat(response.count(), equalTo(10L)); + assertThat(response.overallBuckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS)); + assertThat(response.overallBuckets().get(9).getTimestamp().getTime(), equalTo(end - 3600000L)); + } + { + GetOverallBucketsRequest request = new GetOverallBucketsRequest(JOB_ID, anotherJobId); + request.setExcludeInterim(true); + + GetOverallBucketsResponse response = execute(request, machineLearningClient::getOverallBuckets, + machineLearningClient::getOverallBucketsAsync); + + assertThat(response.count(), equalTo(240L)); + assertThat(response.overallBuckets().stream().allMatch(b -> b.isInterim() == false), is(true)); + } + { + GetOverallBucketsRequest request = new GetOverallBucketsRequest(JOB_ID); + request.setOverallScore(75.0); + + GetOverallBucketsResponse response = execute(request, machineLearningClient::getOverallBuckets, + machineLearningClient::getOverallBucketsAsync); + + assertThat(response.count(), equalTo(bucketStats.criticalCount)); + assertThat(response.overallBuckets().stream().allMatch(b -> b.getOverallScore() >= 75.0), is(true)); + } + { + long start = START_TIME_EPOCH_MS + 10 * 3600000L; + GetOverallBucketsRequest request = new GetOverallBucketsRequest(JOB_ID, anotherJobId); + request.setStart(String.valueOf(start)); + + GetOverallBucketsResponse response = execute(request, machineLearningClient::getOverallBuckets, + machineLearningClient::getOverallBucketsAsync); + + assertThat(response.count(), equalTo(231L)); + assertThat(response.overallBuckets().get(0).getTimestamp().getTime(), equalTo(start)); + } + { + GetOverallBucketsRequest request = new GetOverallBucketsRequest(JOB_ID, anotherJobId); + request.setEnd(String.valueOf(START_TIME_EPOCH_MS + 3 * 3600000L)); + request.setTopN(2); + + GetOverallBucketsResponse response = execute(request, machineLearningClient::getOverallBuckets, + machineLearningClient::getOverallBucketsAsync); + + assertThat(response.count(), equalTo(3L)); + List overallBuckets = response.overallBuckets(); + for (int i = 0; i < overallBuckets.size(); ++i) { + // As the second job has scores that are -10 from the first, the overall buckets should be +5 from the initial job + assertThat(overallBuckets.get(i).getOverallScore(), is(closeTo(firstBuckets.get(i).getAnomalyScore() + 5.0, 0.0001))); + } + } + } + + public void testGetRecords() throws IOException { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + { + GetRecordsRequest request = new GetRecordsRequest(JOB_ID); + + GetRecordsResponse response = execute(request, machineLearningClient::getRecords, machineLearningClient::getRecordsAsync); + + 
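+            // with no filters the response should count every record indexed in setup, interim included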
assertThat(response.count(), greaterThan(0L)); + assertThat(response.count(), equalTo(recordStats.totalCount())); + } + { + GetRecordsRequest request = new GetRecordsRequest(JOB_ID); + request.setRecordScore(50.0); + + GetRecordsResponse response = execute(request, machineLearningClient::getRecords, machineLearningClient::getRecordsAsync); + + long majorAndCriticalCount = recordStats.majorCount + recordStats.criticalCount; + assertThat(response.count(), equalTo(majorAndCriticalCount)); + assertThat(response.records().size(), equalTo((int) Math.min(100, majorAndCriticalCount))); + assertThat(response.records().stream().anyMatch(r -> r.getRecordScore() < 50.0), is(false)); + } + { + GetRecordsRequest request = new GetRecordsRequest(JOB_ID); + request.setExcludeInterim(true); + + GetRecordsResponse response = execute(request, machineLearningClient::getRecords, machineLearningClient::getRecordsAsync); + + assertThat(response.count(), equalTo(recordStats.totalCount() - 1)); + } + { + long end = START_TIME_EPOCH_MS + 10 * 3600000; + GetRecordsRequest request = new GetRecordsRequest(JOB_ID); + request.setStart(String.valueOf(START_TIME_EPOCH_MS)); + request.setEnd(String.valueOf(end)); + + GetRecordsResponse response = execute(request, machineLearningClient::getRecords, machineLearningClient::getRecordsAsync); + + for (AnomalyRecord record : response.records()) { + assertThat(record.getTimestamp().getTime(), greaterThanOrEqualTo(START_TIME_EPOCH_MS)); + assertThat(record.getTimestamp().getTime(), lessThan(end)); + } + } + { + GetRecordsRequest request = new GetRecordsRequest(JOB_ID); + request.setPageParams(new PageParams(3, 3)); + + GetRecordsResponse response = execute(request, machineLearningClient::getRecords, machineLearningClient::getRecordsAsync); + + assertThat(response.records().size(), equalTo(3)); + } + { + GetRecordsRequest request = new GetRecordsRequest(JOB_ID); + request.setSort("probability"); + request.setDescending(true); + + GetRecordsResponse response = execute(request, machineLearningClient::getRecords, machineLearningClient::getRecordsAsync); + + double previousProb = 1.0; + for (AnomalyRecord record : response.records()) { + assertThat(record.getProbability(), lessThanOrEqualTo(previousProb)); + previousProb = record.getProbability(); + } + } + } + + public void testGetInfluencers() throws IOException { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + // Let us index a few influencer docs + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + long timestamp = START_TIME_EPOCH_MS; + long end = START_TIME_EPOCH_MS + 5 * 3600000L; + while (timestamp < end) { + boolean isLast = timestamp == end - 3600000L; + // Last one is interim + boolean isInterim = isLast; + // Last one score is higher + double score = isLast ? 
90.0 : 42.0; + + IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC); + indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"influencer\", \"timestamp\": " + + timestamp + "," + "\"bucket_span\": 3600,\"is_interim\": " + isInterim + ", \"influencer_score\": " + score + ", " + + "\"influencer_field_name\":\"my_influencer\", \"influencer_field_value\": \"inf_1\", \"probability\":" + + randomDouble() + "}", XContentType.JSON); + bulkRequest.add(indexRequest); + timestamp += 3600000L; + } + highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); + + { + GetInfluencersRequest request = new GetInfluencersRequest(JOB_ID); + request.setDescending(false); + + GetInfluencersResponse response = execute(request, machineLearningClient::getInfluencers, + machineLearningClient::getInfluencersAsync); + + assertThat(response.count(), equalTo(5L)); + } + { + long requestStart = START_TIME_EPOCH_MS + 3600000L; + long requestEnd = end - 3600000L; + GetInfluencersRequest request = new GetInfluencersRequest(JOB_ID); + request.setStart(String.valueOf(requestStart)); + request.setEnd(String.valueOf(requestEnd)); + + GetInfluencersResponse response = execute(request, machineLearningClient::getInfluencers, + machineLearningClient::getInfluencersAsync); + + assertThat(response.count(), equalTo(3L)); + for (Influencer influencer : response.influencers()) { + assertThat(influencer.getTimestamp().getTime(), greaterThanOrEqualTo(START_TIME_EPOCH_MS)); + assertThat(influencer.getTimestamp().getTime(), lessThan(end)); + } + } + { + GetInfluencersRequest request = new GetInfluencersRequest(JOB_ID); + request.setSort("timestamp"); + request.setDescending(false); + request.setPageParams(new PageParams(1, 2)); + + GetInfluencersResponse response = execute(request, machineLearningClient::getInfluencers, + machineLearningClient::getInfluencersAsync); + + assertThat(response.influencers().size(), equalTo(2)); + assertThat(response.influencers().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 3600000L)); + assertThat(response.influencers().get(1).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 2 * 3600000L)); + } + { + GetInfluencersRequest request = new GetInfluencersRequest(JOB_ID); + request.setExcludeInterim(true); + + GetInfluencersResponse response = execute(request, machineLearningClient::getInfluencers, + machineLearningClient::getInfluencersAsync); + + assertThat(response.count(), equalTo(4L)); + assertThat(response.influencers().stream().anyMatch(Influencer::isInterim), is(false)); + } + { + GetInfluencersRequest request = new GetInfluencersRequest(JOB_ID); + request.setInfluencerScore(75.0); + + GetInfluencersResponse response = execute(request, machineLearningClient::getInfluencers, + machineLearningClient::getInfluencersAsync); + + assertThat(response.count(), equalTo(1L)); + assertThat(response.influencers().get(0).getInfluencerScore(), greaterThanOrEqualTo(75.0)); + } + { + GetInfluencersRequest request = new GetInfluencersRequest(JOB_ID); + request.setSort("probability"); + request.setDescending(true); + + GetInfluencersResponse response = execute(request, machineLearningClient::getInfluencers, + machineLearningClient::getInfluencersAsync); + + assertThat(response.influencers().size(), equalTo(5)); + double previousProb = 1.0; + for (Influencer influencer : response.influencers()) { + assertThat(influencer.getProbability(), lessThanOrEqualTo(previousProb)); + previousProb = influencer.getProbability(); + } + } + } + + public static Job buildJob(String 
jobId) { + Job.Builder builder = new Job.Builder(jobId); + + Detector detector = new Detector.Builder("count", null).build(); + AnalysisConfig.Builder configBuilder = new AnalysisConfig.Builder(Arrays.asList(detector)); + configBuilder.setBucketSpan(TimeValue.timeValueHours(1)); + builder.setAnalysisConfig(configBuilder); + + DataDescription.Builder dataDescription = new DataDescription.Builder(); + builder.setDataDescription(dataDescription); + return builder.build(); + } + + private static class Stats { + // score < 50.0 + private long minorCount; + + // score < 75.0 + private long majorCount; + + // score > 75.0 + private long criticalCount; + + private void report(double score) { + if (score < 50.0) { + minorCount++; + } else if (score < 75.0) { + majorCount++; + } else { + criticalCount++; + } + } + + private long totalCount() { + return minorCount + majorCount + criticalCount; + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 0037460150f..fb715683b27 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -19,25 +19,56 @@ package org.elasticsearch.client; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.client.ml.ForecastJobRequest; +import org.elasticsearch.client.ml.ForecastJobResponse; +import org.elasticsearch.client.ml.PostDataRequest; +import org.elasticsearch.client.ml.PostDataResponse; +import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.job.config.JobUpdate; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; -import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; -import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; -import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; -import org.elasticsearch.protocol.xpack.ml.PutJobRequest; -import org.elasticsearch.protocol.xpack.ml.PutJobResponse; -import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig; -import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription; -import org.elasticsearch.protocol.xpack.ml.job.config.Detector; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetJobStatsResponse; +import org.elasticsearch.client.ml.job.config.JobState; +import org.elasticsearch.client.ml.job.stats.JobStats; +import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteJobRequest; +import org.elasticsearch.client.ml.DeleteJobResponse; +import org.elasticsearch.client.ml.GetJobRequest; +import org.elasticsearch.client.ml.GetJobResponse; +import org.elasticsearch.client.ml.OpenJobRequest; +import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.job.config.AnalysisConfig; +import org.elasticsearch.client.ml.job.config.DataDescription; +import org.elasticsearch.client.ml.job.config.Detector; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.FlushJobRequest; 
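+// note: the ML request, response, and job config classes above moved to org.elasticsearch.client.ml, replacing the org.elasticsearch.protocol.xpack.ml imports removed in this change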
+import org.elasticsearch.client.ml.FlushJobResponse; +import org.junit.After; +import java.io.IOException; import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; public class MachineLearningIT extends ESRestHighLevelClientTestCase { + @After + public void cleanUp() throws IOException { + new MlRestTestStateCleaner(logger, client()).clearMlMetadata(); + } + public void testPutJob() throws Exception { String jobId = randomValidJobId(); Job job = buildJob(jobId); @@ -50,6 +81,41 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { assertThat(createdJob.getJobType(), is(Job.ANOMALY_DETECTOR_JOB_TYPE)); } + public void testGetJob() throws Exception { + String jobId1 = randomValidJobId(); + String jobId2 = randomValidJobId(); + + Job job1 = buildJob(jobId1); + Job job2 = buildJob(jobId2); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job1), RequestOptions.DEFAULT); + machineLearningClient.putJob(new PutJobRequest(job2), RequestOptions.DEFAULT); + + GetJobRequest request = new GetJobRequest(jobId1, jobId2); + + // Test getting specific jobs + GetJobResponse response = execute(request, machineLearningClient::getJob, machineLearningClient::getJobAsync); + + assertEquals(2, response.count()); + assertThat(response.jobs(), hasSize(2)); + assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), containsInAnyOrder(jobId1, jobId2)); + + // Test getting all jobs explicitly + request = GetJobRequest.getAllJobsRequest(); + response = execute(request, machineLearningClient::getJob, machineLearningClient::getJobAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.jobs().size() >= 2L); + assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + + // Test getting all jobs implicitly + response = execute(new GetJobRequest(), machineLearningClient::getJob, machineLearningClient::getJobAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.jobs().size() >= 2L); + assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + } + public void testDeleteJob() throws Exception { String jobId = randomValidJobId(); Job job = buildJob(jobId); @@ -75,6 +141,153 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { assertTrue(response.isOpened()); } + public void testCloseJob() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT); + + CloseJobResponse response = execute(new CloseJobRequest(jobId), + machineLearningClient::closeJob, + machineLearningClient::closeJobAsync); + assertTrue(response.isClosed()); + } + + public void testFlushJob() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + 
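+        // a job must be put and opened before it can accept a flush request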
machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT); + + FlushJobResponse response = execute(new FlushJobRequest(jobId), + machineLearningClient::flushJob, + machineLearningClient::flushJobAsync); + assertTrue(response.isFlushed()); + } + + public void testGetJobStats() throws Exception { + String jobId1 = "ml-get-job-stats-test-id-1"; + String jobId2 = "ml-get-job-stats-test-id-2"; + + Job job1 = buildJob(jobId1); + Job job2 = buildJob(jobId2); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job1), RequestOptions.DEFAULT); + machineLearningClient.putJob(new PutJobRequest(job2), RequestOptions.DEFAULT); + + machineLearningClient.openJob(new OpenJobRequest(jobId1), RequestOptions.DEFAULT); + + GetJobStatsRequest request = new GetJobStatsRequest(jobId1, jobId2); + + // Test getting specific + GetJobStatsResponse response = execute(request, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync); + + assertEquals(2, response.count()); + assertThat(response.jobStats(), hasSize(2)); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), containsInAnyOrder(jobId1, jobId2)); + for (JobStats stats : response.jobStats()) { + if (stats.getJobId().equals(jobId1)) { + assertEquals(JobState.OPENED, stats.getState()); + } else { + assertEquals(JobState.CLOSED, stats.getState()); + } + } + + // Test getting all explicitly + request = GetJobStatsRequest.getAllJobStatsRequest(); + response = execute(request, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.jobStats().size() >= 2L); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + + // Test getting all implicitly + response = execute(new GetJobStatsRequest(), machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.jobStats().size() >= 2L); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + + // Test getting all with wildcard + request = new GetJobStatsRequest("ml-get-job-stats-test-id-*"); + response = execute(request, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync); + assertTrue(response.count() >= 2L); + assertTrue(response.jobStats().size() >= 2L); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + + // Test when allow_no_jobs is false + final GetJobStatsRequest erroredRequest = new GetJobStatsRequest("jobs-that-do-not-exist*"); + erroredRequest.setAllowNoJobs(false); + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, + () -> execute(erroredRequest, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync)); + assertThat(exception.status().getStatus(), equalTo(404)); + } + + public void testForecastJob() throws Exception { + String jobId = "ml-forecast-job-test"; + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + machineLearningClient.openJob(new OpenJobRequest(jobId), 
RequestOptions.DEFAULT); + + PostDataRequest.JsonBuilder builder = new PostDataRequest.JsonBuilder(); + for(int i = 0; i < 30; i++) { + Map<String, Object> hashMap = new HashMap<>(); + hashMap.put("total", randomInt(1000)); + hashMap.put("timestamp", (i+1)*1000); + builder.addDoc(hashMap); + } + PostDataRequest postDataRequest = new PostDataRequest(jobId, builder); + machineLearningClient.postData(postDataRequest, RequestOptions.DEFAULT); + machineLearningClient.flushJob(new FlushJobRequest(jobId), RequestOptions.DEFAULT); + + ForecastJobRequest request = new ForecastJobRequest(jobId); + ForecastJobResponse response = execute(request, machineLearningClient::forecastJob, machineLearningClient::forecastJobAsync); + + assertTrue(response.isAcknowledged()); + assertNotNull(response.getForecastId()); + } + + public void testPostData() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT); + + PostDataRequest.JsonBuilder builder = new PostDataRequest.JsonBuilder(); + for(int i = 0; i < 10; i++) { + Map<String, Object> hashMap = new HashMap<>(); + hashMap.put("total", randomInt(1000)); + hashMap.put("timestamp", (i+1)*1000); + builder.addDoc(hashMap); + } + PostDataRequest postDataRequest = new PostDataRequest(jobId, builder); + + PostDataResponse response = execute(postDataRequest, machineLearningClient::postData, machineLearningClient::postDataAsync); + assertEquals(10, response.getDataCounts().getInputRecordCount()); + assertEquals(0, response.getDataCounts().getOutOfOrderTimeStampCount()); + } + + public void testUpdateJob() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + UpdateJobRequest request = new UpdateJobRequest(new JobUpdate.Builder(jobId).setDescription("Updated description").build()); + + PutJobResponse response = execute(request, machineLearningClient::updateJob, machineLearningClient::updateJobAsync); + + assertEquals("Updated description", response.getResponse().getDescription()); + + GetJobRequest getRequest = new GetJobRequest(jobId); + GetJobResponse getResponse = machineLearningClient.getJob(getRequest, RequestOptions.DEFAULT); + assertEquals("Updated description", getResponse.jobs().get(0).getDescription()); + } + public static String randomValidJobId() { CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray()); return generator.ofCodePointsLength(random(), 10, 10); @@ -94,8 +307,8 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { builder.setAnalysisConfig(configBuilder); DataDescription.Builder dataDescription = new DataDescription.Builder(); - dataDescription.setTimeFormat(randomFrom(DataDescription.EPOCH_MS, DataDescription.EPOCH)); - dataDescription.setTimeField(randomAlphaOfLength(10)); + dataDescription.setTimeFormat(DataDescription.EPOCH_MS); + dataDescription.setTimeField("timestamp"); builder.setDataDescription(dataDescription); return builder.build(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationRequestConvertersTests.java
b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationRequestConvertersTests.java new file mode 100644 index 00000000000..97a2cc16a7e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationRequestConvertersTests.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; +import org.elasticsearch.test.ESTestCase; + +import java.util.HashMap; +import java.util.Map; + +public class MigrationRequestConvertersTests extends ESTestCase { + + public void testGetMigrationAssistance() { + IndexUpgradeInfoRequest upgradeInfoRequest = new IndexUpgradeInfoRequest(); + String expectedEndpoint = "/_xpack/migration/assistance"; + if (randomBoolean()) { + String[] indices = RequestConvertersTests.randomIndicesNames(1, 5); + upgradeInfoRequest.indices(indices); + expectedEndpoint += "/" + String.join(",", indices); + } + Map<String, String> expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomIndicesOptions(upgradeInfoRequest::indicesOptions, upgradeInfoRequest::indicesOptions, + expectedParams); + Request request = MigrationRequestConverters.getMigrationAssistance(upgradeInfoRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals(expectedEndpoint, request.getEndpoint()); + assertNull(request.getEntity()); + assertEquals(expectedParams, request.getParameters()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MlRestTestStateCleaner.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MlRestTestStateCleaner.java new file mode 100644 index 00000000000..7ad86576245 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MlRestTestStateCleaner.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * This is temporarily duplicated from the server side. + * TODO: Replace with an implementation using the HLRC once + * the APIs for managing datafeeds are implemented. + */ +public class MlRestTestStateCleaner { + + private final Logger logger; + private final RestClient adminClient; + + public MlRestTestStateCleaner(Logger logger, RestClient adminClient) { + this.logger = logger; + this.adminClient = adminClient; + } + + public void clearMlMetadata() throws IOException { + deleteAllDatafeeds(); + deleteAllJobs(); + // indices will be deleted by the ESRestTestCase class + } + + @SuppressWarnings("unchecked") + private void deleteAllDatafeeds() throws IOException { + final Request datafeedsRequest = new Request("GET", "/_xpack/ml/datafeeds"); + datafeedsRequest.addParameter("filter_path", "datafeeds"); + final Response datafeedsResponse = adminClient.performRequest(datafeedsRequest); + final List<Map<String, Object>> datafeeds = + (List<Map<String, Object>>) XContentMapValues.extractValue("datafeeds", ESRestTestCase.entityAsMap(datafeedsResponse)); + if (datafeeds == null) { + return; + } + + try { + adminClient.performRequest(new Request("POST", "/_xpack/ml/datafeeds/_all/_stop")); + } catch (Exception e1) { + logger.warn("failed to stop all datafeeds. Forcing stop", e1); + try { + adminClient.performRequest(new Request("POST", "/_xpack/ml/datafeeds/_all/_stop?force=true")); + } catch (Exception e2) { + logger.warn("Force-closing all data feeds failed", e2); + } + throw new RuntimeException( + "Had to resort to force-stopping datafeeds, something went wrong?", e1); + } + + for (Map<String, Object> datafeed : datafeeds) { + String datafeedId = (String) datafeed.get("datafeed_id"); + adminClient.performRequest(new Request("DELETE", "/_xpack/ml/datafeeds/" + datafeedId)); + } + } + + private void deleteAllJobs() throws IOException { + final Request jobsRequest = new Request("GET", "/_xpack/ml/anomaly_detectors"); + jobsRequest.addParameter("filter_path", "jobs"); + final Response response = adminClient.performRequest(jobsRequest); + @SuppressWarnings("unchecked") + final List<Map<String, Object>> jobConfigs = + (List<Map<String, Object>>) XContentMapValues.extractValue("jobs", ESRestTestCase.entityAsMap(response)); + if (jobConfigs == null) { + return; + } + + try { + adminClient.performRequest(new Request("POST", "/_xpack/ml/anomaly_detectors/_all/_close")); + } catch (Exception e1) { + logger.warn("failed to close all jobs.
Forcing closed", e1); + try { + adminClient.performRequest(new Request("POST", "/_xpack/ml/anomaly_detectors/_all/_close?force=true")); + } catch (Exception e2) { + logger.warn("Force-closing all jobs failed", e2); + } + throw new RuntimeException("Had to resort to force-closing jobs, something went wrong?", + e1); + } + + for (Map jobConfig : jobConfigs) { + String jobId = (String) jobConfig.get("job_id"); + adminClient.performRequest(new Request("DELETE", "/_xpack/ml/anomaly_detectors/" + jobId)); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 47195f0bb2a..4ef8e8542c9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -29,22 +29,9 @@ import org.apache.http.entity.ByteArrayEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; -import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; -import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; -import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; -import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsRequest; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; -import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; @@ -79,10 +66,6 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.GetPipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.SearchRequest; @@ -97,18 +80,14 @@ import 
org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.RequestConverters.EndpointBuilder; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.CheckedFunction; -import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.ToXContent; @@ -116,6 +95,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.query.QueryBuilders; @@ -125,12 +105,12 @@ import org.elasticsearch.index.rankeval.RankEvalRequest; import org.elasticsearch.index.rankeval.RankEvalSpec; import org.elasticsearch.index.rankeval.RatedRequest; import org.elasticsearch.index.rankeval.RestRankEvalAction; -import org.elasticsearch.protocol.xpack.XPackInfoRequest; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; -import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; -import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.RemoteInfo; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.mustache.MultiSearchTemplateRequest; import org.elasticsearch.script.mustache.SearchTemplateRequest; @@ -144,20 +124,14 @@ import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; -import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; -import org.hamcrest.CoreMatchers; -import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; -import java.nio.charset.StandardCharsets; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -169,6 +143,7 @@ import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; +import static java.util.Collections.emptyMap; import static 
java.util.Collections.singletonMap; import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE; import static org.elasticsearch.client.RequestConverters.enforceSameContentType; @@ -176,13 +151,12 @@ import static org.elasticsearch.index.RandomCreateIndexGenerator.randomAliases; import static org.elasticsearch.index.RandomCreateIndexGenerator.randomCreateIndexRequest; import static org.elasticsearch.index.RandomCreateIndexGenerator.randomIndexSettings; import static org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomAliasAction; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class RequestConvertersTests extends ESTestCase { @@ -404,6 +378,165 @@ public class RequestConvertersTests extends ESTestCase { assertToXContentBody(indicesAliasesRequest, request.getEntity()); } + public void testReindex() throws IOException { + ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices("source_idx"); + reindexRequest.setDestIndex("dest_idx"); + Map<String, String> expectedParams = new HashMap<>(); + if (randomBoolean()) { + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + RemoteInfo remoteInfo = new RemoteInfo("http", "remote-host", 9200, null, + BytesReference.bytes(matchAllQuery().toXContent(builder, ToXContent.EMPTY_PARAMS)), + "user", + "pass", + emptyMap(), + RemoteInfo.DEFAULT_SOCKET_TIMEOUT, + RemoteInfo.DEFAULT_CONNECT_TIMEOUT + ); + reindexRequest.setRemoteInfo(remoteInfo); + } + if (randomBoolean()) { + reindexRequest.setSourceDocTypes("doc", "tweet"); + } + if (randomBoolean()) { + reindexRequest.setSourceBatchSize(randomInt(100)); + } + if (randomBoolean()) { + reindexRequest.setDestDocType("tweet_and_doc"); + } + if (randomBoolean()) { + reindexRequest.setDestOpType("create"); + } + if (randomBoolean()) { + reindexRequest.setDestPipeline("my_pipeline"); + } + if (randomBoolean()) { + reindexRequest.setDestRouting("=cat"); + } + if (randomBoolean()) { + reindexRequest.setSize(randomIntBetween(100, 1000)); + } + if (randomBoolean()) { + reindexRequest.setAbortOnVersionConflict(false); + } + if (randomBoolean()) { + String ts = randomTimeValue(); + reindexRequest.setScroll(TimeValue.parseTimeValue(ts, "scroll")); + } + if (reindexRequest.getRemoteInfo() == null && randomBoolean()) { + reindexRequest.setSourceQuery(new TermQueryBuilder("foo", "fooval")); + } + setRandomTimeout(reindexRequest::setTimeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams); + setRandomWaitForActiveShards(reindexRequest::setWaitForActiveShards, ActiveShardCount.DEFAULT, expectedParams); + expectedParams.put("scroll", reindexRequest.getScrollTime().getStringRep()); + Request request = RequestConverters.reindex(reindexRequest); + assertEquals("/_reindex", request.getEndpoint()); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(reindexRequest, request.getEntity()); + } + + public void testUpdateByQuery() throws IOException { + UpdateByQueryRequest
updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(randomIndicesNames(1, 5)); + Map<String, String> expectedParams = new HashMap<>(); + if (randomBoolean()) { + updateByQueryRequest.setDocTypes(generateRandomStringArray(5, 5, false, false)); + } + if (randomBoolean()) { + int batchSize = randomInt(100); + updateByQueryRequest.setBatchSize(batchSize); + expectedParams.put("scroll_size", Integer.toString(batchSize)); + } + if (randomBoolean()) { + updateByQueryRequest.setPipeline("my_pipeline"); + expectedParams.put("pipeline", "my_pipeline"); + } + if (randomBoolean()) { + updateByQueryRequest.setRouting("=cat"); + expectedParams.put("routing", "=cat"); + } + if (randomBoolean()) { + int size = randomIntBetween(100, 1000); + updateByQueryRequest.setSize(size); + expectedParams.put("size", Integer.toString(size)); + } + if (randomBoolean()) { + updateByQueryRequest.setAbortOnVersionConflict(false); + expectedParams.put("conflicts", "proceed"); + } + if (randomBoolean()) { + String ts = randomTimeValue(); + updateByQueryRequest.setScroll(TimeValue.parseTimeValue(ts, "scroll")); + expectedParams.put("scroll", ts); + } + if (randomBoolean()) { + updateByQueryRequest.setQuery(new TermQueryBuilder("foo", "fooval")); + } + if (randomBoolean()) { + updateByQueryRequest.setScript(new Script("ctx._source.last = \"lastname\"")); + } + setRandomIndicesOptions(updateByQueryRequest::setIndicesOptions, updateByQueryRequest::indicesOptions, expectedParams); + setRandomTimeout(updateByQueryRequest::setTimeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams); + Request request = RequestConverters.updateByQuery(updateByQueryRequest); + StringJoiner joiner = new StringJoiner("/", "/", ""); + joiner.add(String.join(",", updateByQueryRequest.indices())); + if (updateByQueryRequest.getDocTypes().length > 0) + joiner.add(String.join(",", updateByQueryRequest.getDocTypes())); + joiner.add("_update_by_query"); + assertEquals(joiner.toString(), request.getEndpoint()); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(updateByQueryRequest, request.getEntity()); + } + + public void testDeleteByQuery() throws IOException { + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(); + deleteByQueryRequest.indices(randomIndicesNames(1, 5)); + Map<String, String> expectedParams = new HashMap<>(); + if (randomBoolean()) { + deleteByQueryRequest.setDocTypes(generateRandomStringArray(5, 5, false, false)); + } + if (randomBoolean()) { + int batchSize = randomInt(100); + deleteByQueryRequest.setBatchSize(batchSize); + expectedParams.put("scroll_size", Integer.toString(batchSize)); + } + if (randomBoolean()) { + deleteByQueryRequest.setRouting("=cat"); + expectedParams.put("routing", "=cat"); + } + if (randomBoolean()) { + int size = randomIntBetween(100, 1000); + deleteByQueryRequest.setSize(size); + expectedParams.put("size", Integer.toString(size)); + } + if (randomBoolean()) { + deleteByQueryRequest.setAbortOnVersionConflict(false); + expectedParams.put("conflicts", "proceed"); + } + if (randomBoolean()) { + String ts = randomTimeValue(); + deleteByQueryRequest.setScroll(TimeValue.parseTimeValue(ts, "scroll")); + expectedParams.put("scroll", ts); + } + if (randomBoolean()) { + deleteByQueryRequest.setQuery(new TermQueryBuilder("foo", "fooval")); + } + setRandomIndicesOptions(deleteByQueryRequest::setIndicesOptions, deleteByQueryRequest::indicesOptions, expectedParams); + setRandomTimeout(deleteByQueryRequest::setTimeout,
ReplicationRequest.DEFAULT_TIMEOUT, expectedParams); + Request request = RequestConverters.deleteByQuery(deleteByQueryRequest); + StringJoiner joiner = new StringJoiner("/", "/", ""); + joiner.add(String.join(",", deleteByQueryRequest.indices())); + if (deleteByQueryRequest.getDocTypes().length > 0) + joiner.add(String.join(",", deleteByQueryRequest.getDocTypes())); + joiner.add("_delete_by_query"); + assertEquals(joiner.toString(), request.getEndpoint()); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(deleteByQueryRequest, request.getEntity()); + } + public void testPutMapping() throws IOException { PutMappingRequest putMappingRequest = new PutMappingRequest(); @@ -1668,189 +1801,6 @@ public class RequestConvertersTests extends ESTestCase { assertToXContentBody(resizeRequest, request.getEntity()); } - public void testClusterPutSettings() throws IOException { - ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); - Map expectedParams = new HashMap<>(); - setRandomMasterTimeout(request, expectedParams); - setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - - Request expectedRequest = RequestConverters.clusterPutSettings(request); - assertEquals("/_cluster/settings", expectedRequest.getEndpoint()); - assertEquals(HttpPut.METHOD_NAME, expectedRequest.getMethod()); - assertEquals(expectedParams, expectedRequest.getParameters()); - } - - public void testClusterGetSettings() throws IOException { - ClusterGetSettingsRequest request = new ClusterGetSettingsRequest(); - Map expectedParams = new HashMap<>(); - setRandomMasterTimeout(request, expectedParams); - request.includeDefaults(randomBoolean()); - if (request.includeDefaults()) { - expectedParams.put("include_defaults", String.valueOf(true)); - } - - Request expectedRequest = RequestConverters.clusterGetSettings(request); - assertEquals("/_cluster/settings", expectedRequest.getEndpoint()); - assertEquals(HttpGet.METHOD_NAME, expectedRequest.getMethod()); - assertEquals(expectedParams, expectedRequest.getParameters()); - } - - public void testPutPipeline() throws IOException { - String pipelineId = "some_pipeline_id"; - PutPipelineRequest request = new PutPipelineRequest( - "some_pipeline_id", - new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), - XContentType.JSON - ); - Map expectedParams = new HashMap<>(); - setRandomMasterTimeout(request, expectedParams); - setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - - Request expectedRequest = RequestConverters.putPipeline(request); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - endpoint.add("_ingest/pipeline"); - endpoint.add(pipelineId); - assertEquals(endpoint.toString(), expectedRequest.getEndpoint()); - assertEquals(HttpPut.METHOD_NAME, expectedRequest.getMethod()); - assertEquals(expectedParams, expectedRequest.getParameters()); - } - - public void testGetPipeline() { - String pipelineId = "some_pipeline_id"; - Map expectedParams = new HashMap<>(); - GetPipelineRequest request = new GetPipelineRequest("some_pipeline_id"); - setRandomMasterTimeout(request, expectedParams); - Request expectedRequest = RequestConverters.getPipeline(request); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - endpoint.add("_ingest/pipeline"); - endpoint.add(pipelineId); - assertEquals(endpoint.toString(), expectedRequest.getEndpoint()); - assertEquals(HttpGet.METHOD_NAME, 
expectedRequest.getMethod()); - assertEquals(expectedParams, expectedRequest.getParameters()); - } - - public void testDeletePipeline() { - String pipelineId = "some_pipeline_id"; - Map expectedParams = new HashMap<>(); - DeletePipelineRequest request = new DeletePipelineRequest(pipelineId); - setRandomMasterTimeout(request, expectedParams); - setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - Request expectedRequest = RequestConverters.deletePipeline(request); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - endpoint.add("_ingest/pipeline"); - endpoint.add(pipelineId); - assertEquals(endpoint.toString(), expectedRequest.getEndpoint()); - assertEquals(HttpDelete.METHOD_NAME, expectedRequest.getMethod()); - assertEquals(expectedParams, expectedRequest.getParameters()); - } - - public void testSimulatePipeline() throws IOException { - String pipelineId = randomBoolean() ? "some_pipeline_id" : null; - boolean verbose = randomBoolean(); - String json = "{\"pipeline\":{" + - "\"description\":\"_description\"," + - "\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]}," + - "\"docs\":[{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"rab\"}}]}"; - SimulatePipelineRequest request = new SimulatePipelineRequest( - new BytesArray(json.getBytes(StandardCharsets.UTF_8)), - XContentType.JSON - ); - request.setId(pipelineId); - request.setVerbose(verbose); - Map expectedParams = new HashMap<>(); - expectedParams.put("verbose", Boolean.toString(verbose)); - - Request expectedRequest = RequestConverters.simulatePipeline(request); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - endpoint.add("_ingest/pipeline"); - if (pipelineId != null && !pipelineId.isEmpty()) - endpoint.add(pipelineId); - endpoint.add("_simulate"); - assertEquals(endpoint.toString(), expectedRequest.getEndpoint()); - assertEquals(HttpPost.METHOD_NAME, expectedRequest.getMethod()); - assertEquals(expectedParams, expectedRequest.getParameters()); - assertToXContentBody(request, expectedRequest.getEntity()); - } - - public void testClusterHealth() { - ClusterHealthRequest healthRequest = new ClusterHealthRequest(); - Map expectedParams = new HashMap<>(); - setRandomLocal(healthRequest, expectedParams); - String timeoutType = randomFrom("timeout", "masterTimeout", "both", "none"); - String timeout = randomTimeValue(); - String masterTimeout = randomTimeValue(); - switch (timeoutType) { - case "timeout": - healthRequest.timeout(timeout); - expectedParams.put("timeout", timeout); - // If Master Timeout wasn't set it uses the same value as Timeout - expectedParams.put("master_timeout", timeout); - break; - case "masterTimeout": - expectedParams.put("timeout", "30s"); - healthRequest.masterNodeTimeout(masterTimeout); - expectedParams.put("master_timeout", masterTimeout); - break; - case "both": - healthRequest.timeout(timeout); - expectedParams.put("timeout", timeout); - healthRequest.masterNodeTimeout(timeout); - expectedParams.put("master_timeout", timeout); - break; - case "none": - expectedParams.put("timeout", "30s"); - expectedParams.put("master_timeout", "30s"); - break; - default: - throw new UnsupportedOperationException(); - } - setRandomWaitForActiveShards(healthRequest::waitForActiveShards, ActiveShardCount.NONE, expectedParams); - if (randomBoolean()) { - ClusterHealthRequest.Level level = randomFrom(ClusterHealthRequest.Level.values()); - healthRequest.level(level); - expectedParams.put("level", 
level.name().toLowerCase(Locale.ROOT)); - } else { - expectedParams.put("level", "cluster"); - } - if (randomBoolean()) { - Priority priority = randomFrom(Priority.values()); - healthRequest.waitForEvents(priority); - expectedParams.put("wait_for_events", priority.name().toLowerCase(Locale.ROOT)); - } - if (randomBoolean()) { - ClusterHealthStatus status = randomFrom(ClusterHealthStatus.values()); - healthRequest.waitForStatus(status); - expectedParams.put("wait_for_status", status.name().toLowerCase(Locale.ROOT)); - } - if (randomBoolean()) { - boolean waitForNoInitializingShards = randomBoolean(); - healthRequest.waitForNoInitializingShards(waitForNoInitializingShards); - if (waitForNoInitializingShards) { - expectedParams.put("wait_for_no_initializing_shards", Boolean.TRUE.toString()); - } - } - if (randomBoolean()) { - boolean waitForNoRelocatingShards = randomBoolean(); - healthRequest.waitForNoRelocatingShards(waitForNoRelocatingShards); - if (waitForNoRelocatingShards) { - expectedParams.put("wait_for_no_relocating_shards", Boolean.TRUE.toString()); - } - } - String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); - healthRequest.indices(indices); - - Request request = RequestConverters.clusterHealth(healthRequest); - assertThat(request, CoreMatchers.notNullValue()); - assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); - assertThat(request.getEntity(), nullValue()); - if (indices != null && indices.length > 0) { - assertThat(request.getEndpoint(), equalTo("/_cluster/health/" + String.join(",", indices))); - } else { - assertThat(request.getEndpoint(), equalTo("/_cluster/health")); - } - assertThat(request.getParameters(), equalTo(expectedParams)); - } - public void testRollover() throws IOException { RolloverRequest rolloverRequest = new RolloverRequest(randomAlphaOfLengthBetween(3, 10), randomBoolean() ? 
null : randomAlphaOfLengthBetween(3, 10)); @@ -1945,306 +1895,6 @@ public class RequestConvertersTests extends ESTestCase { assertEquals(expectedParams, request.getParameters()); } - public void testCancelTasks() { - CancelTasksRequest request = new CancelTasksRequest(); - Map expectedParams = new HashMap<>(); - TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); - TaskId parentTaskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); - request.setTaskId(taskId); - request.setParentTaskId(parentTaskId); - expectedParams.put("task_id", taskId.toString()); - expectedParams.put("parent_task_id", parentTaskId.toString()); - Request httpRequest = RequestConverters.cancelTasks(request); - assertThat(httpRequest, notNullValue()); - assertThat(httpRequest.getMethod(), equalTo(HttpPost.METHOD_NAME)); - assertThat(httpRequest.getEntity(), nullValue()); - assertThat(httpRequest.getEndpoint(), equalTo("/_tasks/_cancel")); - assertThat(httpRequest.getParameters(), equalTo(expectedParams)); - } - - public void testListTasks() { - { - ListTasksRequest request = new ListTasksRequest(); - Map expectedParams = new HashMap<>(); - if (randomBoolean()) { - request.setDetailed(randomBoolean()); - if (request.getDetailed()) { - expectedParams.put("detailed", "true"); - } - } - if (randomBoolean()) { - request.setWaitForCompletion(randomBoolean()); - if (request.getWaitForCompletion()) { - expectedParams.put("wait_for_completion", "true"); - } - } - if (randomBoolean()) { - String timeout = randomTimeValue(); - request.setTimeout(timeout); - expectedParams.put("timeout", timeout); - } - if (randomBoolean()) { - if (randomBoolean()) { - TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); - request.setParentTaskId(taskId); - expectedParams.put("parent_task_id", taskId.toString()); - } else { - request.setParentTask(TaskId.EMPTY_TASK_ID); - } - } - if (randomBoolean()) { - String[] nodes = generateRandomStringArray(10, 8, false); - request.setNodes(nodes); - if (nodes.length > 0) { - expectedParams.put("nodes", String.join(",", nodes)); - } - } - if (randomBoolean()) { - String[] actions = generateRandomStringArray(10, 8, false); - request.setActions(actions); - if (actions.length > 0) { - expectedParams.put("actions", String.join(",", actions)); - } - } - expectedParams.put("group_by", "none"); - Request httpRequest = RequestConverters.listTasks(request); - assertThat(httpRequest, notNullValue()); - assertThat(httpRequest.getMethod(), equalTo(HttpGet.METHOD_NAME)); - assertThat(httpRequest.getEntity(), nullValue()); - assertThat(httpRequest.getEndpoint(), equalTo("/_tasks")); - assertThat(httpRequest.getParameters(), equalTo(expectedParams)); - } - { - ListTasksRequest request = new ListTasksRequest(); - request.setTaskId(new TaskId(randomAlphaOfLength(5), randomNonNegativeLong())); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.listTasks(request)); - assertEquals("TaskId cannot be used for list tasks request", exception.getMessage()); - } - } - - public void testGetRepositories() { - Map expectedParams = new HashMap<>(); - StringBuilder endpoint = new StringBuilder("/_snapshot"); - - GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(); - setRandomMasterTimeout(getRepositoriesRequest, expectedParams); - setRandomLocal(getRepositoriesRequest, expectedParams); - - if (randomBoolean()) { - String[] entries = new String[] { "a", "b", "c" }; - 
getRepositoriesRequest.repositories(entries); - endpoint.append("/" + String.join(",", entries)); - } - - Request request = RequestConverters.getRepositories(getRepositoriesRequest); - assertThat(endpoint.toString(), equalTo(request.getEndpoint())); - assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); - } - - public void testCreateRepository() throws IOException { - String repository = randomIndicesNames(1, 1)[0]; - String endpoint = "/_snapshot/" + repository; - Path repositoryLocation = PathUtils.get("."); - PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(repository); - putRepositoryRequest.type(FsRepository.TYPE); - putRepositoryRequest.verify(randomBoolean()); - - putRepositoryRequest.settings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .build()); - - Request request = RequestConverters.createRepository(putRepositoryRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpPut.METHOD_NAME, equalTo(request.getMethod())); - assertToXContentBody(putRepositoryRequest, request.getEntity()); - } - - public void testDeleteRepository() { - Map expectedParams = new HashMap<>(); - String repository = randomIndicesNames(1, 1)[0]; - - StringBuilder endpoint = new StringBuilder("/_snapshot/" + repository); - - DeleteRepositoryRequest deleteRepositoryRequest = new DeleteRepositoryRequest(); - deleteRepositoryRequest.name(repository); - setRandomMasterTimeout(deleteRepositoryRequest, expectedParams); - setRandomTimeout(deleteRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - - Request request = RequestConverters.deleteRepository(deleteRepositoryRequest); - assertThat(endpoint.toString(), equalTo(request.getEndpoint())); - assertThat(HttpDelete.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); - assertNull(request.getEntity()); - } - - public void testVerifyRepository() { - Map expectedParams = new HashMap<>(); - String repository = randomIndicesNames(1, 1)[0]; - String endpoint = "/_snapshot/" + repository + "/_verify"; - - VerifyRepositoryRequest verifyRepositoryRequest = new VerifyRepositoryRequest(repository); - setRandomMasterTimeout(verifyRepositoryRequest, expectedParams); - setRandomTimeout(verifyRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - - Request request = RequestConverters.verifyRepository(verifyRepositoryRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpPost.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); - } - - public void testCreateSnapshot() throws IOException { - Map expectedParams = new HashMap<>(); - String repository = randomIndicesNames(1, 1)[0]; - String snapshot = "snapshot-" + generateRandomStringArray(1, randomInt(10), false, false)[0]; - String endpoint = "/_snapshot/" + repository + "/" + snapshot; - - CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(repository, snapshot); - setRandomMasterTimeout(createSnapshotRequest, expectedParams); - Boolean waitForCompletion = randomBoolean(); - createSnapshotRequest.waitForCompletion(waitForCompletion); - - if (waitForCompletion) { - 
expectedParams.put("wait_for_completion", waitForCompletion.toString()); - } - - Request request = RequestConverters.createSnapshot(createSnapshotRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpPut.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); - assertToXContentBody(createSnapshotRequest, request.getEntity()); - } - - public void testGetSnapshots() { - Map expectedParams = new HashMap<>(); - String repository = randomIndicesNames(1, 1)[0]; - String snapshot1 = "snapshot1-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); - String snapshot2 = "snapshot2-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); - - String endpoint = String.format(Locale.ROOT, "/_snapshot/%s/%s,%s", repository, snapshot1, snapshot2); - - GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest(); - getSnapshotsRequest.repository(repository); - getSnapshotsRequest.snapshots(Arrays.asList(snapshot1, snapshot2).toArray(new String[0])); - setRandomMasterTimeout(getSnapshotsRequest, expectedParams); - - if (randomBoolean()) { - boolean ignoreUnavailable = randomBoolean(); - getSnapshotsRequest.ignoreUnavailable(ignoreUnavailable); - expectedParams.put("ignore_unavailable", Boolean.toString(ignoreUnavailable)); - } else { - expectedParams.put("ignore_unavailable", Boolean.FALSE.toString()); - } - - if (randomBoolean()) { - boolean verbose = randomBoolean(); - getSnapshotsRequest.verbose(verbose); - expectedParams.put("verbose", Boolean.toString(verbose)); - } else { - expectedParams.put("verbose", Boolean.TRUE.toString()); - } - - Request request = RequestConverters.getSnapshots(getSnapshotsRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); - assertNull(request.getEntity()); - } - - public void testGetAllSnapshots() { - Map expectedParams = new HashMap<>(); - String repository = randomIndicesNames(1, 1)[0]; - - String endpoint = String.format(Locale.ROOT, "/_snapshot/%s/_all", repository); - - GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest(repository); - setRandomMasterTimeout(getSnapshotsRequest, expectedParams); - - boolean ignoreUnavailable = randomBoolean(); - getSnapshotsRequest.ignoreUnavailable(ignoreUnavailable); - expectedParams.put("ignore_unavailable", Boolean.toString(ignoreUnavailable)); - - boolean verbose = randomBoolean(); - getSnapshotsRequest.verbose(verbose); - expectedParams.put("verbose", Boolean.toString(verbose)); - - Request request = RequestConverters.getSnapshots(getSnapshotsRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); - assertNull(request.getEntity()); - } - - public void testSnapshotsStatus() { - Map expectedParams = new HashMap<>(); - String repository = randomIndicesNames(1, 1)[0]; - String[] snapshots = randomIndicesNames(1, 5); - StringBuilder snapshotNames = new StringBuilder(snapshots[0]); - for (int idx = 1; idx < snapshots.length; idx++) { - snapshotNames.append(",").append(snapshots[idx]); - } - boolean ignoreUnavailable = randomBoolean(); - String endpoint = "/_snapshot/" + repository + "/" + snapshotNames.toString() + "/_status"; - - SnapshotsStatusRequest snapshotsStatusRequest = new SnapshotsStatusRequest(repository, snapshots); - 
setRandomMasterTimeout(snapshotsStatusRequest, expectedParams); - snapshotsStatusRequest.ignoreUnavailable(ignoreUnavailable); - expectedParams.put("ignore_unavailable", Boolean.toString(ignoreUnavailable)); - - Request request = RequestConverters.snapshotsStatus(snapshotsStatusRequest); - assertThat(request.getEndpoint(), equalTo(endpoint)); - assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); - assertThat(request.getParameters(), equalTo(expectedParams)); - assertThat(request.getEntity(), is(nullValue())); - } - - public void testRestoreSnapshot() throws IOException { - Map<String, String> expectedParams = new HashMap<>(); - String repository = randomIndicesNames(1, 1)[0]; - String snapshot = "snapshot-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); - String endpoint = String.format(Locale.ROOT, "/_snapshot/%s/%s/_restore", repository, snapshot); - - RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(repository, snapshot); - setRandomMasterTimeout(restoreSnapshotRequest, expectedParams); - if (randomBoolean()) { - restoreSnapshotRequest.waitForCompletion(true); - expectedParams.put("wait_for_completion", "true"); - } - if (randomBoolean()) { - String timeout = randomTimeValue(); - restoreSnapshotRequest.masterNodeTimeout(timeout); - expectedParams.put("master_timeout", timeout); - } - - Request request = RequestConverters.restoreSnapshot(restoreSnapshotRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpPost.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); - assertToXContentBody(restoreSnapshotRequest, request.getEntity()); - } - - public void testDeleteSnapshot() { - Map<String, String> expectedParams = new HashMap<>(); - String repository = randomIndicesNames(1, 1)[0]; - String snapshot = "snapshot-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); - - String endpoint = String.format(Locale.ROOT, "/_snapshot/%s/%s", repository, snapshot); - - DeleteSnapshotRequest deleteSnapshotRequest = new DeleteSnapshotRequest(); - deleteSnapshotRequest.repository(repository); - deleteSnapshotRequest.snapshot(snapshot); - setRandomMasterTimeout(deleteSnapshotRequest, expectedParams); - - Request request = RequestConverters.deleteSnapshot(deleteSnapshotRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpDelete.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); - assertNull(request.getEntity()); - } - public void testPutTemplateRequest() throws Exception { Map<String, String> names = new HashMap<>(); names.put("log", "log"); @@ -2338,6 +1988,42 @@ public class RequestConvertersTests extends ESTestCase { assertThat(request.getEntity(), nullValue()); } + public void testPutScript() throws Exception { + PutStoredScriptRequest putStoredScriptRequest = new PutStoredScriptRequest(); + + String id = randomAlphaOfLengthBetween(5, 10); + putStoredScriptRequest.id(id); + + XContentType xContentType = randomFrom(XContentType.values()); + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + builder.startObject(); + builder.startObject("script") + .field("lang", "painless") + .field("source", "Math.log(_score * 2) + params.multiplier") + .endObject(); + builder.endObject(); + + putStoredScriptRequest.content(BytesReference.bytes(builder), xContentType); + } + + Map<String, String> expectedParams = new HashMap<>(); + setRandomMasterTimeout(putStoredScriptRequest, expectedParams); +
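(For orientation, not part of the patch: when the randomly picked content type happens to be JSON, the XContentBuilder in testPutScript above serializes the stored-script body to the following document, pretty-printed here for readability:)

```
{
  "script": {
    "lang": "painless",
    "source": "Math.log(_score * 2) + params.multiplier"
  }
}
```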
setRandomTimeout(putStoredScriptRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + if (randomBoolean()) { + String context = randomAlphaOfLengthBetween(5, 10); + putStoredScriptRequest.context(context); + expectedParams.put("context", context); + } + + Request request = RequestConverters.putScript(putStoredScriptRequest); + + assertThat(request.getEndpoint(), equalTo("/_scripts/" + id)); + assertThat(request.getParameters(), equalTo(expectedParams)); + assertNotNull(request.getEntity()); + assertToXContentBody(putStoredScriptRequest, request.getEntity()); + } + public void testAnalyzeRequest() throws Exception { AnalyzeRequest indexAnalyzeRequest = new AnalyzeRequest() .text("Here is some text") @@ -2380,7 +2066,7 @@ public class RequestConvertersTests extends ESTestCase { assertThat(request.getEntity(), nullValue()); } - private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException { + static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException { BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false); assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity))); @@ -2522,94 +2208,6 @@ public class RequestConvertersTests extends ESTestCase { + "previous requests have content-type [" + xContentType + "]", exception.getMessage()); } - public void testXPackInfo() { - XPackInfoRequest infoRequest = new XPackInfoRequest(); - Map expectedParams = new HashMap<>(); - infoRequest.setVerbose(randomBoolean()); - if (false == infoRequest.isVerbose()) { - expectedParams.put("human", "false"); - } - int option = between(0, 2); - switch (option) { - case 0: - infoRequest.setCategories(EnumSet.allOf(XPackInfoRequest.Category.class)); - break; - case 1: - infoRequest.setCategories(EnumSet.of(XPackInfoRequest.Category.FEATURES)); - expectedParams.put("categories", "features"); - break; - case 2: - infoRequest.setCategories(EnumSet.of(XPackInfoRequest.Category.FEATURES, XPackInfoRequest.Category.BUILD)); - expectedParams.put("categories", "build,features"); - break; - default: - throw new IllegalArgumentException("invalid option [" + option + "]"); - } - - Request request = RequestConverters.xPackInfo(infoRequest); - assertEquals(HttpGet.METHOD_NAME, request.getMethod()); - assertEquals("/_xpack", request.getEndpoint()); - assertNull(request.getEntity()); - assertEquals(expectedParams, request.getParameters()); - } - - public void testGetMigrationAssistance() { - IndexUpgradeInfoRequest upgradeInfoRequest = new IndexUpgradeInfoRequest(); - String expectedEndpoint = "/_xpack/migration/assistance"; - if (randomBoolean()) { - String[] indices = randomIndicesNames(1, 5); - upgradeInfoRequest.indices(indices); - expectedEndpoint += "/" + String.join(",", indices); - } - Map expectedParams = new HashMap<>(); - setRandomIndicesOptions(upgradeInfoRequest::indicesOptions, upgradeInfoRequest::indicesOptions, expectedParams); - Request request = RequestConverters.getMigrationAssistance(upgradeInfoRequest); - assertEquals(HttpGet.METHOD_NAME, request.getMethod()); - assertEquals(expectedEndpoint, request.getEndpoint()); - assertNull(request.getEntity()); - assertEquals(expectedParams, request.getParameters()); - } - - public void testXPackPutWatch() throws Exception { - PutWatchRequest putWatchRequest = new PutWatchRequest(); - 
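(One theme worth calling out across the visibility changes in this file: assertToXContentBody above and the setRandom* and randomIndicesNames helpers below drop private so that sibling converter test classes in the same package can reuse them. Condensed from MigrationRequestConvertersTests earlier in this diff, which already relies on this:)

```java
IndexUpgradeInfoRequest upgradeInfoRequest = new IndexUpgradeInfoRequest();
Map<String, String> expectedParams = new HashMap<>();
String[] indices = RequestConvertersTests.randomIndicesNames(1, 5);  // now package-private
upgradeInfoRequest.indices(indices);
RequestConvertersTests.setRandomIndicesOptions(upgradeInfoRequest::indicesOptions,
        upgradeInfoRequest::indicesOptions, expectedParams);
```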
String watchId = randomAlphaOfLength(10); - putWatchRequest.setId(watchId); - String body = randomAlphaOfLength(20); - putWatchRequest.setSource(new BytesArray(body), XContentType.JSON); - - Map<String, String> expectedParams = new HashMap<>(); - if (randomBoolean()) { - putWatchRequest.setActive(false); - expectedParams.put("active", "false"); - } - - if (randomBoolean()) { - long version = randomLongBetween(10, 100); - putWatchRequest.setVersion(version); - expectedParams.put("version", String.valueOf(version)); - } - - Request request = RequestConverters.xPackWatcherPutWatch(putWatchRequest); - assertEquals(HttpPut.METHOD_NAME, request.getMethod()); - assertEquals("/_xpack/watcher/watch/" + watchId, request.getEndpoint()); - assertEquals(expectedParams, request.getParameters()); - assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters())); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - request.getEntity().writeTo(bos); - assertThat(bos.toString("UTF-8"), is(body)); - } - - public void testXPackDeleteWatch() { - DeleteWatchRequest deleteWatchRequest = new DeleteWatchRequest(); - String watchId = randomAlphaOfLength(10); - deleteWatchRequest.setId(watchId); - - Request request = RequestConverters.xPackWatcherDeleteWatch(deleteWatchRequest); - assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); - assertEquals("/_xpack/watcher/watch/" + watchId, request.getEndpoint()); - assertThat(request.getEntity(), nullValue()); - } - /** * Randomize the {@link FetchSourceContext} request parameters. */ @@ -2672,8 +2270,8 @@ public class RequestConvertersTests extends ESTestCase { } } - private static void setRandomIndicesOptions(Consumer<IndicesOptions> setter, Supplier<IndicesOptions> getter, - Map<String, String> expectedParams) { + static void setRandomIndicesOptions(Consumer<IndicesOptions> setter, Supplier<IndicesOptions> getter, + Map<String, String> expectedParams) { if (randomBoolean()) { setter.accept(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); @@ -2721,11 +2319,11 @@ public class RequestConvertersTests extends ESTestCase { } } - private static void setRandomLocal(MasterNodeReadRequest<?> request, Map<String, String> expectedParams) { + static void setRandomLocal(MasterNodeReadRequest<?> request, Map<String, String> expectedParams) { setRandomLocal(request::local, expectedParams); } - private static void setRandomTimeout(Consumer<String> setter, TimeValue defaultTimeout, Map<String, String> expectedParams) { + static void setRandomTimeout(Consumer<String> setter, TimeValue defaultTimeout, Map<String, String> expectedParams) { if (randomBoolean()) { String timeout = randomTimeValue(); setter.accept(timeout); @@ -2735,7 +2333,7 @@ public class RequestConvertersTests extends ESTestCase { } } - private static void setRandomMasterTimeout(MasterNodeRequest<?> request, Map<String, String> expectedParams) { + static void setRandomMasterTimeout(MasterNodeRequest<?> request, Map<String, String> expectedParams) { if (randomBoolean()) { String masterTimeout = randomTimeValue(); request.masterNodeTimeout(masterTimeout); @@ -2749,8 +2347,8 @@ public class RequestConvertersTests extends ESTestCase { setRandomWaitForActiveShards(setter, ActiveShardCount.DEFAULT, expectedParams); } - private static void setRandomWaitForActiveShards(Consumer<ActiveShardCount> setter, ActiveShardCount defaultActiveShardCount, - Map<String, String> expectedParams) { + static void setRandomWaitForActiveShards(Consumer<ActiveShardCount> setter, ActiveShardCount defaultActiveShardCount, + Map<String, String> expectedParams) { if (randomBoolean()) { int waitForActiveShardsInt = randomIntBetween(-1, 5); String waitForActiveShardsString; @@ -2810,7 +2408,7 @@ public class RequestConvertersTests extends
ESTestCase { return excludesParam.toString(); } - private static String[] randomIndicesNames(int minIndicesNum, int maxIndicesNum) { + static String[] randomIndicesNames(int minIndicesNum, int maxIndicesNum) { int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum); String[] indices = new String[numIndices]; for (int i = 0; i < numIndices; i++) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index b5d8dbb628e..3bd47306e5e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -649,7 +649,6 @@ public class RestHighLevelClientTests extends ESTestCase { "cluster.remote_info", "count", "create", - "delete_by_query", "exists_source", "get_source", "indices.delete_alias", @@ -659,14 +658,11 @@ public class RestHighLevelClientTests extends ESTestCase { "indices.get_upgrade", "indices.put_alias", "mtermvectors", - "put_script", - "reindex", "reindex_rethrottle", "render_search_template", "scripts_painless_execute", "tasks.get", - "termvectors", - "update_by_query" + "termvectors" }; //These API are not required for high-level client feature completeness String[] notRequiredApi = new String[] { @@ -685,6 +681,7 @@ public class RestHighLevelClientTests extends ESTestCase { "nodes.stats", "nodes.hot_threads", "nodes.usage", + "nodes.reload_secure_settings", "search_shards", }; Set deprecatedMethods = new HashSet<>(); @@ -758,7 +755,9 @@ public class RestHighLevelClientTests extends ESTestCase { apiName.startsWith("license.") == false && apiName.startsWith("machine_learning.") == false && apiName.startsWith("watcher.") == false && - apiName.startsWith("migration.") == false) { + apiName.startsWith("graph.") == false && + apiName.startsWith("migration.") == false && + apiName.startsWith("security.") == false) { apiNotFound.add(apiName); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 9c9c5425f00..063fce9bcac 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -256,7 +256,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase { assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); Terms termsAgg = searchResponse.getAggregations().get("agg1"); assertEquals("agg1", termsAgg.getName()); assertEquals(2, termsAgg.getBuckets().size()); @@ -293,7 +293,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase { assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(5, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); Range rangeAgg = searchResponse.getAggregations().get("agg1"); assertEquals("agg1", rangeAgg.getName()); assertEquals(2, rangeAgg.getBuckets().size()); @@ -323,7 
+323,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase { assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); Terms termsAgg = searchResponse.getAggregations().get("agg1"); assertEquals("agg1", termsAgg.getName()); assertEquals(2, termsAgg.getBuckets().size()); @@ -375,7 +375,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase { assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(5, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); assertEquals(1, searchResponse.getAggregations().asList().size()); MatrixStats matrixStats = searchResponse.getAggregations().get("agg1"); assertEquals(5, matrixStats.getFieldCount("num")); @@ -474,7 +474,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase { assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(3, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); assertEquals(1, searchResponse.getAggregations().asList().size()); Terms terms = searchResponse.getAggregations().get("top-tags"); assertEquals(0, terms.getDocCountError()); @@ -513,7 +513,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase { assertNull(searchResponse.getAggregations()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(0, searchResponse.getHits().totalHits); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); assertEquals(0, searchResponse.getHits().getHits().length); assertEquals(1, searchResponse.getSuggest().size()); @@ -1034,7 +1034,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase { assertTrue(explainResponse.isExists()); assertTrue(explainResponse.isMatch()); assertTrue(explainResponse.hasExplanation()); - assertThat(explainResponse.getExplanation().getValue(), greaterThan(0.0f)); + assertThat(explainResponse.getExplanation().getValue().floatValue(), greaterThan(0.0f)); assertNull(explainResponse.getGetResult()); } { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java new file mode 100644 index 00000000000..3670379cd9f --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.security.DisableUserRequest; +import org.elasticsearch.client.security.EnableUserRequest; +import org.elasticsearch.client.security.PutUserRequest; +import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.client.RequestConvertersTests.assertToXContentBody; + +public class SecurityRequestConvertersTests extends ESTestCase { + + public void testPutUser() throws IOException { + final String username = randomAlphaOfLengthBetween(4, 12); + final char[] password = randomBoolean() ? randomAlphaOfLengthBetween(8, 12).toCharArray() : null; + final List<String> roles = Arrays.asList(generateRandomStringArray(randomIntBetween(2, 8), randomIntBetween(8, 16), false, true)); + final String email = randomBoolean() ? null : randomAlphaOfLengthBetween(12, 24); + final String fullName = randomBoolean() ? null : randomAlphaOfLengthBetween(7, 14); + final boolean enabled = randomBoolean(); + final Map<String, Object> metadata; + if (randomBoolean()) { + metadata = new HashMap<>(); + for (int i = 0; i < randomIntBetween(0, 10); i++) { + metadata.put(String.valueOf(i), randomAlphaOfLengthBetween(1, 12)); + } + } else { + metadata = null; + } + + final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); + final Map<String, String> expectedParams = getExpectedParamsFromRefreshPolicy(refreshPolicy); + + PutUserRequest putUserRequest = new PutUserRequest(username, password, roles, fullName, email, enabled, metadata, refreshPolicy); + Request request = SecurityRequestConverters.putUser(putUserRequest); + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/security/user/" + putUserRequest.getUsername(), request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(putUserRequest, request.getEntity()); + } + + public void testEnableUser() { + final String username = randomAlphaOfLengthBetween(1, 12); + final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); + final Map<String, String> expectedParams = getExpectedParamsFromRefreshPolicy(refreshPolicy); + EnableUserRequest enableUserRequest = new EnableUserRequest(username, refreshPolicy); + Request request = SecurityRequestConverters.enableUser(enableUserRequest); + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/security/user/" + username + "/_enable", request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertNull(request.getEntity()); + } + + public void testDisableUser() { + final String username = randomAlphaOfLengthBetween(1, 12); + final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); + final Map<String, String> expectedParams = getExpectedParamsFromRefreshPolicy(refreshPolicy); + DisableUserRequest disableUserRequest = new DisableUserRequest(username, refreshPolicy); + Request
request = SecurityRequestConverters.disableUser(disableUserRequest); + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/security/user/" + username + "/_disable", request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertNull(request.getEntity()); + } + + private static Map<String, String> getExpectedParamsFromRefreshPolicy(RefreshPolicy refreshPolicy) { + if (refreshPolicy != RefreshPolicy.NONE) { + return Collections.singletonMap("refresh", refreshPolicy.getValue()); + } else { + return Collections.emptyMap(); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotRequestConvertersTests.java new file mode 100644 index 00000000000..efd321aa7ee --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotRequestConvertersTests.java @@ -0,0 +1,277 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class SnapshotRequestConvertersTests extends ESTestCase { + + public void testGetRepositories() { + Map<String, String> expectedParams = 
new HashMap<>(); + StringBuilder endpoint = new StringBuilder("/_snapshot"); + + GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(); + RequestConvertersTests.setRandomMasterTimeout(getRepositoriesRequest, expectedParams); + RequestConvertersTests.setRandomLocal(getRepositoriesRequest, expectedParams); + + if (randomBoolean()) { + String[] entries = new String[] { "a", "b", "c" }; + getRepositoriesRequest.repositories(entries); + endpoint.append("/" + String.join(",", entries)); + } + + Request request = SnapshotRequestConverters.getRepositories(getRepositoriesRequest); + assertThat(endpoint.toString(), equalTo(request.getEndpoint())); + assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); + assertThat(expectedParams, equalTo(request.getParameters())); + } + + public void testCreateRepository() throws IOException { + String repository = RequestConvertersTests.randomIndicesNames(1, 1)[0]; + String endpoint = "/_snapshot/" + repository; + Path repositoryLocation = PathUtils.get("."); + PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(repository); + putRepositoryRequest.type(FsRepository.TYPE); + putRepositoryRequest.verify(randomBoolean()); + + putRepositoryRequest.settings( + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .build()); + + Request request = SnapshotRequestConverters.createRepository(putRepositoryRequest); + assertThat(endpoint, equalTo(request.getEndpoint())); + assertThat(HttpPut.METHOD_NAME, equalTo(request.getMethod())); + RequestConvertersTests.assertToXContentBody(putRepositoryRequest, request.getEntity()); + } + + public void testDeleteRepository() { + Map<String, String> expectedParams = new HashMap<>(); + String repository = RequestConvertersTests.randomIndicesNames(1, 1)[0]; + + StringBuilder endpoint = new StringBuilder("/_snapshot/" + repository); + + DeleteRepositoryRequest deleteRepositoryRequest = new DeleteRepositoryRequest(); + deleteRepositoryRequest.name(repository); + RequestConvertersTests.setRandomMasterTimeout(deleteRepositoryRequest, expectedParams); + RequestConvertersTests.setRandomTimeout(deleteRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = SnapshotRequestConverters.deleteRepository(deleteRepositoryRequest); + assertThat(endpoint.toString(), equalTo(request.getEndpoint())); + assertThat(HttpDelete.METHOD_NAME, equalTo(request.getMethod())); + assertThat(expectedParams, equalTo(request.getParameters())); + assertNull(request.getEntity()); + } + + public void testVerifyRepository() { + Map<String, String> expectedParams = new HashMap<>(); + String repository = RequestConvertersTests.randomIndicesNames(1, 1)[0]; + String endpoint = "/_snapshot/" + repository + "/_verify"; + + VerifyRepositoryRequest verifyRepositoryRequest = new VerifyRepositoryRequest(repository); + RequestConvertersTests.setRandomMasterTimeout(verifyRepositoryRequest, expectedParams); + RequestConvertersTests.setRandomTimeout(verifyRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = SnapshotRequestConverters.verifyRepository(verifyRepositoryRequest); + assertThat(endpoint, equalTo(request.getEndpoint())); + assertThat(HttpPost.METHOD_NAME, equalTo(request.getMethod())); + assertThat(expectedParams, equalTo(request.getParameters())); + } + + public 
void testCreateSnapshot() throws IOException { + Map<String, String> expectedParams = new HashMap<>(); + String repository = RequestConvertersTests.randomIndicesNames(1, 1)[0]; + String snapshot = "snapshot-" + generateRandomStringArray(1, randomInt(10), false, false)[0]; + String endpoint = "/_snapshot/" + repository + "/" + snapshot; + + CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(repository, snapshot); + RequestConvertersTests.setRandomMasterTimeout(createSnapshotRequest, expectedParams); + Boolean waitForCompletion = randomBoolean(); + createSnapshotRequest.waitForCompletion(waitForCompletion); + + if (waitForCompletion) { + expectedParams.put("wait_for_completion", waitForCompletion.toString()); + } + + Request request = SnapshotRequestConverters.createSnapshot(createSnapshotRequest); + assertThat(endpoint, equalTo(request.getEndpoint())); + assertThat(HttpPut.METHOD_NAME, equalTo(request.getMethod())); + assertThat(expectedParams, equalTo(request.getParameters())); + RequestConvertersTests.assertToXContentBody(createSnapshotRequest, request.getEntity()); + } + + public void testGetSnapshots() { + Map<String, String> expectedParams = new HashMap<>(); + String repository = RequestConvertersTests.randomIndicesNames(1, 1)[0]; + String snapshot1 = "snapshot1-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); + String snapshot2 = "snapshot2-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); + + String endpoint = String.format(Locale.ROOT, "/_snapshot/%s/%s,%s", repository, snapshot1, snapshot2); + + GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest(); + getSnapshotsRequest.repository(repository); + getSnapshotsRequest.snapshots(Arrays.asList(snapshot1, snapshot2).toArray(new String[0])); + RequestConvertersTests.setRandomMasterTimeout(getSnapshotsRequest, expectedParams); + + if (randomBoolean()) { + boolean ignoreUnavailable = randomBoolean(); + getSnapshotsRequest.ignoreUnavailable(ignoreUnavailable); + expectedParams.put("ignore_unavailable", Boolean.toString(ignoreUnavailable)); + } else { + expectedParams.put("ignore_unavailable", Boolean.FALSE.toString()); + } + + if (randomBoolean()) { + boolean verbose = randomBoolean(); + getSnapshotsRequest.verbose(verbose); + expectedParams.put("verbose", Boolean.toString(verbose)); + } else { + expectedParams.put("verbose", Boolean.TRUE.toString()); + } + + Request request = SnapshotRequestConverters.getSnapshots(getSnapshotsRequest); + assertThat(endpoint, equalTo(request.getEndpoint())); + assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); + assertThat(expectedParams, equalTo(request.getParameters())); + assertNull(request.getEntity()); + } + + public void testGetAllSnapshots() { + Map<String, String> expectedParams = new HashMap<>(); + String repository = RequestConvertersTests.randomIndicesNames(1, 1)[0]; + + String endpoint = String.format(Locale.ROOT, "/_snapshot/%s/_all", repository); + + GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest(repository); + RequestConvertersTests.setRandomMasterTimeout(getSnapshotsRequest, expectedParams); + + boolean ignoreUnavailable = randomBoolean(); + getSnapshotsRequest.ignoreUnavailable(ignoreUnavailable); + expectedParams.put("ignore_unavailable", Boolean.toString(ignoreUnavailable)); + + boolean verbose = randomBoolean(); + getSnapshotsRequest.verbose(verbose); + expectedParams.put("verbose", Boolean.toString(verbose)); + + Request request = SnapshotRequestConverters.getSnapshots(getSnapshotsRequest); + assertThat(endpoint, 
equalTo(request.getEndpoint())); + assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); + assertThat(expectedParams, equalTo(request.getParameters())); + assertNull(request.getEntity()); + } + + public void testSnapshotsStatus() { + Map<String, String> expectedParams = new HashMap<>(); + String repository = RequestConvertersTests.randomIndicesNames(1, 1)[0]; + String[] snapshots = RequestConvertersTests.randomIndicesNames(1, 5); + StringBuilder snapshotNames = new StringBuilder(snapshots[0]); + for (int idx = 1; idx < snapshots.length; idx++) { + snapshotNames.append(",").append(snapshots[idx]); + } + boolean ignoreUnavailable = randomBoolean(); + String endpoint = "/_snapshot/" + repository + "/" + snapshotNames.toString() + "/_status"; + + SnapshotsStatusRequest snapshotsStatusRequest = new SnapshotsStatusRequest(repository, snapshots); + RequestConvertersTests.setRandomMasterTimeout(snapshotsStatusRequest, expectedParams); + snapshotsStatusRequest.ignoreUnavailable(ignoreUnavailable); + expectedParams.put("ignore_unavailable", Boolean.toString(ignoreUnavailable)); + + Request request = SnapshotRequestConverters.snapshotsStatus(snapshotsStatusRequest); + assertThat(request.getEndpoint(), equalTo(endpoint)); + assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(request.getParameters(), equalTo(expectedParams)); + assertThat(request.getEntity(), is(nullValue())); + } + + public void testRestoreSnapshot() throws IOException { + Map<String, String> expectedParams = new HashMap<>(); + String repository = RequestConvertersTests.randomIndicesNames(1, 1)[0]; + String snapshot = "snapshot-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); + String endpoint = String.format(Locale.ROOT, "/_snapshot/%s/%s/_restore", repository, snapshot); + + RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(repository, snapshot); + RequestConvertersTests.setRandomMasterTimeout(restoreSnapshotRequest, expectedParams); + if (randomBoolean()) { + restoreSnapshotRequest.waitForCompletion(true); + expectedParams.put("wait_for_completion", "true"); + } + if (randomBoolean()) { + String timeout = randomTimeValue(); + restoreSnapshotRequest.masterNodeTimeout(timeout); + expectedParams.put("master_timeout", timeout); + } + + Request request = SnapshotRequestConverters.restoreSnapshot(restoreSnapshotRequest); + assertThat(endpoint, equalTo(request.getEndpoint())); + assertThat(HttpPost.METHOD_NAME, equalTo(request.getMethod())); + assertThat(expectedParams, equalTo(request.getParameters())); + RequestConvertersTests.assertToXContentBody(restoreSnapshotRequest, request.getEntity()); + } + + public void testDeleteSnapshot() { + Map<String, String> expectedParams = new HashMap<>(); + String repository = RequestConvertersTests.randomIndicesNames(1, 1)[0]; + String snapshot = "snapshot-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); + + String endpoint = String.format(Locale.ROOT, "/_snapshot/%s/%s", repository, snapshot); + + DeleteSnapshotRequest deleteSnapshotRequest = new DeleteSnapshotRequest(); + deleteSnapshotRequest.repository(repository); + deleteSnapshotRequest.snapshot(snapshot); + RequestConvertersTests.setRandomMasterTimeout(deleteSnapshotRequest, expectedParams); + + Request request = SnapshotRequestConverters.deleteSnapshot(deleteSnapshotRequest); + assertThat(endpoint, equalTo(request.getEndpoint())); + assertThat(HttpDelete.METHOD_NAME, equalTo(request.getMethod())); + assertThat(expectedParams, equalTo(request.getParameters())); + assertNull(request.getEntity()); + } +} diff 
--git a/client/rest-high-level/src/test/java/org/elasticsearch/client/StoredScriptsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/StoredScriptsIT.java index 14734c4ab60..b15467d24ba 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/StoredScriptsIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/StoredScriptsIT.java @@ -1,4 +1,5 @@ -package org.elasticsearch.client;/* +package org.elasticsearch.client; +/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright @@ -17,31 +18,27 @@ package org.elasticsearch.client;/* * under the License. */ - -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.util.EntityUtils; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.Script; import org.elasticsearch.script.StoredScriptSource; import java.util.Collections; +import java.util.Map; -import static java.util.Collections.emptyMap; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; public class StoredScriptsIT extends ESRestHighLevelClientTestCase { - final String id = "calculate-score"; + private static final String id = "calculate-score"; public void testGetStoredScript() throws Exception { final StoredScriptSource scriptSource = @@ -49,16 +46,9 @@ public class StoredScriptsIT extends ESRestHighLevelClientTestCase { "Math.log(_score * 2) + params.my_modifier", Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())); - final String script = Strings.toString(scriptSource.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS)); - // TODO: change to HighLevel PutStoredScriptRequest when it will be ready - // so far - using low-level REST API - Response putResponse = - adminClient() - .performRequest("PUT", "/_scripts/calculate-score", emptyMap(), - new StringEntity("{\"script\":" + script + "}", - ContentType.APPLICATION_JSON)); - assertEquals(putResponse.getStatusLine().getReasonPhrase(), 200, putResponse.getStatusLine().getStatusCode()); - assertEquals("{\"acknowledged\":true}", EntityUtils.toString(putResponse.getEntity())); + PutStoredScriptRequest request = + new PutStoredScriptRequest(id, "search", new BytesArray("{}"), XContentType.JSON, scriptSource); + assertAcked(execute(request, highLevelClient()::putScript, highLevelClient()::putScriptAsync)); GetStoredScriptRequest getRequest = new GetStoredScriptRequest("calculate-score"); getRequest.masterNodeTimeout("50s"); @@ -75,25 +65,14 @@ public class 
StoredScriptsIT extends ESRestHighLevelClientTestCase { "Math.log(_score * 2) + params.my_modifier", Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())); - final String script = Strings.toString(scriptSource.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS)); - // TODO: change to HighLevel PutStoredScriptRequest when it will be ready - // so far - using low-level REST API - Response putResponse = - adminClient() - .performRequest("PUT", "/_scripts/" + id, emptyMap(), - new StringEntity("{\"script\":" + script + "}", - ContentType.APPLICATION_JSON)); - assertEquals(putResponse.getStatusLine().getReasonPhrase(), 200, putResponse.getStatusLine().getStatusCode()); - assertEquals("{\"acknowledged\":true}", EntityUtils.toString(putResponse.getEntity())); + PutStoredScriptRequest request = + new PutStoredScriptRequest(id, "search", new BytesArray("{}"), XContentType.JSON, scriptSource); + assertAcked(execute(request, highLevelClient()::putScript, highLevelClient()::putScriptAsync)); DeleteStoredScriptRequest deleteRequest = new DeleteStoredScriptRequest(id); deleteRequest.masterNodeTimeout("50s"); deleteRequest.timeout("50s"); - - AcknowledgedResponse deleteResponse = execute(deleteRequest, highLevelClient()::deleteScript, - highLevelClient()::deleteScriptAsync); - - assertThat(deleteResponse.isAcknowledged(), equalTo(true)); + assertAcked(execute(deleteRequest, highLevelClient()::deleteScript, highLevelClient()::deleteScriptAsync)); GetStoredScriptRequest getRequest = new GetStoredScriptRequest(id); @@ -102,4 +81,21 @@ public class StoredScriptsIT extends ESRestHighLevelClientTestCase { highLevelClient()::getScriptAsync)); assertThat(statusException.status(), equalTo(RestStatus.NOT_FOUND)); } + + public void testPutScript() throws Exception { + final StoredScriptSource scriptSource = + new StoredScriptSource("painless", + "Math.log(_score * 2) + params.my_modifier", + Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())); + + PutStoredScriptRequest request = + new PutStoredScriptRequest(id, "search", new BytesArray("{}"), XContentType.JSON, scriptSource); + assertAcked(execute(request, highLevelClient()::putScript, highLevelClient()::putScriptAsync)); + + Map<String, Object> script = getAsMap("/_scripts/" + id); + assertThat(extractValue("_id", script), equalTo(id)); + assertThat(extractValue("found", script), equalTo(true)); + assertThat(extractValue("script.lang", script), equalTo("painless")); + assertThat(extractValue("script.source", script), equalTo("Math.log(_score * 2) + params.my_modifier")); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java new file mode 100644 index 00000000000..ff6726faee1 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.test.ESTestCase; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class TasksRequestConvertersTests extends ESTestCase { + + public void testCancelTasks() { + CancelTasksRequest request = new CancelTasksRequest(); + Map<String, String> expectedParams = new HashMap<>(); + TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); + TaskId parentTaskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); + request.setTaskId(taskId); + request.setParentTaskId(parentTaskId); + expectedParams.put("task_id", taskId.toString()); + expectedParams.put("parent_task_id", parentTaskId.toString()); + Request httpRequest = TasksRequestConverters.cancelTasks(request); + assertThat(httpRequest, notNullValue()); + assertThat(httpRequest.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(httpRequest.getEntity(), nullValue()); + assertThat(httpRequest.getEndpoint(), equalTo("/_tasks/_cancel")); + assertThat(httpRequest.getParameters(), equalTo(expectedParams)); + } + + public void testListTasks() { + { + ListTasksRequest request = new ListTasksRequest(); + Map<String, String> expectedParams = new HashMap<>(); + if (randomBoolean()) { + request.setDetailed(randomBoolean()); + if (request.getDetailed()) { + expectedParams.put("detailed", "true"); + } + } + if (randomBoolean()) { + request.setWaitForCompletion(randomBoolean()); + if (request.getWaitForCompletion()) { + expectedParams.put("wait_for_completion", "true"); + } + } + if (randomBoolean()) { + String timeout = randomTimeValue(); + request.setTimeout(timeout); + expectedParams.put("timeout", timeout); + } + if (randomBoolean()) { + if (randomBoolean()) { + TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); + request.setParentTaskId(taskId); + expectedParams.put("parent_task_id", taskId.toString()); + } else { + request.setParentTask(TaskId.EMPTY_TASK_ID); + } + } + if (randomBoolean()) { + String[] nodes = generateRandomStringArray(10, 8, false); + request.setNodes(nodes); + if (nodes.length > 0) { + expectedParams.put("nodes", String.join(",", nodes)); + } + } + if (randomBoolean()) { + String[] actions = generateRandomStringArray(10, 8, false); + request.setActions(actions); + if (actions.length > 0) { + expectedParams.put("actions", String.join(",", actions)); + } + } + expectedParams.put("group_by", "none"); + Request httpRequest = TasksRequestConverters.listTasks(request); + assertThat(httpRequest, notNullValue()); + assertThat(httpRequest.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(httpRequest.getEntity(), nullValue()); + 
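// whatever the randomized options above, the list-tasks converter should always issue a body-less GET to the bare /_tasks endpoint, carrying everything else in query-string parameters + 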
assertThat(httpRequest.getEndpoint(), equalTo("/_tasks")); + assertThat(httpRequest.getParameters(), equalTo(expectedParams)); + } + { + ListTasksRequest request = new ListTasksRequest(); + request.setTaskId(new TaskId(randomAlphaOfLength(5), randomNonNegativeLong())); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () + -> TasksRequestConverters.listTasks(request)); + assertEquals("TaskId cannot be used for list tasks request", exception.getMessage()); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherRequestConvertersTests.java new file mode 100644 index 00000000000..cf5af1dd594 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherRequestConvertersTests.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; +import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; +import org.elasticsearch.test.ESTestCase; + +import java.io.ByteArrayOutputStream; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class WatcherRequestConvertersTests extends ESTestCase { + + public void testPutWatch() throws Exception { + PutWatchRequest putWatchRequest = new PutWatchRequest(); + String watchId = randomAlphaOfLength(10); + putWatchRequest.setId(watchId); + String body = randomAlphaOfLength(20); + putWatchRequest.setSource(new BytesArray(body), XContentType.JSON); + + Map<String, String> expectedParams = new HashMap<>(); + if (randomBoolean()) { + putWatchRequest.setActive(false); + expectedParams.put("active", "false"); + } + + if (randomBoolean()) { + long version = randomLongBetween(10, 100); + putWatchRequest.setVersion(version); + expectedParams.put("version", String.valueOf(version)); + } + + Request request = WatcherRequestConverters.putWatch(putWatchRequest); + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/watcher/watch/" + watchId, request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters())); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + request.getEntity().writeTo(bos); + assertThat(bos.toString("UTF-8"), is(body)); + } + + public void testDeleteWatch() { 
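+ // delete watch only needs the watch id in the endpoint path; the converter should send no request body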
+ DeleteWatchRequest deleteWatchRequest = new DeleteWatchRequest(); + String watchId = randomAlphaOfLength(10); + deleteWatchRequest.setId(watchId); + + Request request = WatcherRequestConverters.deleteWatch(deleteWatchRequest); + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/watcher/watch/" + watchId, request.getEndpoint()); + assertThat(request.getEntity(), nullValue()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackRequestConvertersTests.java new file mode 100644 index 00000000000..d2f20273d4d --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackRequestConvertersTests.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.test.ESTestCase; +import org.junit.Assert; + +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Map; + +public class XPackRequestConvertersTests extends ESTestCase { + + public void testXPackInfo() { + XPackInfoRequest infoRequest = new XPackInfoRequest(); + Map<String, String> expectedParams = new HashMap<>(); + infoRequest.setVerbose(ESTestCase.randomBoolean()); + if (false == infoRequest.isVerbose()) { + expectedParams.put("human", "false"); + } + int option = ESTestCase.between(0, 2); + switch (option) { + case 0: + infoRequest.setCategories(EnumSet.allOf(XPackInfoRequest.Category.class)); + break; + case 1: + infoRequest.setCategories(EnumSet.of(XPackInfoRequest.Category.FEATURES)); + expectedParams.put("categories", "features"); + break; + case 2: + infoRequest.setCategories(EnumSet.of(XPackInfoRequest.Category.FEATURES, XPackInfoRequest.Category.BUILD)); + expectedParams.put("categories", "build,features"); + break; + default: + throw new IllegalArgumentException("invalid option [" + option + "]"); + } + + Request request = XPackRequestConverters.info(infoRequest); + Assert.assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + Assert.assertEquals("/_xpack", request.getEndpoint()); + Assert.assertNull(request.getEntity()); + Assert.assertEquals(expectedParams, request.getParameters()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index ad41c139ddc..142eacd820f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -50,6 +51,8 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -59,13 +62,24 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.RemoteInfo; +import org.elasticsearch.index.reindex.ScrollableHitSource; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.sort.SortOrder; +import java.util.Collections; import java.util.Date; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -750,6 +764,370 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { } } + public void testReindex() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + String mapping = + "\"doc\": {\n" + + " \"properties\": {\n" + + " \"user\": {\n" + + " \"type\": \"text\"\n" + + " },\n" + + " \"field1\": {\n" + + " \"type\": \"integer\"\n" + + " },\n" + + " \"field2\": {\n" + + " \"type\": \"integer\"\n" + + " }\n" + + " }\n" + + " }"; + createIndex("source1", Settings.EMPTY, mapping); + createIndex("source2", Settings.EMPTY, mapping); + createPipeline("my_pipeline"); + } + { + // tag::reindex-request + ReindexRequest request = new ReindexRequest(); // <1> + request.setSourceIndices("source1", "source2"); // <2> + request.setDestIndex("dest"); // <3> + // end::reindex-request + // tag::reindex-request-versionType + request.setDestVersionType(VersionType.EXTERNAL); // <1> + // end::reindex-request-versionType + // tag::reindex-request-opType + request.setDestOpType("create"); // <1> + // end::reindex-request-opType + // tag::reindex-request-conflicts + request.setConflicts("proceed"); // <1> + // end::reindex-request-conflicts + // tag::reindex-request-typeOrQuery + request.setSourceDocTypes("doc"); // <1> + request.setSourceQuery(new TermQueryBuilder("user", "kimchy")); // <2> + // 
end::reindex-request-typeOrQuery + // tag::reindex-request-size + request.setSize(10); // <1> + // end::reindex-request-size + // tag::reindex-request-sourceSize + request.setSourceBatchSize(100); // <1> + // end::reindex-request-sourceSize + // tag::reindex-request-pipeline + request.setDestPipeline("my_pipeline"); // <1> + // end::reindex-request-pipeline + // tag::reindex-request-sort + request.addSortField("field1", SortOrder.DESC); // <1> + request.addSortField("field2", SortOrder.ASC); // <2> + // end::reindex-request-sort + // tag::reindex-request-script + request.setScript( + new Script( + ScriptType.INLINE, "painless", + "if (ctx._source.user == 'kimchy') {ctx._source.likes++;}", + Collections.emptyMap())); // <1> + // end::reindex-request-script + // tag::reindex-request-remote + request.setRemoteInfo( + new RemoteInfo( + "https", "localhost", 9002, null, new BytesArray(new MatchAllQueryBuilder().toString()), + "user", "pass", Collections.emptyMap(), new TimeValue(100, TimeUnit.MILLISECONDS), + new TimeValue(100, TimeUnit.SECONDS) + ) + ); // <1> + // end::reindex-request-remote + request.setRemoteInfo(null); // Remove it for tests + // tag::reindex-request-timeout + request.setTimeout(TimeValue.timeValueMinutes(2)); // <1> + // end::reindex-request-timeout + // tag::reindex-request-refresh + request.setRefresh(true); // <1> + // end::reindex-request-refresh + // tag::reindex-request-slices + request.setSlices(2); // <1> + // end::reindex-request-slices + // tag::reindex-request-scroll + request.setScroll(TimeValue.timeValueMinutes(10)); // <1> + // end::reindex-request-scroll + + + // tag::reindex-execute + BulkByScrollResponse bulkResponse = client.reindex(request, RequestOptions.DEFAULT); + // end::reindex-execute + assertSame(0, bulkResponse.getSearchFailures().size()); + assertSame(0, bulkResponse.getBulkFailures().size()); + // tag::reindex-response + TimeValue timeTaken = bulkResponse.getTook(); // <1> + boolean timedOut = bulkResponse.isTimedOut(); // <2> + long totalDocs = bulkResponse.getTotal(); // <3> + long updatedDocs = bulkResponse.getUpdated(); // <4> + long createdDocs = bulkResponse.getCreated(); // <5> + long deletedDocs = bulkResponse.getDeleted(); // <6> + long batches = bulkResponse.getBatches(); // <7> + long noops = bulkResponse.getNoops(); // <8> + long versionConflicts = bulkResponse.getVersionConflicts(); // <9> + long bulkRetries = bulkResponse.getBulkRetries(); // <10> + long searchRetries = bulkResponse.getSearchRetries(); // <11> + TimeValue throttledMillis = bulkResponse.getStatus().getThrottled(); // <12> + TimeValue throttledUntilMillis = bulkResponse.getStatus().getThrottledUntil(); // <13> + List<ScrollableHitSource.SearchFailure> searchFailures = bulkResponse.getSearchFailures(); // <14> + List<BulkItemResponse.Failure> bulkFailures = bulkResponse.getBulkFailures(); // <15> + // end::reindex-response + } + { + ReindexRequest request = new ReindexRequest(); + request.setSourceIndices("source1"); + request.setDestIndex("dest"); + + // tag::reindex-execute-listener + ActionListener<BulkByScrollResponse> listener = new ActionListener<BulkByScrollResponse>() { + @Override + public void onResponse(BulkByScrollResponse bulkResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::reindex-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::reindex-execute-async + client.reindexAsync(request, RequestOptions.DEFAULT, listener); // <1> + // 
end::reindex-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testUpdateByQuery() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + String mapping = + "\"doc\": {\n" + + " \"properties\": {\n" + + " \"user\": {\n" + + " \"type\": \"text\"\n" + + " },\n" + + " \"field1\": {\n" + + " \"type\": \"integer\"\n" + + " },\n" + + " \"field2\": {\n" + + " \"type\": \"integer\"\n" + + " }\n" + + " }\n" + + " }"; + createIndex("source1", Settings.EMPTY, mapping); + createIndex("source2", Settings.EMPTY, mapping); + createPipeline("my_pipeline"); + } + { + // tag::update-by-query-request + UpdateByQueryRequest request = new UpdateByQueryRequest("source1", "source2"); // <1> + // end::update-by-query-request + // tag::update-by-query-request-conflicts + request.setConflicts("proceed"); // <1> + // end::update-by-query-request-conflicts + // tag::update-by-query-request-typeOrQuery + request.setDocTypes("doc"); // <1> + request.setQuery(new TermQueryBuilder("user", "kimchy")); // <2> + // end::update-by-query-request-typeOrQuery + // tag::update-by-query-request-size + request.setSize(10); // <1> + // end::update-by-query-request-size + // tag::update-by-query-request-scrollSize + request.setBatchSize(100); // <1> + // end::update-by-query-request-scrollSize + // tag::update-by-query-request-pipeline + request.setPipeline("my_pipeline"); // <1> + // end::update-by-query-request-pipeline + // tag::update-by-query-request-script + request.setScript( + new Script( + ScriptType.INLINE, "painless", + "if (ctx._source.user == 'kimchy') {ctx._source.likes++;}", + Collections.emptyMap())); // <1> + // end::update-by-query-request-script + // tag::update-by-query-request-timeout + request.setTimeout(TimeValue.timeValueMinutes(2)); // <1> + // end::update-by-query-request-timeout + // tag::update-by-query-request-refresh + request.setRefresh(true); // <1> + // end::update-by-query-request-refresh + // tag::update-by-query-request-slices + request.setSlices(2); // <1> + // end::update-by-query-request-slices + // tag::update-by-query-request-scroll + request.setScroll(TimeValue.timeValueMinutes(10)); // <1> + // end::update-by-query-request-scroll + // tag::update-by-query-request-routing + request.setRouting("=cat"); // <1> + // end::update-by-query-request-routing + // tag::update-by-query-request-indicesOptions + request.setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); // <1> + // end::update-by-query-request-indicesOptions + + // tag::update-by-query-execute + BulkByScrollResponse bulkResponse = client.updateByQuery(request, RequestOptions.DEFAULT); + // end::update-by-query-execute + assertSame(0, bulkResponse.getSearchFailures().size()); + assertSame(0, bulkResponse.getBulkFailures().size()); + // tag::update-by-query-response + TimeValue timeTaken = bulkResponse.getTook(); // <1> + boolean timedOut = bulkResponse.isTimedOut(); // <2> + long totalDocs = bulkResponse.getTotal(); // <3> + long updatedDocs = bulkResponse.getUpdated(); // <4> + long deletedDocs = bulkResponse.getDeleted(); // <5> + long batches = bulkResponse.getBatches(); // <6> + long noops = bulkResponse.getNoops(); // <7> + long versionConflicts = bulkResponse.getVersionConflicts(); // <8> + long bulkRetries = bulkResponse.getBulkRetries(); // <9> + long searchRetries = bulkResponse.getSearchRetries(); // <10> + TimeValue throttledMillis = bulkResponse.getStatus().getThrottled(); // <11> + TimeValue throttledUntilMillis = bulkResponse.getStatus().getThrottledUntil(); // 
<12> + List<ScrollableHitSource.SearchFailure> searchFailures = bulkResponse.getSearchFailures(); // <13> + List<BulkItemResponse.Failure> bulkFailures = bulkResponse.getBulkFailures(); // <14> + // end::update-by-query-response + } + { + UpdateByQueryRequest request = new UpdateByQueryRequest(); + request.indices("source1"); + + // tag::update-by-query-execute-listener + ActionListener<BulkByScrollResponse> listener = new ActionListener<BulkByScrollResponse>() { + @Override + public void onResponse(BulkByScrollResponse bulkResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::update-by-query-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::update-by-query-execute-async + client.updateByQueryAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::update-by-query-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testDeleteByQuery() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + String mapping = + "\"doc\": {\n" + + " \"properties\": {\n" + + " \"user\": {\n" + + " \"type\": \"text\"\n" + + " },\n" + + " \"field1\": {\n" + + " \"type\": \"integer\"\n" + + " },\n" + + " \"field2\": {\n" + + " \"type\": \"integer\"\n" + + " }\n" + + " }\n" + + " }"; + createIndex("source1", Settings.EMPTY, mapping); + createIndex("source2", Settings.EMPTY, mapping); + } + { + // tag::delete-by-query-request + DeleteByQueryRequest request = new DeleteByQueryRequest("source1", "source2"); // <1> + // end::delete-by-query-request + // tag::delete-by-query-request-conflicts + request.setConflicts("proceed"); // <1> + // end::delete-by-query-request-conflicts + // tag::delete-by-query-request-typeOrQuery + request.setDocTypes("doc"); // <1> + request.setQuery(new TermQueryBuilder("user", "kimchy")); // <2> + // end::delete-by-query-request-typeOrQuery + // tag::delete-by-query-request-size + request.setSize(10); // <1> + // end::delete-by-query-request-size + // tag::delete-by-query-request-scrollSize + request.setBatchSize(100); // <1> + // end::delete-by-query-request-scrollSize + // tag::delete-by-query-request-timeout + request.setTimeout(TimeValue.timeValueMinutes(2)); // <1> + // end::delete-by-query-request-timeout + // tag::delete-by-query-request-refresh + request.setRefresh(true); // <1> + // end::delete-by-query-request-refresh + // tag::delete-by-query-request-slices + request.setSlices(2); // <1> + // end::delete-by-query-request-slices + // tag::delete-by-query-request-scroll + request.setScroll(TimeValue.timeValueMinutes(10)); // <1> + // end::delete-by-query-request-scroll + // tag::delete-by-query-request-routing + request.setRouting("=cat"); // <1> + // end::delete-by-query-request-routing + // tag::delete-by-query-request-indicesOptions + request.setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); // <1> + // end::delete-by-query-request-indicesOptions + + // tag::delete-by-query-execute + BulkByScrollResponse bulkResponse = client.deleteByQuery(request, RequestOptions.DEFAULT); + // end::delete-by-query-execute + assertSame(0, bulkResponse.getSearchFailures().size()); + assertSame(0, bulkResponse.getBulkFailures().size()); + // tag::delete-by-query-response + TimeValue timeTaken = bulkResponse.getTook(); // <1> + boolean timedOut = bulkResponse.isTimedOut(); // <2> + long totalDocs = bulkResponse.getTotal(); // <3> + long deletedDocs = bulkResponse.getDeleted(); // <4> + long batches = bulkResponse.getBatches(); 
// <5> + long noops = bulkResponse.getNoops(); // <6> + long versionConflicts = bulkResponse.getVersionConflicts(); // <7> + long bulkRetries = bulkResponse.getBulkRetries(); // <8> + long searchRetries = bulkResponse.getSearchRetries(); // <9> + TimeValue throttledMillis = bulkResponse.getStatus().getThrottled(); // <10> + TimeValue throttledUntilMillis = bulkResponse.getStatus().getThrottledUntil(); // <11> + List<ScrollableHitSource.SearchFailure> searchFailures = bulkResponse.getSearchFailures(); // <12> + List<BulkItemResponse.Failure> bulkFailures = bulkResponse.getBulkFailures(); // <13> + // end::delete-by-query-response + } + { + DeleteByQueryRequest request = new DeleteByQueryRequest(); + request.indices("source1"); + + // tag::delete-by-query-execute-listener + ActionListener<BulkByScrollResponse> listener = new ActionListener<BulkByScrollResponse>() { + @Override + public void onResponse(BulkByScrollResponse bulkResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::delete-by-query-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::delete-by-query-execute-async + client.deleteByQueryAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::delete-by-query-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testGet() throws Exception { RestHighLevelClient client = highLevelClient(); { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java new file mode 100644 index 00000000000..8631e18b873 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.client.documentation;
+
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.elasticsearch.client.ESRestHighLevelClientTestCase;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.index.query.TermQueryBuilder;
+import org.elasticsearch.protocol.xpack.graph.Connection;
+import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
+import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse;
+import org.elasticsearch.protocol.xpack.graph.Hop;
+import org.elasticsearch.protocol.xpack.graph.Vertex;
+import org.elasticsearch.protocol.xpack.graph.VertexRequest;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.Collection;
+
+public class GraphDocumentationIT extends ESRestHighLevelClientTestCase {
+
+    @Before
+    public void indexDocuments() throws IOException {
+        // Create a chain of interconnected docs across indices 1->2
+        Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/type/1");
+        doc1.setJsonEntity("{ \"participants\":[1,2], \"text\":\"let's start projectx\", \"attachment_md5\":\"324FHDGHFDG4564\"}");
+        client().performRequest(doc1);
+
+        Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/type/2");
+        doc2.setJsonEntity("{\"participants\":[2,3,4], \"text\":\"got something you both may be interested in\"}");
+        client().performRequest(doc2);
+
+        client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh"));
+    }
+
+    @SuppressForbidden(reason = "system out is ok for a documentation example")
+    public void testExplore() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+
+        // tag::x-pack-graph-explore-request
+        GraphExploreRequest request = new GraphExploreRequest();
+        request.indices("index1", "index2");
+        request.useSignificance(false);
+        TermQueryBuilder startingQuery = new TermQueryBuilder("text", "projectx");
+
+        Hop hop1 = request.createNextHop(startingQuery); // <1>
+        VertexRequest people = hop1.addVertexRequest("participants"); // <2>
+        people.minDocCount(1);
+        VertexRequest files = hop1.addVertexRequest("attachment_md5");
+        files.minDocCount(1);
+
+        Hop hop2 = request.createNextHop(null); // <3>
+        VertexRequest vr2 = hop2.addVertexRequest("participants");
+        vr2.minDocCount(5);
+
+        GraphExploreResponse exploreResponse = client.graph().explore(request, RequestOptions.DEFAULT); // <4>
+        // end::x-pack-graph-explore-request
+
+        // tag::x-pack-graph-explore-response
+        Collection<Vertex> v = exploreResponse.getVertices();
+        Collection<Connection> c = exploreResponse.getConnections();
+        for (Vertex vertex : v) {
+            System.out.println(vertex.getField() + ":" + vertex.getTerm() + // <1>
+                " discovered at hop depth " + vertex.getHopDepth());
+        }
+        for (Connection link : c) {
+            System.out.println(link.getFrom() + " -> " + link.getTo() // <2>
+                + " evidenced by " + link.getDocCount() + " docs");
+        }
+        // end::x-pack-graph-explore-response
+
+        Collection<Vertex> initialVertices = exploreResponse.getVertices();
+
+        // tag::x-pack-graph-explore-expand
+        GraphExploreRequest expandRequest = new GraphExploreRequest();
+        expandRequest.indices("index1", "index2");
+
+        Hop expandHop1 = expandRequest.createNextHop(null); // <1>
+        VertexRequest fromPeople = expandHop1.addVertexRequest("participants"); // <2>
+        for (Vertex vertex : initialVertices) {
+            if (vertex.getField().equals("participants")) {
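+                // Expansion works by seeding the new request with the vertices
+                // already discovered: addInclude(term, boost) pins this vertex
+                // request to a known term, here with a neutral boost of 1.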
fromPeople.addInclude(vertex.getTerm(), 1f); + } + } + + Hop expandHop2 = expandRequest.createNextHop(null); + VertexRequest newPeople = expandHop2.addVertexRequest("participants"); // <3> + for (Vertex vertex : initialVertices) { + if (vertex.getField().equals("participants")) { + newPeople.addExclude(vertex.getTerm()); + } + } + + GraphExploreResponse expandResponse = client.graph().explore(expandRequest, RequestOptions.DEFAULT); + // end::x-pack-graph-explore-expand + + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index a77d8b43e57..9abef54d0d2 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -20,32 +20,87 @@ package org.elasticsearch.client.documentation; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.MachineLearningGetResultsIT; import org.elasticsearch.client.MachineLearningIT; +import org.elasticsearch.client.MlRestTestStateCleaner; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteJobRequest; +import org.elasticsearch.client.ml.DeleteJobResponse; +import org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.FlushJobResponse; +import org.elasticsearch.client.ml.ForecastJobRequest; +import org.elasticsearch.client.ml.ForecastJobResponse; +import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetInfluencersRequest; +import org.elasticsearch.client.ml.GetInfluencersResponse; +import org.elasticsearch.client.ml.GetJobRequest; +import org.elasticsearch.client.ml.GetJobResponse; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetJobStatsResponse; +import org.elasticsearch.client.ml.GetOverallBucketsRequest; +import org.elasticsearch.client.ml.GetOverallBucketsResponse; +import org.elasticsearch.client.ml.GetRecordsRequest; +import org.elasticsearch.client.ml.GetRecordsResponse; +import org.elasticsearch.client.ml.OpenJobRequest; +import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PostDataRequest; +import org.elasticsearch.client.ml.PostDataResponse; +import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.job.config.AnalysisConfig; +import org.elasticsearch.client.ml.job.config.AnalysisLimits; +import org.elasticsearch.client.ml.job.config.DataDescription; +import org.elasticsearch.client.ml.job.config.DetectionRule; +import org.elasticsearch.client.ml.job.config.Detector; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.process.DataCounts; +import 
org.elasticsearch.client.ml.job.config.JobUpdate; +import org.elasticsearch.client.ml.job.config.ModelPlotConfig; +import org.elasticsearch.client.ml.job.config.Operator; +import org.elasticsearch.client.ml.job.config.RuleCondition; +import org.elasticsearch.client.ml.job.results.AnomalyRecord; +import org.elasticsearch.client.ml.job.results.Bucket; +import org.elasticsearch.client.ml.job.results.Influencer; +import org.elasticsearch.client.ml.job.results.OverallBucket; +import org.elasticsearch.client.ml.job.stats.JobStats; +import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; -import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; -import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; -import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; -import org.elasticsearch.protocol.xpack.ml.PutJobRequest; -import org.elasticsearch.protocol.xpack.ml.PutJobResponse; -import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig; -import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription; -import org.elasticsearch.protocol.xpack.ml.job.config.Detector; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; +import org.elasticsearch.common.xcontent.XContentType; +import org.junit.After; +import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.Date; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.core.Is.is; public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { + @After + public void cleanUp() throws IOException { + new MlRestTestStateCleaner(logger, client()).clearMlMetadata(); + } + public void testCreateJob() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -124,6 +179,63 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { } } + public void testGetJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + + String jobId = "get-machine-learning-job1"; + + Job job = MachineLearningIT.buildJob("get-machine-learning-job1"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + Job secondJob = MachineLearningIT.buildJob("get-machine-learning-job2"); + client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT); + + { + //tag::x-pack-ml-get-job-request + GetJobRequest request = new GetJobRequest("get-machine-learning-job1", "get-machine-learning-job*"); //<1> + request.setAllowNoJobs(true); //<2> + //end::x-pack-ml-get-job-request + + //tag::x-pack-ml-get-job-execute + GetJobResponse response = client.machineLearning().getJob(request, RequestOptions.DEFAULT); + long numberOfJobs = response.count(); //<1> + List jobs = response.jobs(); //<2> + //end::x-pack-ml-get-job-execute + + assertEquals(2, response.count()); + assertThat(response.jobs(), hasSize(2)); + assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), + containsInAnyOrder(job.getId(), secondJob.getId())); + } + { + GetJobRequest request = new GetJobRequest("get-machine-learning-job1", 
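+                // a concrete job id and a wildcard expression can be mixed in
+                // the same request, as the tagged snippet earlier demonstrates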
"get-machine-learning-job*"); + + // tag::x-pack-ml-get-job-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(GetJobResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-job-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-job-execute-async + client.machineLearning().getJobAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-job-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testDeleteJob() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -221,4 +333,782 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testCloseJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + Job job = MachineLearningIT.buildJob("closing-my-first-machine-learning-job"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT); + + //tag::x-pack-ml-close-job-request + CloseJobRequest closeJobRequest = new CloseJobRequest("closing-my-first-machine-learning-job", "otherjobs*"); //<1> + closeJobRequest.setForce(false); //<2> + closeJobRequest.setAllowNoJobs(true); //<3> + closeJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); //<4> + //end::x-pack-ml-close-job-request + + //tag::x-pack-ml-close-job-execute + CloseJobResponse closeJobResponse = client.machineLearning().closeJob(closeJobRequest, RequestOptions.DEFAULT); + boolean isClosed = closeJobResponse.isClosed(); //<1> + //end::x-pack-ml-close-job-execute + + } + { + Job job = MachineLearningIT.buildJob("closing-my-second-machine-learning-job"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT); + + //tag::x-pack-ml-close-job-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(CloseJobResponse closeJobResponse) { + //<1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::x-pack-ml-close-job-listener + CloseJobRequest closeJobRequest = new CloseJobRequest("closing-my-second-machine-learning-job"); + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-close-job-execute-async + client.machineLearning().closeJobAsync(closeJobRequest, RequestOptions.DEFAULT, listener); //<1> + // end::x-pack-ml-close-job-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testUpdateJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + String jobId = "test-update-job"; + Job tempJob = MachineLearningIT.buildJob(jobId); + Job job = new Job.Builder(tempJob) + .setAnalysisConfig(new AnalysisConfig.Builder(tempJob.getAnalysisConfig()) + .setCategorizationFieldName("categorization-field") + .setDetector(0, + new Detector.Builder().setFieldName("total") + .setFunction("sum") + .setPartitionFieldName("mlcategory") + .setDetectorDescription(randomAlphaOfLength(10)) + .build())) + 
.build(); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + { + + List detectionRules = Arrays.asList( + new DetectionRule.Builder(Arrays.asList(RuleCondition.createTime(Operator.GT, 100L))).build()); + Map customSettings = new HashMap<>(); + customSettings.put("custom-setting-1", "custom-value"); + + //tag::x-pack-ml-update-job-detector-options + JobUpdate.DetectorUpdate detectorUpdate = new JobUpdate.DetectorUpdate(0, //<1> + "detector description", //<2> + detectionRules); //<3> + //end::x-pack-ml-update-job-detector-options + + //tag::x-pack-ml-update-job-options + JobUpdate update = new JobUpdate.Builder(jobId) //<1> + .setDescription("My description") //<2> + .setAnalysisLimits(new AnalysisLimits(1000L, null)) //<3> + .setBackgroundPersistInterval(TimeValue.timeValueHours(3)) //<4> + .setCategorizationFilters(Arrays.asList("categorization-filter")) //<5> + .setDetectorUpdates(Arrays.asList(detectorUpdate)) //<6> + .setGroups(Arrays.asList("job-group-1")) //<7> + .setResultsRetentionDays(10L) //<8> + .setModelPlotConfig(new ModelPlotConfig(true, null)) //<9> + .setModelSnapshotRetentionDays(7L) //<10> + .setCustomSettings(customSettings) //<11> + .setRenormalizationWindowDays(3L) //<12> + .build(); + //end::x-pack-ml-update-job-options + + + //tag::x-pack-ml-update-job-request + UpdateJobRequest updateJobRequest = new UpdateJobRequest(update); //<1> + //end::x-pack-ml-update-job-request + + //tag::x-pack-ml-update-job-execute + PutJobResponse updateJobResponse = client.machineLearning().updateJob(updateJobRequest, RequestOptions.DEFAULT); + //end::x-pack-ml-update-job-execute + //tag::x-pack-ml-update-job-response + Job updatedJob = updateJobResponse.getResponse(); //<1> + //end::x-pack-ml-update-job-response + + assertEquals(update.getDescription(), updatedJob.getDescription()); + } + { + //tag::x-pack-ml-update-job-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(PutJobResponse updateJobResponse) { + //<1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::x-pack-ml-update-job-listener + UpdateJobRequest updateJobRequest = new UpdateJobRequest(new JobUpdate.Builder(jobId).build()); + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-update-job-execute-async + client.machineLearning().updateJobAsync(updateJobRequest, RequestOptions.DEFAULT, listener); //<1> + // end::x-pack-ml-update-job-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testGetBuckets() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + String jobId = "test-get-buckets"; + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + // Let us index a bucket + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc"); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source("{\"job_id\":\"test-get-buckets\", \"result_type\":\"bucket\", \"timestamp\": 1533081600000," + + "\"bucket_span\": 600,\"is_interim\": false, \"anomaly_score\": 80.0}", XContentType.JSON); + client.index(indexRequest, RequestOptions.DEFAULT); + + { + // tag::x-pack-ml-get-buckets-request + GetBucketsRequest request = new GetBucketsRequest(jobId); // <1> + // 
end::x-pack-ml-get-buckets-request + + // tag::x-pack-ml-get-buckets-timestamp + request.setTimestamp("2018-08-17T00:00:00Z"); // <1> + // end::x-pack-ml-get-buckets-timestamp + + // Set timestamp to null as it is incompatible with other args + request.setTimestamp(null); + + // tag::x-pack-ml-get-buckets-anomaly-score + request.setAnomalyScore(75.0); // <1> + // end::x-pack-ml-get-buckets-anomaly-score + + // tag::x-pack-ml-get-buckets-desc + request.setDescending(true); // <1> + // end::x-pack-ml-get-buckets-desc + + // tag::x-pack-ml-get-buckets-end + request.setEnd("2018-08-21T00:00:00Z"); // <1> + // end::x-pack-ml-get-buckets-end + + // tag::x-pack-ml-get-buckets-exclude-interim + request.setExcludeInterim(true); // <1> + // end::x-pack-ml-get-buckets-exclude-interim + + // tag::x-pack-ml-get-buckets-expand + request.setExpand(true); // <1> + // end::x-pack-ml-get-buckets-expand + + // tag::x-pack-ml-get-buckets-page + request.setPageParams(new PageParams(100, 200)); // <1> + // end::x-pack-ml-get-buckets-page + + // Set page params back to null so the response contains the bucket we indexed + request.setPageParams(null); + + // tag::x-pack-ml-get-buckets-sort + request.setSort("anomaly_score"); // <1> + // end::x-pack-ml-get-buckets-sort + + // tag::x-pack-ml-get-buckets-start + request.setStart("2018-08-01T00:00:00Z"); // <1> + // end::x-pack-ml-get-buckets-start + + // tag::x-pack-ml-get-buckets-execute + GetBucketsResponse response = client.machineLearning().getBuckets(request, RequestOptions.DEFAULT); + // end::x-pack-ml-get-buckets-execute + + // tag::x-pack-ml-get-buckets-response + long count = response.count(); // <1> + List buckets = response.buckets(); // <2> + // end::x-pack-ml-get-buckets-response + assertEquals(1, buckets.size()); + } + { + GetBucketsRequest request = new GetBucketsRequest(jobId); + + // tag::x-pack-ml-get-buckets-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetBucketsResponse getBucketsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-buckets-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-buckets-execute-async + client.machineLearning().getBucketsAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-buckets-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testFlushJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + + Job job = MachineLearningIT.buildJob("flushing-my-first-machine-learning-job"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT); + + Job secondJob = MachineLearningIT.buildJob("flushing-my-second-machine-learning-job"); + client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(secondJob.getId()), RequestOptions.DEFAULT); + + { + //tag::x-pack-ml-flush-job-request + FlushJobRequest flushJobRequest = new FlushJobRequest("flushing-my-first-machine-learning-job"); //<1> + //end::x-pack-ml-flush-job-request + + //tag::x-pack-ml-flush-job-request-options + flushJobRequest.setCalcInterim(true); //<1> + flushJobRequest.setAdvanceTime("2018-08-31T16:35:07+00:00"); 
//<2> + flushJobRequest.setStart("2018-08-31T16:35:17+00:00"); //<3> + flushJobRequest.setEnd("2018-08-31T16:35:27+00:00"); //<4> + flushJobRequest.setSkipTime("2018-08-31T16:35:00+00:00"); //<5> + //end::x-pack-ml-flush-job-request-options + + //tag::x-pack-ml-flush-job-execute + FlushJobResponse flushJobResponse = client.machineLearning().flushJob(flushJobRequest, RequestOptions.DEFAULT); + //end::x-pack-ml-flush-job-execute + + //tag::x-pack-ml-flush-job-response + boolean isFlushed = flushJobResponse.isFlushed(); //<1> + Date lastFinalizedBucketEnd = flushJobResponse.getLastFinalizedBucketEnd(); //<2> + //end::x-pack-ml-flush-job-response + + } + { + //tag::x-pack-ml-flush-job-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(FlushJobResponse FlushJobResponse) { + //<1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::x-pack-ml-flush-job-listener + FlushJobRequest flushJobRequest = new FlushJobRequest("flushing-my-second-machine-learning-job"); + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-flush-job-execute-async + client.machineLearning().flushJobAsync(flushJobRequest, RequestOptions.DEFAULT, listener); //<1> + // end::x-pack-ml-flush-job-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + + public void testGetJobStats() throws Exception { + RestHighLevelClient client = highLevelClient(); + + Job job = MachineLearningIT.buildJob("get-machine-learning-job-stats1"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + Job secondJob = MachineLearningIT.buildJob("get-machine-learning-job-stats2"); + client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT); + + { + //tag::x-pack-ml-get-job-stats-request + GetJobStatsRequest request = new GetJobStatsRequest("get-machine-learning-job-stats1", "get-machine-learning-job-*"); //<1> + request.setAllowNoJobs(true); //<2> + //end::x-pack-ml-get-job-stats-request + + //tag::x-pack-ml-get-job-stats-execute + GetJobStatsResponse response = client.machineLearning().getJobStats(request, RequestOptions.DEFAULT); + //end::x-pack-ml-get-job-stats-execute + + //tag::x-pack-ml-get-job-stats-response + long numberOfJobStats = response.count(); //<1> + List jobStats = response.jobStats(); //<2> + //end::x-pack-ml-get-job-stats-response + + assertEquals(2, response.count()); + assertThat(response.jobStats(), hasSize(2)); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), + containsInAnyOrder(job.getId(), secondJob.getId())); + } + { + GetJobStatsRequest request = new GetJobStatsRequest("get-machine-learning-job-stats1", "get-machine-learning-job-*"); + + // tag::x-pack-ml-get-job-stats-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(GetJobStatsResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-job-stats-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-job-stats-execute-async + client.machineLearning().getJobStatsAsync(request, RequestOptions.DEFAULT, listener); // <1> + // 
end::x-pack-ml-get-job-stats-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testForecastJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + + Job job = MachineLearningIT.buildJob("forecasting-my-first-machine-learning-job"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT); + + PostDataRequest.JsonBuilder builder = new PostDataRequest.JsonBuilder(); + for(int i = 0; i < 30; i++) { + Map hashMap = new HashMap<>(); + hashMap.put("total", randomInt(1000)); + hashMap.put("timestamp", (i+1)*1000); + builder.addDoc(hashMap); + } + PostDataRequest postDataRequest = new PostDataRequest(job.getId(), builder); + client.machineLearning().postData(postDataRequest, RequestOptions.DEFAULT); + client.machineLearning().flushJob(new FlushJobRequest(job.getId()), RequestOptions.DEFAULT); + + { + //tag::x-pack-ml-forecast-job-request + ForecastJobRequest forecastJobRequest = new ForecastJobRequest("forecasting-my-first-machine-learning-job"); //<1> + //end::x-pack-ml-forecast-job-request + + //tag::x-pack-ml-forecast-job-request-options + forecastJobRequest.setExpiresIn(TimeValue.timeValueHours(48)); //<1> + forecastJobRequest.setDuration(TimeValue.timeValueHours(24)); //<2> + //end::x-pack-ml-forecast-job-request-options + + //tag::x-pack-ml-forecast-job-execute + ForecastJobResponse forecastJobResponse = client.machineLearning().forecastJob(forecastJobRequest, RequestOptions.DEFAULT); + //end::x-pack-ml-forecast-job-execute + + //tag::x-pack-ml-forecast-job-response + boolean isAcknowledged = forecastJobResponse.isAcknowledged(); //<1> + String forecastId = forecastJobResponse.getForecastId(); //<2> + //end::x-pack-ml-forecast-job-response + assertTrue(isAcknowledged); + assertNotNull(forecastId); + } + { + //tag::x-pack-ml-forecast-job-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(ForecastJobResponse forecastJobResponse) { + //<1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::x-pack-ml-forecast-job-listener + ForecastJobRequest forecastJobRequest = new ForecastJobRequest("forecasting-my-first-machine-learning-job"); + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-forecast-job-execute-async + client.machineLearning().forecastJobAsync(forecastJobRequest, RequestOptions.DEFAULT, listener); //<1> + // end::x-pack-ml-forecast-job-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testGetOverallBuckets() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + String jobId1 = "test-get-overall-buckets-1"; + String jobId2 = "test-get-overall-buckets-2"; + Job job1 = MachineLearningGetResultsIT.buildJob(jobId1); + Job job2 = MachineLearningGetResultsIT.buildJob(jobId2); + client.machineLearning().putJob(new PutJobRequest(job1), RequestOptions.DEFAULT); + client.machineLearning().putJob(new PutJobRequest(job2), RequestOptions.DEFAULT); + + // Let us index some buckets + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + { + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc"); + 
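+            // Each doc indexed here mimics a persisted ML bucket result; the
+            // job_id, result_type "bucket", timestamp and bucket_span fields are
+            // what the overall-buckets lookup reads.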
indexRequest.source("{\"job_id\":\"test-get-overall-buckets-1\", \"result_type\":\"bucket\", \"timestamp\": 1533081600000," + + "\"bucket_span\": 600,\"is_interim\": false, \"anomaly_score\": 60.0}", XContentType.JSON); + bulkRequest.add(indexRequest); + } + { + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc"); + indexRequest.source("{\"job_id\":\"test-get-overall-buckets-2\", \"result_type\":\"bucket\", \"timestamp\": 1533081600000," + + "\"bucket_span\": 3600,\"is_interim\": false, \"anomaly_score\": 100.0}", XContentType.JSON); + bulkRequest.add(indexRequest); + } + + client.bulk(bulkRequest, RequestOptions.DEFAULT); + + { + // tag::x-pack-ml-get-overall-buckets-request + GetOverallBucketsRequest request = new GetOverallBucketsRequest(jobId1, jobId2); // <1> + // end::x-pack-ml-get-overall-buckets-request + + // tag::x-pack-ml-get-overall-buckets-bucket-span + request.setBucketSpan(TimeValue.timeValueHours(24)); // <1> + // end::x-pack-ml-get-overall-buckets-bucket-span + + // tag::x-pack-ml-get-overall-buckets-end + request.setEnd("2018-08-21T00:00:00Z"); // <1> + // end::x-pack-ml-get-overall-buckets-end + + // tag::x-pack-ml-get-overall-buckets-exclude-interim + request.setExcludeInterim(true); // <1> + // end::x-pack-ml-get-overall-buckets-exclude-interim + + // tag::x-pack-ml-get-overall-buckets-overall-score + request.setOverallScore(75.0); // <1> + // end::x-pack-ml-get-overall-buckets-overall-score + + // tag::x-pack-ml-get-overall-buckets-start + request.setStart("2018-08-01T00:00:00Z"); // <1> + // end::x-pack-ml-get-overall-buckets-start + + // tag::x-pack-ml-get-overall-buckets-top-n + request.setTopN(2); // <1> + // end::x-pack-ml-get-overall-buckets-top-n + + // tag::x-pack-ml-get-overall-buckets-execute + GetOverallBucketsResponse response = client.machineLearning().getOverallBuckets(request, RequestOptions.DEFAULT); + // end::x-pack-ml-get-overall-buckets-execute + + // tag::x-pack-ml-get-overall-buckets-response + long count = response.count(); // <1> + List overallBuckets = response.overallBuckets(); // <2> + // end::x-pack-ml-get-overall-buckets-response + + assertEquals(1, overallBuckets.size()); + assertThat(overallBuckets.get(0).getOverallScore(), is(closeTo(80.0, 0.001))); + + } + { + GetOverallBucketsRequest request = new GetOverallBucketsRequest(jobId1, jobId2); + + // tag::x-pack-ml-get-overall-buckets-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetOverallBucketsResponse getOverallBucketsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-overall-buckets-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-overall-buckets-execute-async + client.machineLearning().getOverallBucketsAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-overall-buckets-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testGetRecords() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + String jobId = "test-get-records"; + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + // Let us index a record + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc"); + 
indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source("{\"job_id\":\"test-get-records\", \"result_type\":\"record\", \"timestamp\": 1533081600000," + + "\"bucket_span\": 600,\"is_interim\": false, \"record_score\": 80.0}", XContentType.JSON); + client.index(indexRequest, RequestOptions.DEFAULT); + + { + // tag::x-pack-ml-get-records-request + GetRecordsRequest request = new GetRecordsRequest(jobId); // <1> + // end::x-pack-ml-get-records-request + + // tag::x-pack-ml-get-records-desc + request.setDescending(true); // <1> + // end::x-pack-ml-get-records-desc + + // tag::x-pack-ml-get-records-end + request.setEnd("2018-08-21T00:00:00Z"); // <1> + // end::x-pack-ml-get-records-end + + // tag::x-pack-ml-get-records-exclude-interim + request.setExcludeInterim(true); // <1> + // end::x-pack-ml-get-records-exclude-interim + + // tag::x-pack-ml-get-records-page + request.setPageParams(new PageParams(100, 200)); // <1> + // end::x-pack-ml-get-records-page + + // Set page params back to null so the response contains the record we indexed + request.setPageParams(null); + + // tag::x-pack-ml-get-records-record-score + request.setRecordScore(75.0); // <1> + // end::x-pack-ml-get-records-record-score + + // tag::x-pack-ml-get-records-sort + request.setSort("probability"); // <1> + // end::x-pack-ml-get-records-sort + + // tag::x-pack-ml-get-records-start + request.setStart("2018-08-01T00:00:00Z"); // <1> + // end::x-pack-ml-get-records-start + + // tag::x-pack-ml-get-records-execute + GetRecordsResponse response = client.machineLearning().getRecords(request, RequestOptions.DEFAULT); + // end::x-pack-ml-get-records-execute + + // tag::x-pack-ml-get-records-response + long count = response.count(); // <1> + List records = response.records(); // <2> + // end::x-pack-ml-get-records-response + assertEquals(1, records.size()); + } + { + GetRecordsRequest request = new GetRecordsRequest(jobId); + + // tag::x-pack-ml-get-records-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetRecordsResponse getRecordsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-records-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-records-execute-async + client.machineLearning().getRecordsAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-records-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testPostData() throws Exception { + RestHighLevelClient client = highLevelClient(); + + Job job = MachineLearningIT.buildJob("test-post-data"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT); + + { + //tag::x-pack-ml-post-data-request + PostDataRequest.JsonBuilder jsonBuilder = new PostDataRequest.JsonBuilder(); //<1> + Map mapData = new HashMap<>(); + mapData.put("total", 109); + jsonBuilder.addDoc(mapData); //<2> + jsonBuilder.addDoc("{\"total\":1000}"); //<3> + PostDataRequest postDataRequest = new PostDataRequest("test-post-data", jsonBuilder); //<4> + //end::x-pack-ml-post-data-request + + + //tag::x-pack-ml-post-data-request-options + postDataRequest.setResetStart("2018-08-31T16:35:07+00:00"); //<1> + 
postDataRequest.setResetEnd("2018-08-31T16:35:17+00:00"); //<2> + //end::x-pack-ml-post-data-request-options + postDataRequest.setResetEnd(null); + postDataRequest.setResetStart(null); + + //tag::x-pack-ml-post-data-execute + PostDataResponse postDataResponse = client.machineLearning().postData(postDataRequest, RequestOptions.DEFAULT); + //end::x-pack-ml-post-data-execute + + //tag::x-pack-ml-post-data-response + DataCounts dataCounts = postDataResponse.getDataCounts(); //<1> + //end::x-pack-ml-post-data-response + assertEquals(2, dataCounts.getInputRecordCount()); + + } + { + //tag::x-pack-ml-post-data-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(PostDataResponse postDataResponse) { + //<1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::x-pack-ml-post-data-listener + PostDataRequest.JsonBuilder jsonBuilder = new PostDataRequest.JsonBuilder(); + Map mapData = new HashMap<>(); + mapData.put("total", 109); + jsonBuilder.addDoc(mapData); + PostDataRequest postDataRequest = new PostDataRequest("test-post-data", jsonBuilder); //<1> + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-post-data-execute-async + client.machineLearning().postDataAsync(postDataRequest, RequestOptions.DEFAULT, listener); //<1> + // end::x-pack-ml-post-data-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testGetInfluencers() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + String jobId = "test-get-influencers"; + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + // Let us index a record + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc"); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source("{\"job_id\":\"test-get-influencers\", \"result_type\":\"influencer\", \"timestamp\": 1533081600000," + + "\"bucket_span\": 600,\"is_interim\": false, \"influencer_score\": 80.0, \"influencer_field_name\": \"my_influencer\"," + + "\"influencer_field_value\":\"foo\"}", XContentType.JSON); + client.index(indexRequest, RequestOptions.DEFAULT); + + { + // tag::x-pack-ml-get-influencers-request + GetInfluencersRequest request = new GetInfluencersRequest(jobId); // <1> + // end::x-pack-ml-get-influencers-request + + // tag::x-pack-ml-get-influencers-desc + request.setDescending(true); // <1> + // end::x-pack-ml-get-influencers-desc + + // tag::x-pack-ml-get-influencers-end + request.setEnd("2018-08-21T00:00:00Z"); // <1> + // end::x-pack-ml-get-influencers-end + + // tag::x-pack-ml-get-influencers-exclude-interim + request.setExcludeInterim(true); // <1> + // end::x-pack-ml-get-influencers-exclude-interim + + // tag::x-pack-ml-get-influencers-influencer-score + request.setInfluencerScore(75.0); // <1> + // end::x-pack-ml-get-influencers-influencer-score + + // tag::x-pack-ml-get-influencers-page + request.setPageParams(new PageParams(100, 200)); // <1> + // end::x-pack-ml-get-influencers-page + + // Set page params back to null so the response contains the influencer we indexed + request.setPageParams(null); + + // tag::x-pack-ml-get-influencers-sort + request.setSort("probability"); // <1> + // end::x-pack-ml-get-influencers-sort + + // 
tag::x-pack-ml-get-influencers-start + request.setStart("2018-08-01T00:00:00Z"); // <1> + // end::x-pack-ml-get-influencers-start + + // tag::x-pack-ml-get-influencers-execute + GetInfluencersResponse response = client.machineLearning().getInfluencers(request, RequestOptions.DEFAULT); + // end::x-pack-ml-get-influencers-execute + + // tag::x-pack-ml-get-influencers-response + long count = response.count(); // <1> + List influencers = response.influencers(); // <2> + // end::x-pack-ml-get-influencers-response + assertEquals(1, influencers.size()); + } + { + GetInfluencersRequest request = new GetInfluencersRequest(jobId); + + // tag::x-pack-ml-get-influencers-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetInfluencersResponse getInfluencersResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-influencers-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-influencers-execute-async + client.machineLearning().getInfluencersAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-influencers-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 2f743c786ba..d9d4f665f9d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -86,7 +86,7 @@ import org.elasticsearch.search.aggregations.bucket.range.Range; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.Avg; +import org.elasticsearch.search.aggregations.metrics.Avg; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java new file mode 100644 index 00000000000..103b031fc0e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -0,0 +1,176 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.documentation; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.security.DisableUserRequest; +import org.elasticsearch.client.security.EnableUserRequest; +import org.elasticsearch.client.security.PutUserRequest; +import org.elasticsearch.client.security.PutUserResponse; +import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.client.security.EmptyResponse; + +import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase { + + public void testPutUser() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + //tag::put-user-execute + char[] password = new char[] { 'p', 'a', 's', 's', 'w', 'o', 'r', 'd' }; + PutUserRequest request = + new PutUserRequest("example", password, Collections.singletonList("superuser"), null, null, true, null, RefreshPolicy.NONE); + PutUserResponse response = client.security().putUser(request, RequestOptions.DEFAULT); + //end::put-user-execute + + //tag::put-user-response + boolean isCreated = response.isCreated(); // <1> + //end::put-user-response + + assertTrue(isCreated); + } + + { + char[] password = new char[] { 'p', 'a', 's', 's', 'w', 'o', 'r', 'd' }; + PutUserRequest request = new PutUserRequest("example2", password, Collections.singletonList("superuser"), null, null, true, + null, RefreshPolicy.NONE); + // tag::put-user-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(PutUserResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::put-user-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::put-user-execute-async + client.security().putUserAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::put-user-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testEnableUser() throws Exception { + RestHighLevelClient client = highLevelClient(); + char[] password = new char[]{'p', 'a', 's', 's', 'w', 'o', 'r', 'd'}; + PutUserRequest putUserRequest = new PutUserRequest("enable_user", password, Collections.singletonList("superuser"), null, + null, true, null, RefreshPolicy.IMMEDIATE); + PutUserResponse putUserResponse = client.security().putUser(putUserRequest, RequestOptions.DEFAULT); + assertTrue(putUserResponse.isCreated()); + + { + //tag::enable-user-execute + EnableUserRequest request = new EnableUserRequest("enable_user", RefreshPolicy.NONE); + EmptyResponse response = client.security().enableUser(request, RequestOptions.DEFAULT); + //end::enable-user-execute + 
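+            // EmptyResponse carries no fields; receiving a non-null instance
+            // simply means the request succeeded, which the assertion below uses.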
+ assertNotNull(response); + } + + { + //tag::enable-user-execute-listener + EnableUserRequest request = new EnableUserRequest("enable_user", RefreshPolicy.NONE); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(EmptyResponse setUserEnabledResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::enable-user-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::enable-user-execute-async + client.security().enableUserAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::enable-user-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testDisableUser() throws Exception { + RestHighLevelClient client = highLevelClient(); + char[] password = new char[]{'p', 'a', 's', 's', 'w', 'o', 'r', 'd'}; + PutUserRequest putUserRequest = new PutUserRequest("disable_user", password, Collections.singletonList("superuser"), null, + null, true, null, RefreshPolicy.IMMEDIATE); + PutUserResponse putUserResponse = client.security().putUser(putUserRequest, RequestOptions.DEFAULT); + assertTrue(putUserResponse.isCreated()); + { + //tag::disable-user-execute + DisableUserRequest request = new DisableUserRequest("disable_user", RefreshPolicy.NONE); + EmptyResponse response = client.security().disableUser(request, RequestOptions.DEFAULT); + //end::disable-user-execute + + assertNotNull(response); + } + + { + //tag::disable-user-execute-listener + DisableUserRequest request = new DisableUserRequest("disable_user", RefreshPolicy.NONE); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(EmptyResponse setUserEnabledResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::disable-user-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::disable-user-execute-async + client.security().disableUserAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::disable-user-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java index b1374ca85b6..c5d53abd978 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java @@ -17,22 +17,21 @@ package org.elasticsearch.client.documentation;/* * under the License. 
*/ -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RequestOptions; -import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.script.Script; import org.elasticsearch.script.StoredScriptSource; @@ -43,8 +42,8 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static java.util.Collections.emptyMap; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; /** @@ -189,16 +188,124 @@ public class StoredScriptsDocumentationIT extends ESRestHighLevelClientTestCase assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + public void testPutScript() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + // tag::put-stored-script-request + PutStoredScriptRequest request = new PutStoredScriptRequest(); + request.id("id"); // <1> + request.content(new BytesArray( + "{\n" + + "\"script\": {\n" + + "\"lang\": \"painless\",\n" + + "\"source\": \"Math.log(_score * 2) + params.multiplier\"" + + "}\n" + + "}\n" + ), XContentType.JSON); // <2> + // end::put-stored-script-request + + // tag::put-stored-script-context + request.context("context"); // <1> + // end::put-stored-script-context + + // tag::put-stored-script-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::put-stored-script-timeout + + // tag::put-stored-script-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::put-stored-script-masterTimeout + } + + { + PutStoredScriptRequest request = new PutStoredScriptRequest(); + request.id("id"); + + // tag::put-stored-script-content-painless + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + { + builder.startObject("script"); + { + builder.field("lang", "painless"); + builder.field("source", "Math.log(_score * 2) + params.multiplier"); + } + builder.endObject(); + } + builder.endObject(); + request.content(BytesReference.bytes(builder), XContentType.JSON); // <1> + // end::put-stored-script-content-painless + + + // tag::put-stored-script-execute + AcknowledgedResponse 
putStoredScriptResponse = client.putScript(request, RequestOptions.DEFAULT); + // end::put-stored-script-execute + + // tag::put-stored-script-response + boolean acknowledged = putStoredScriptResponse.isAcknowledged(); // <1> + // end::put-stored-script-response + + assertTrue(acknowledged); + + // tag::put-stored-script-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::put-stored-script-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::put-stored-script-execute-async + client.putScriptAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::put-stored-script-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + { + PutStoredScriptRequest request = new PutStoredScriptRequest(); + request.id("id"); + + // tag::put-stored-script-content-mustache + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + { + builder.startObject("script"); + { + builder.field("lang", "mustache"); + builder.field("source", "{\"query\":{\"match\":{\"title\":\"{{query_string}}\"}}}"); + } + builder.endObject(); + } + builder.endObject(); + request.content(BytesReference.bytes(builder), XContentType.JSON); // <1> + // end::put-stored-script-content-mustache + + client.putScript(request, RequestOptions.DEFAULT); + + Map script = getAsMap("/_scripts/id"); + assertThat(extractValue("script.lang", script), equalTo("mustache")); + assertThat(extractValue("script.source", script), equalTo("{\"query\":{\"match\":{\"title\":\"{{query_string}}\"}}}")); + } + } + private void putStoredScript(String id, StoredScriptSource scriptSource) throws IOException { - final String script = Strings.toString(scriptSource.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS)); - // TODO: change to HighLevel PutStoredScriptRequest when it will be ready - // so far - using low-level REST API - Response putResponse = - adminClient() - .performRequest("PUT", "/_scripts/" + id, emptyMap(), - new StringEntity("{\"script\":" + script + "}", - ContentType.APPLICATION_JSON)); - assertEquals(putResponse.getStatusLine().getReasonPhrase(), 200, putResponse.getStatusLine().getStatusCode()); - assertEquals("{\"acknowledged\":true}", EntityUtils.toString(putResponse.getEntity())); + PutStoredScriptRequest request = + new PutStoredScriptRequest(id, "search", new BytesArray("{}"), XContentType.JSON, scriptSource); + assertAcked(execute(request, highLevelClient()::putScript, highLevelClient()::putScriptAsync)); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/CloseJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/CloseJobRequestTests.java new file mode 100644 index 00000000000..cf5f5ca3c0f --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/CloseJobRequestTests.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class CloseJobRequestTests extends AbstractXContentTestCase { + + public void testCloseAllJobsRequest() { + CloseJobRequest request = CloseJobRequest.closeAllJobsRequest(); + assertEquals(request.getJobIds().size(), 1); + assertEquals(request.getJobIds().get(0), "_all"); + } + + public void testWithNullJobIds() { + Exception exception = expectThrows(IllegalArgumentException.class, CloseJobRequest::new); + assertEquals(exception.getMessage(), "jobIds must not be empty"); + + exception = expectThrows(NullPointerException.class, () -> new CloseJobRequest("job1", null)); + assertEquals(exception.getMessage(), "jobIds must not contain null values"); + } + + + @Override + protected CloseJobRequest createTestInstance() { + int jobCount = randomIntBetween(1, 10); + List jobIds = new ArrayList<>(jobCount); + + for (int i = 0; i < jobCount; i++) { + jobIds.add(randomAlphaOfLength(10)); + } + + CloseJobRequest request = new CloseJobRequest(jobIds.toArray(new String[0])); + + if (randomBoolean()) { + request.setAllowNoJobs(randomBoolean()); + } + + if (randomBoolean()) { + request.setTimeout(TimeValue.timeValueMinutes(randomIntBetween(1, 10))); + } + + if (randomBoolean()) { + request.setForce(randomBoolean()); + } + + return request; + } + + @Override + protected CloseJobRequest doParseInstance(XContentParser parser) throws IOException { + return CloseJobRequest.PARSER.parse(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/CloseJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/CloseJobResponseTests.java new file mode 100644 index 00000000000..04389a3af7e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/CloseJobResponseTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class CloseJobResponseTests extends AbstractXContentTestCase<CloseJobResponse> {
+
+    @Override
+    protected CloseJobResponse createTestInstance() {
+        return new CloseJobResponse(randomBoolean());
+    }
+
+    @Override
+    protected CloseJobResponse doParseInstance(XContentParser parser) throws IOException {
+        return CloseJobResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteJobRequestTests.java
similarity index 93%
rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequestTests.java
rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteJobRequestTests.java
index fb8a38fa0c6..d3ccb98eeb6 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequestTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteJobRequestTests.java
@@ -16,9 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.xpack.ml;
+package org.elasticsearch.client.ml;
 
-import org.elasticsearch.protocol.xpack.ml.job.config.JobTests;
+import org.elasticsearch.client.ml.job.config.JobTests;
 import org.elasticsearch.test.ESTestCase;
 
 public class DeleteJobRequestTests extends ESTestCase {
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteJobResponseTests.java
similarity index 96%
rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponseTests.java
rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteJobResponseTests.java
index a73179a0898..2eb4d51e191 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteJobResponseTests.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.xpack.ml;
+package org.elasticsearch.client.ml;
 
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.test.AbstractXContentTestCase;
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobRequestTests.java
new file mode 100644
index 00000000000..c2bddd436cc
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobRequestTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class FlushJobRequestTests extends AbstractXContentTestCase<FlushJobRequest> {
+
+    @Override
+    protected FlushJobRequest createTestInstance() {
+        FlushJobRequest request = new FlushJobRequest(randomAlphaOfLengthBetween(1, 20));
+
+        if (randomBoolean()) {
+            request.setCalcInterim(randomBoolean());
+        }
+        if (randomBoolean()) {
+            request.setAdvanceTime(String.valueOf(randomLong()));
+        }
+        if (randomBoolean()) {
+            request.setStart(String.valueOf(randomLong()));
+        }
+        if (randomBoolean()) {
+            request.setEnd(String.valueOf(randomLong()));
+        }
+        if (randomBoolean()) {
+            request.setSkipTime(String.valueOf(randomLong()));
+        }
+        return request;
+    }
+
+    @Override
+    protected FlushJobRequest doParseInstance(XContentParser parser) throws IOException {
+        return FlushJobRequest.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobResponseTests.java
new file mode 100644
index 00000000000..bc968ff4564
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobResponseTests.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.Date;
+
+public class FlushJobResponseTests extends AbstractXContentTestCase<FlushJobResponse> {
+
+    @Override
+    protected FlushJobResponse createTestInstance() {
+        return new FlushJobResponse(randomBoolean(),
+            randomBoolean() ? null : new Date(randomNonNegativeLong()));
+    }
+
+    @Override
+    protected FlushJobResponse doParseInstance(XContentParser parser) throws IOException {
+        return FlushJobResponse.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/ForecastJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/ForecastJobRequestTests.java
new file mode 100644
index 00000000000..c6a33dad609
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/ForecastJobRequestTests.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class ForecastJobRequestTests extends AbstractXContentTestCase<ForecastJobRequest> {
+
+    @Override
+    protected ForecastJobRequest createTestInstance() {
+        ForecastJobRequest request = new ForecastJobRequest(randomAlphaOfLengthBetween(1, 20));
+
+        if (randomBoolean()) {
+            request.setExpiresIn(TimeValue.timeValueHours(randomInt(10)));
+        }
+        if (randomBoolean()) {
+            request.setDuration(TimeValue.timeValueHours(randomIntBetween(24, 72)));
+        }
+        return request;
+    }
+
+    @Override
+    protected ForecastJobRequest doParseInstance(XContentParser parser) throws IOException {
+        return ForecastJobRequest.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/ForecastJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/ForecastJobResponseTests.java
new file mode 100644
index 00000000000..c7833a79cba
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/ForecastJobResponseTests.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class ForecastJobResponseTests extends AbstractXContentTestCase<ForecastJobResponse> {
+
+    @Override
+    protected ForecastJobResponse createTestInstance() {
+        return new ForecastJobResponse(randomBoolean(), randomAlphaOfLength(10));
+    }
+
+    @Override
+    protected ForecastJobResponse doParseInstance(XContentParser parser) throws IOException {
+        return ForecastJobResponse.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsRequestTests.java
new file mode 100644
index 00000000000..d6379886912
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsRequestTests.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.util.PageParams;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class GetBucketsRequestTests extends AbstractXContentTestCase<GetBucketsRequest> {
+
+    @Override
+    protected GetBucketsRequest createTestInstance() {
+        GetBucketsRequest request = new GetBucketsRequest(randomAlphaOfLengthBetween(1, 20));
+
+        if (randomBoolean()) {
+            request.setTimestamp(String.valueOf(randomLong()));
+        } else {
+            if (randomBoolean()) {
+                request.setStart(String.valueOf(randomLong()));
+            }
+            if (randomBoolean()) {
+                request.setEnd(String.valueOf(randomLong()));
+            }
+            if (randomBoolean()) {
+                request.setExcludeInterim(randomBoolean());
+            }
+            if (randomBoolean()) {
+                request.setAnomalyScore(randomDouble());
+            }
+            if (randomBoolean()) {
+                int from = randomInt(10000);
+                int size = randomInt(10000);
+                request.setPageParams(new PageParams(from, size));
+            }
+            if (randomBoolean()) {
+                request.setSort("anomaly_score");
+            }
+            if (randomBoolean()) {
+                request.setDescending(randomBoolean());
+            }
+        }
+        if (randomBoolean()) {
+            request.setExpand(randomBoolean());
+        }
+        if (randomBoolean()) {
+            request.setExcludeInterim(randomBoolean());
+        }
+        return request;
+    }
+
+    @Override
+    protected GetBucketsRequest doParseInstance(XContentParser parser) throws IOException {
+        return GetBucketsRequest.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsResponseTests.java
new file mode 100644
index 00000000000..7b1934c2dfa
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsResponseTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.results.Bucket;
+import org.elasticsearch.client.ml.job.results.BucketTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetBucketsResponseTests extends AbstractXContentTestCase<GetBucketsResponse> {
+
+    @Override
+    protected GetBucketsResponse createTestInstance() {
+        String jobId = randomAlphaOfLength(20);
+        int listSize = randomInt(10);
+        List<Bucket> buckets = new ArrayList<>(listSize);
+        for (int j = 0; j < listSize; j++) {
+            Bucket bucket = BucketTests.createTestInstance(jobId);
+            buckets.add(bucket);
+        }
+        return new GetBucketsResponse(buckets, listSize);
+    }
+
+    @Override
+    protected GetBucketsResponse doParseInstance(XContentParser parser) throws IOException {
+        return GetBucketsResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetInfluencersRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetInfluencersRequestTests.java
new file mode 100644
index 00000000000..94937cd7815
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetInfluencersRequestTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.util.PageParams;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class GetInfluencersRequestTests extends AbstractXContentTestCase<GetInfluencersRequest> {
+
+    @Override
+    protected GetInfluencersRequest createTestInstance() {
+        GetInfluencersRequest request = new GetInfluencersRequest(randomAlphaOfLengthBetween(1, 20));
+
+        if (randomBoolean()) {
+            request.setStart(String.valueOf(randomLong()));
+        }
+        if (randomBoolean()) {
+            request.setEnd(String.valueOf(randomLong()));
+        }
+        if (randomBoolean()) {
+            request.setExcludeInterim(randomBoolean());
+        }
+        if (randomBoolean()) {
+            request.setInfluencerScore(randomDouble());
+        }
+        if (randomBoolean()) {
+            int from = randomInt(10000);
+            int size = randomInt(10000);
+            request.setPageParams(new PageParams(from, size));
+        }
+        if (randomBoolean()) {
+            request.setSort("influencer_score");
+        }
+        if (randomBoolean()) {
+            request.setDescending(randomBoolean());
+        }
+        if (randomBoolean()) {
+            request.setExcludeInterim(randomBoolean());
+        }
+        return request;
+    }
+
+    @Override
+    protected GetInfluencersRequest doParseInstance(XContentParser parser) throws IOException {
+        return GetInfluencersRequest.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetInfluencersResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetInfluencersResponseTests.java
new file mode 100644
index 00000000000..5f1fa3c1ab5
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetInfluencersResponseTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.results.Influencer;
+import org.elasticsearch.client.ml.job.results.InfluencerTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetInfluencersResponseTests extends AbstractXContentTestCase<GetInfluencersResponse> {
+
+    @Override
+    protected GetInfluencersResponse createTestInstance() {
+        String jobId = randomAlphaOfLength(20);
+        int listSize = randomInt(10);
+        List<Influencer> influencers = new ArrayList<>(listSize);
+        for (int j = 0; j < listSize; j++) {
+            Influencer influencer = InfluencerTests.createTestInstance(jobId);
+            influencers.add(influencer);
+        }
+        return new GetInfluencersResponse(influencers, listSize);
+    }
+
+    @Override
+    protected GetInfluencersResponse doParseInstance(XContentParser parser) throws IOException {
+        return GetInfluencersResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobRequestTests.java
new file mode 100644
index 00000000000..77b2109dd7c
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobRequestTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetJobRequestTests extends AbstractXContentTestCase<GetJobRequest> {
+
+    public void testAllJobsRequest() {
+        GetJobRequest request = GetJobRequest.getAllJobsRequest();
+
+        assertEquals(request.getJobIds().size(), 1);
+        assertEquals(request.getJobIds().get(0), "_all");
+    }
+
+    public void testNewWithJobId() {
+        Exception exception = expectThrows(NullPointerException.class, () -> new GetJobRequest("job", null));
+        assertEquals(exception.getMessage(), "jobIds must not contain null values");
+    }
+
+    @Override
+    protected GetJobRequest createTestInstance() {
+        int jobCount = randomIntBetween(0, 10);
+        List<String> jobIds = new ArrayList<>(jobCount);
+
+        for (int i = 0; i < jobCount; i++) {
+            jobIds.add(randomAlphaOfLength(10));
+        }
+
+        GetJobRequest request = new GetJobRequest(jobIds);
+
+        if (randomBoolean()) {
+            request.setAllowNoJobs(randomBoolean());
+        }
+
+        return request;
+    }
+
+    @Override
+    protected GetJobRequest doParseInstance(XContentParser parser) throws IOException {
+        return GetJobRequest.PARSER.parse(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobResponseTests.java
new file mode 100644
index 00000000000..8cc990730f7
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobResponseTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.config.Job;
+import org.elasticsearch.client.ml.job.config.JobTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Predicate;
+
+public class GetJobResponseTests extends AbstractXContentTestCase<GetJobResponse> {
+
+    @Override
+    protected GetJobResponse createTestInstance() {
+
+        int count = randomIntBetween(1, 5);
+        List<Job.Builder> results = new ArrayList<>(count);
+        for (int i = 0; i < count; i++) {
+            results.add(JobTests.createRandomizedJobBuilder());
+        }
+
+        return new GetJobResponse(results, count);
+    }
+
+    @Override
+    protected GetJobResponse doParseInstance(XContentParser parser) throws IOException {
+        return GetJobResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return field -> !field.isEmpty();
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsRequestTests.java
new file mode 100644
index 00000000000..690e5829766
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsRequestTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetJobStatsRequestTests extends AbstractXContentTestCase<GetJobStatsRequest> {
+
+    public void testAllJobsRequest() {
+        GetJobStatsRequest request = GetJobStatsRequest.getAllJobStatsRequest();
+
+        assertEquals(request.getJobIds().size(), 1);
+        assertEquals(request.getJobIds().get(0), "_all");
+    }
+
+    public void testNewWithJobId() {
+        Exception exception = expectThrows(NullPointerException.class, () -> new GetJobStatsRequest("job", null));
+        assertEquals(exception.getMessage(), "jobIds must not contain null values");
+    }
+
+    @Override
+    protected GetJobStatsRequest createTestInstance() {
+        int jobCount = randomIntBetween(0, 10);
+        List<String> jobIds = new ArrayList<>(jobCount);
+
+        for (int i = 0; i < jobCount; i++) {
+            jobIds.add(randomAlphaOfLength(10));
+        }
+
+        GetJobStatsRequest request = new GetJobStatsRequest(jobIds);
+
+        if (randomBoolean()) {
+            request.setAllowNoJobs(randomBoolean());
+        }
+
+        return request;
+    }
+
+    @Override
+    protected GetJobStatsRequest doParseInstance(XContentParser parser) throws IOException {
+        return GetJobStatsRequest.PARSER.parse(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsResponseTests.java
new file mode 100644
index 00000000000..23f7bcc042b
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsResponseTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.client.ml.job.stats.JobStats;
+import org.elasticsearch.client.ml.job.stats.JobStatsTests;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetJobStatsResponseTests extends AbstractXContentTestCase<GetJobStatsResponse> {
+
+    @Override
+    protected GetJobStatsResponse createTestInstance() {
+
+        int count = randomIntBetween(1, 5);
+        List<JobStats> results = new ArrayList<>(count);
+        for (int i = 0; i < count; i++) {
+            results.add(JobStatsTests.createRandomInstance());
+        }
+
+        return new GetJobStatsResponse(results, count);
+    }
+
+    @Override
+    protected GetJobStatsResponse doParseInstance(XContentParser parser) throws IOException {
+        return GetJobStatsResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetOverallBucketsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetOverallBucketsRequestTests.java
new file mode 100644
index 00000000000..e50278fabbd
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetOverallBucketsRequestTests.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class GetOverallBucketsRequestTests extends AbstractXContentTestCase<GetOverallBucketsRequest> {
+
+    @Override
+    protected GetOverallBucketsRequest createTestInstance() {
+        GetOverallBucketsRequest request = new GetOverallBucketsRequest(randomAlphaOfLengthBetween(1, 20));
+
+        if (randomBoolean()) {
+            request.setTopN(randomIntBetween(1, 10));
+        }
+
+        if (randomBoolean()) {
+            request.setBucketSpan(TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000)));
+        }
+        if (randomBoolean()) {
+            request.setStart(String.valueOf(randomLong()));
+        }
+        if (randomBoolean()) {
+            request.setEnd(String.valueOf(randomLong()));
+        }
+        if (randomBoolean()) {
+            request.setExcludeInterim(randomBoolean());
+        }
+        if (randomBoolean()) {
+            request.setOverallScore(randomDouble());
+        }
+        if (randomBoolean()) {
+            request.setExcludeInterim(randomBoolean());
+        }
+        return request;
+    }
+
+    @Override
+    protected GetOverallBucketsRequest doParseInstance(XContentParser parser) throws IOException {
+        return GetOverallBucketsRequest.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetOverallBucketsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetOverallBucketsResponseTests.java
new file mode 100644
index 00000000000..2c67dad4aa4
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetOverallBucketsResponseTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.results.OverallBucket;
+import org.elasticsearch.client.ml.job.results.OverallBucketTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetOverallBucketsResponseTests extends AbstractXContentTestCase<GetOverallBucketsResponse> {
+
+    @Override
+    protected GetOverallBucketsResponse createTestInstance() {
+        int listSize = randomInt(10);
+        List<OverallBucket> overallBuckets = new ArrayList<>(listSize);
+        for (int j = 0; j < listSize; j++) {
+            OverallBucket overallBucket = OverallBucketTests.createRandom();
+            overallBuckets.add(overallBucket);
+        }
+        return new GetOverallBucketsResponse(overallBuckets, listSize);
+    }
+
+    @Override
+    protected GetOverallBucketsResponse doParseInstance(XContentParser parser) throws IOException {
+        return GetOverallBucketsResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsRequestTests.java
new file mode 100644
index 00000000000..f6f4b49889a
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsRequestTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.util.PageParams;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class GetRecordsRequestTests extends AbstractXContentTestCase<GetRecordsRequest> {
+
+    @Override
+    protected GetRecordsRequest createTestInstance() {
+        GetRecordsRequest request = new GetRecordsRequest(randomAlphaOfLengthBetween(1, 20));
+
+        if (randomBoolean()) {
+            request.setStart(String.valueOf(randomLong()));
+        }
+        if (randomBoolean()) {
+            request.setEnd(String.valueOf(randomLong()));
+        }
+        if (randomBoolean()) {
+            request.setExcludeInterim(randomBoolean());
+        }
+        if (randomBoolean()) {
+            request.setRecordScore(randomDouble());
+        }
+        if (randomBoolean()) {
+            int from = randomInt(10000);
+            int size = randomInt(10000);
+            request.setPageParams(new PageParams(from, size));
+        }
+        if (randomBoolean()) {
+            request.setSort("anomaly_score");
+        }
+        if (randomBoolean()) {
+            request.setDescending(randomBoolean());
+        }
+        if (randomBoolean()) {
+            request.setExcludeInterim(randomBoolean());
+        }
+        return request;
+    }
+
+    @Override
+    protected GetRecordsRequest doParseInstance(XContentParser parser) throws IOException {
+        return GetRecordsRequest.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsResponseTests.java
new file mode 100644
index 00000000000..3b37b771bef
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsResponseTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.results.AnomalyRecord;
+import org.elasticsearch.client.ml.job.results.AnomalyRecordTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetRecordsResponseTests extends AbstractXContentTestCase<GetRecordsResponse> {
+
+    @Override
+    protected GetRecordsResponse createTestInstance() {
+        String jobId = randomAlphaOfLength(20);
+        int listSize = randomInt(10);
+        List<AnomalyRecord> records = new ArrayList<>(listSize);
+        for (int j = 0; j < listSize; j++) {
+            AnomalyRecord record = AnomalyRecordTests.createTestInstance(jobId);
+            records.add(record);
+        }
+        return new GetRecordsResponse(records, listSize);
+    }
+
+    @Override
+    protected GetRecordsResponse doParseInstance(XContentParser parser) throws IOException {
+        return GetRecordsResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/NodeAttributesTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/NodeAttributesTests.java
new file mode 100644
index 00000000000..cee1710a622
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/NodeAttributesTests.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Predicate;
+
+public class NodeAttributesTests extends AbstractXContentTestCase<NodeAttributes> {
+
+    public static NodeAttributes createRandom() {
+        int numberOfAttributes = randomIntBetween(1, 10);
+        Map<String, String> attributes = new HashMap<>(numberOfAttributes);
+        for (int i = 0; i < numberOfAttributes; i++) {
+            String val = randomAlphaOfLength(10);
+            attributes.put("key-" + i, val);
+        }
+        return new NodeAttributes(randomAlphaOfLength(10),
+            randomAlphaOfLength(10),
+            randomAlphaOfLength(10),
+            randomAlphaOfLength(10),
+            attributes);
+    }
+
+    @Override
+    protected NodeAttributes createTestInstance() {
+        return createRandom();
+    }
+
+    @Override
+    protected NodeAttributes doParseInstance(XContentParser parser) throws IOException {
+        return NodeAttributes.PARSER.parse(parser, null);
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return field -> !field.isEmpty();
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/OpenJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/OpenJobRequestTests.java
similarity index 93%
rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/OpenJobRequestTests.java
rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/OpenJobRequestTests.java
index 242f0cf4e8a..c6ce3436446 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/OpenJobRequestTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/OpenJobRequestTests.java
@@ -16,11 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.xpack.ml;
+package org.elasticsearch.client.ml;
 
+import org.elasticsearch.client.ml.job.config.JobTests;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.protocol.xpack.ml.job.config.JobTests;
 import org.elasticsearch.test.AbstractXContentTestCase;
 
 import java.io.IOException;
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/OpenJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/OpenJobResponseTests.java
similarity index 96%
rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/OpenJobResponseTests.java
rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/OpenJobResponseTests.java
index aadfb236d3a..7f177c6e1ef 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/OpenJobResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/OpenJobResponseTests.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.xpack.ml;
+package org.elasticsearch.client.ml;
 
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.test.AbstractXContentTestCase;
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PostDataRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PostDataRequestTests.java
new file mode 100644
index 00000000000..363d37c3ca4
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PostDataRequestTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
+import java.util.Map;
+
+
+public class PostDataRequestTests extends AbstractXContentTestCase<PostDataRequest> {
+
+    @Override
+    protected PostDataRequest createTestInstance() {
+        String jobId = randomAlphaOfLength(10);
+        XContentType contentType = randomFrom(XContentType.JSON, XContentType.SMILE);
+
+        PostDataRequest request = new PostDataRequest(jobId, contentType, new byte[0]);
+        if (randomBoolean()) {
+            request.setResetEnd(randomAlphaOfLength(10));
+        }
+        if (randomBoolean()) {
+            request.setResetStart(randomAlphaOfLength(10));
+        }
+
+        return request;
+    }
+
+    @Override
+    protected PostDataRequest doParseInstance(XContentParser parser) {
+        return PostDataRequest.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+
+    public void testJsonBuilder() throws IOException {
+
+        String jobId = randomAlphaOfLength(10);
+        PostDataRequest.JsonBuilder builder = new PostDataRequest.JsonBuilder();
+
+        Map<String, Object> obj1 = new HashMap<>();
+        obj1.put("entry1", "value1");
+        obj1.put("entry2", "value2");
+        builder.addDoc(obj1);
+
+        builder.addDoc("{\"entry3\":\"value3\"}");
+        builder.addDoc("{\"entry4\":\"value4\"}".getBytes(StandardCharsets.UTF_8));
+
+        PostDataRequest request = new PostDataRequest(jobId, builder);
+
+        assertEquals("{\"entry1\":\"value1\",\"entry2\":\"value2\"}{\"entry3\":\"value3\"}{\"entry4\":\"value4\"}",
+            request.getContent().utf8ToString());
+        assertEquals(XContentType.JSON, request.getXContentType());
+        assertEquals(jobId, request.getJobId());
+    }
+
+    public void testFromByteArray() {
+        String jobId = randomAlphaOfLength(10);
+        PostDataRequest request = new PostDataRequest(jobId,
+            XContentType.JSON,
+            "{\"others\":{\"foo\":100}}".getBytes(StandardCharsets.UTF_8));
+
+        assertEquals("{\"others\":{\"foo\":100}}", request.getContent().utf8ToString());
+        assertEquals(XContentType.JSON, request.getXContentType());
+        assertEquals(jobId, request.getJobId());
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PostDataResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PostDataResponseTests.java
new file mode 100644
index 00000000000..fc74040cc40
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PostDataResponseTests.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.process.DataCountsTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class PostDataResponseTests extends AbstractXContentTestCase<PostDataResponse> {
+
+    @Override
+    protected PostDataResponse createTestInstance() {
+        return new PostDataResponse(DataCountsTests.createTestInstance(randomAlphaOfLength(10)));
+    }
+
+    @Override
+    protected PostDataResponse doParseInstance(XContentParser parser) throws IOException {
+        return PostDataResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/PutJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutJobRequestTests.java
similarity index 89%
rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/PutJobRequestTests.java
rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutJobRequestTests.java
index 165934224b9..b58d849de1f 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/PutJobRequestTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutJobRequestTests.java
@@ -16,11 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.xpack.ml;
+package org.elasticsearch.client.ml;
 
+import org.elasticsearch.client.ml.job.config.Job;
+import org.elasticsearch.client.ml.job.config.JobTests;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.protocol.xpack.ml.job.config.Job;
-import org.elasticsearch.protocol.xpack.ml.job.config.JobTests;
 import org.elasticsearch.test.AbstractXContentTestCase;
 
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/PutJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutJobResponseTests.java
similarity index 92%
rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/PutJobResponseTests.java
rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutJobResponseTests.java
index ed91e33635b..1f435783d0f 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/PutJobResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutJobResponseTests.java
@@ -16,10 +16,10 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.xpack.ml;
+package org.elasticsearch.client.ml;
 
+import org.elasticsearch.client.ml.job.config.JobTests;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.protocol.xpack.ml.job.config.JobTests;
 import org.elasticsearch.test.AbstractXContentTestCase;
 
 import java.io.IOException;
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateJobRequestTests.java
new file mode 100644
index 00000000000..4d2bbb2e200
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateJobRequestTests.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.config.JobTests;
+import org.elasticsearch.client.ml.job.config.JobUpdate;
+import org.elasticsearch.client.ml.job.config.JobUpdateTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+
+public class UpdateJobRequestTests extends AbstractXContentTestCase<UpdateJobRequest> {
+
+    @Override
+    protected UpdateJobRequest createTestInstance() {
+        return new UpdateJobRequest(JobUpdateTests.createRandom(JobTests.randomValidJobId()));
+    }
+
+    @Override
+    protected UpdateJobRequest doParseInstance(XContentParser parser) {
+        return new UpdateJobRequest(JobUpdate.PARSER.apply(parser, null).build());
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/ChunkingConfigTests.java
similarity index 97%
rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfigTests.java
rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/ChunkingConfigTests.java
index c835788bb1c..c1c0daaa938 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfigTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/ChunkingConfigTests.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.xpack.ml.datafeed;
+package org.elasticsearch.client.ml.datafeed;
 
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentParser;
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java
similarity index 98%
rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfigTests.java
rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java
index f45d88d318e..8ed51415521 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfigTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.xpack.ml.datafeed;
+package org.elasticsearch.client.ml.datafeed;
 
 import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
 import org.elasticsearch.common.settings.Settings;
@@ -30,7 +30,7 @@ import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.search.SearchModule;
 import org.elasticsearch.search.aggregations.AggregationBuilders;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
-import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
 import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField;
 import org.elasticsearch.test.AbstractXContentTestCase;
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java
similarity index 98%
rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdateTests.java
rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java
index edbef8461e0..3dddad3c016 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdateTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.xpack.ml.datafeed;
+package org.elasticsearch.client.ml.datafeed;
 
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java
similarity index 99%
rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfigTests.java
rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java
index 34f12fc067e..7b76688f4d3 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfigTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisLimitsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisLimitsTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisLimitsTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisLimitsTests.java index 5003da10780..cb14d19300d 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisLimitsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisLimitsTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/CategorizationAnalyzerConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/CategorizationAnalyzerConfigTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/CategorizationAnalyzerConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/CategorizationAnalyzerConfigTests.java index 36fb51ed10e..889926e00d6 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/CategorizationAnalyzerConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/CategorizationAnalyzerConfigTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DataDescriptionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DataDescriptionTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DataDescriptionTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DataDescriptionTests.java index 8ca2dc494f3..9c1f361ce0e 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DataDescriptionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DataDescriptionTests.java @@ -16,12 +16,12 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription.DataFormat; import org.elasticsearch.test.AbstractXContentTestCase; +import static org.elasticsearch.client.ml.job.config.DataDescription.DataFormat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.Is.is; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRuleTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DetectionRuleTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRuleTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DetectionRuleTests.java index bc70a404894..32c6ca426ca 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRuleTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DetectionRuleTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DetectorTests.java similarity index 99% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DetectorTests.java index 0b1ba892acd..7801447e724 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DetectorTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRefTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/FilterRefTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRefTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/FilterRefTests.java index 00862e5307b..cdc79b83524 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRefTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/FilterRefTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
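The JobTests hunk a little further below splits `createRandomizedJob()` into a builder-returning variant. The point of the refactor is that callers can pin individual fields on an otherwise randomized job; a hedged usage sketch, using only setters visible in this diff:

```java
// Pin one field on an otherwise random Job instead of taking it as-is.
Job.Builder builder = JobTests.createRandomizedJobBuilder();
builder.setResultsIndexName("results-index-for-this-test"); // make this field deterministic
Job job = builder.build();
```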
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/JobTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/JobTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobTests.java index 7ba4946efa7..1946f70a230 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/JobTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import org.elasticsearch.common.settings.Settings; @@ -210,7 +210,7 @@ public class JobTests extends AbstractXContentTestCase { return new AnalysisConfig.Builder(Arrays.asList(d1.build(), d2.build())); } - public static Job createRandomizedJob() { + public static Job.Builder createRandomizedJobBuilder() { String jobId = randomValidJobId(); Job.Builder builder = new Job.Builder(jobId); if (randomBoolean()) { @@ -265,7 +265,11 @@ public class JobTests extends AbstractXContentTestCase { if (randomBoolean()) { builder.setResultsIndexName(randomValidJobId()); } - return builder.build(); + return builder; + } + + public static Job createRandomizedJob() { + return createRandomizedJobBuilder().build(); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobUpdateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobUpdateTests.java new file mode 100644 index 00000000000..b159fedb95d --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobUpdateTests.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.client.ml.job.config;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Predicate;
+
+public class JobUpdateTests extends AbstractXContentTestCase<JobUpdate> {
+
+    @Override
+    protected JobUpdate createTestInstance() {
+        return createRandom(randomAlphaOfLength(4));
+    }
+
+    /**
+     * Creates a completely random update when the job is null
+     * or a random update that is valid for the given job
+     */
+    public static JobUpdate createRandom(String jobId) {
+        JobUpdate.Builder update = new JobUpdate.Builder(jobId);
+        if (randomBoolean()) {
+            int groupsNum = randomIntBetween(0, 10);
+            List<String> groups = new ArrayList<>(groupsNum);
+            for (int i = 0; i < groupsNum; i++) {
+                groups.add(JobTests.randomValidJobId());
+            }
+            update.setGroups(groups);
+        }
+        if (randomBoolean()) {
+            update.setDescription(randomAlphaOfLength(20));
+        }
+        if (randomBoolean()) {
+            update.setDetectorUpdates(createRandomDetectorUpdates());
+        }
+        if (randomBoolean()) {
+            update.setModelPlotConfig(new ModelPlotConfig(randomBoolean(), randomAlphaOfLength(10)));
+        }
+        if (randomBoolean()) {
+            update.setAnalysisLimits(AnalysisLimitsTests.createRandomized());
+        }
+        if (randomBoolean()) {
+            update.setRenormalizationWindowDays(randomNonNegativeLong());
+        }
+        if (randomBoolean()) {
+            update.setBackgroundPersistInterval(TimeValue.timeValueHours(randomIntBetween(1, 24)));
+        }
+        if (randomBoolean()) {
+            update.setModelSnapshotRetentionDays(randomNonNegativeLong());
+        }
+        if (randomBoolean()) {
+            update.setResultsRetentionDays(randomNonNegativeLong());
+        }
+        if (randomBoolean()) {
+            update.setCategorizationFilters(Arrays.asList(generateRandomStringArray(10, 10, false)));
+        }
+        if (randomBoolean()) {
+            update.setCustomSettings(Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10)));
+        }
+
+        return update.build();
+    }
+
+
+    private static List<JobUpdate.DetectorUpdate> createRandomDetectorUpdates() {
+        int size = randomInt(10);
+        List<JobUpdate.DetectorUpdate> detectorUpdates = new ArrayList<>(size);
+        for (int i = 0; i < size; i++) {
+            String detectorDescription = null;
+            if (randomBoolean()) {
+                detectorDescription = randomAlphaOfLength(12);
+            }
+            List<DetectionRule> detectionRules = null;
+            if (randomBoolean()) {
+                detectionRules = new ArrayList<>();
+                detectionRules.add(new DetectionRule.Builder(
+                    Collections.singletonList(new RuleCondition(RuleCondition.AppliesTo.ACTUAL, Operator.GT, 5))).build());
+            }
+            detectorUpdates.add(new JobUpdate.DetectorUpdate(i, detectorDescription, detectionRules));
+        }
+        return detectorUpdates;
+    }
+
+    @Override
+    protected JobUpdate doParseInstance(XContentParser parser) {
+        return JobUpdate.PARSER.apply(parser, null).build();
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return field -> !field.isEmpty();
+    }
+}
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilterTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/MlFilterTests.java
similarity index 98%
rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilterTests.java
rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/MlFilterTests.java
index 6c595e2d6da..5e218a8dce7 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilterTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/MlFilterTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/ModelPlotConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/ModelPlotConfigTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/ModelPlotConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/ModelPlotConfigTests.java index 23f13c73212..50f1b49f414 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/ModelPlotConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/ModelPlotConfigTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleConditionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/RuleConditionTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleConditionTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/RuleConditionTests.java index 4348ea194d0..3386d3fdc52 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleConditionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/RuleConditionTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScopeTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/RuleScopeTests.java similarity index 97% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScopeTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/RuleScopeTests.java index ac97e457ac4..2231b913251 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScopeTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/RuleScopeTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
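`JobUpdateTests.createRandom` above exercises the `JobUpdate.Builder` setters one by one. For reference, a non-random update is assembled the same way and then wrapped in the `UpdateJobRequest` shown earlier (the job id and values here are illustrative):

```java
JobUpdate.Builder update = new JobUpdate.Builder("my-job");
update.setDescription("updated description");
update.setResultsRetentionDays(30L);      // keep results for 30 days
UpdateJobRequest request = new UpdateJobRequest(update.build());
```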
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/DataCountsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/DataCountsTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/DataCountsTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/DataCountsTests.java index 2232e8c88d9..7c261e8d4c9 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/DataCountsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/DataCountsTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; @@ -113,7 +113,7 @@ public class DataCountsTests extends AbstractXContentTestCase { private static DataCounts createCounts( long processedRecordCount, long processedFieldCount, long inputBytes, long inputFieldCount, - long invalidDateCount, long missingFieldCount, long outOfOrderTimeStampCount, + long invalidDateCount, long missingFieldCount, long outOfOrderTimeStampCount, long emptyBucketCount, long sparseBucketCount, long bucketCount, long earliestRecordTime, long latestRecordTime, long lastDataTimeStamp, long latestEmptyBucketTimeStamp, long latestSparseBucketTimeStamp) { diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSizeStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSizeStatsTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java index e3341123fb0..4a12a75f2b1 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSizeStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java @@ -16,15 +16,16 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; -import org.elasticsearch.protocol.xpack.ml.job.process.ModelSizeStats.MemoryStatus; import java.util.Date; +import static org.elasticsearch.client.ml.job.process.ModelSizeStats.MemoryStatus; + public class ModelSizeStatsTests extends AbstractXContentTestCase { public void testDefaultConstructor() { diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshotTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSnapshotTests.java similarity index 99% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshotTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSnapshotTests.java index 8c6a9bd83c9..9669f9bfa4f 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshotTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSnapshotTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; import org.elasticsearch.Version; import org.elasticsearch.common.unit.TimeValue; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/QuantilesTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/QuantilesTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/QuantilesTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/QuantilesTests.java index 77ae21bc6f8..24c70f6d68f 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/QuantilesTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/QuantilesTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyCauseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/AnomalyCauseTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyCauseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/AnomalyCauseTests.java index 070b9f18b4d..3ac6a0b6ec4 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyCauseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/AnomalyCauseTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyRecordTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/AnomalyRecordTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyRecordTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/AnomalyRecordTests.java index d4cadb19796..a857cd3d9b1 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyRecordTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/AnomalyRecordTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; @@ -33,7 +33,7 @@ public class AnomalyRecordTests extends AbstractXContentTestCase return createTestInstance("foo"); } - public AnomalyRecord createTestInstance(String jobId) { + public static AnomalyRecord createTestInstance(String jobId) { AnomalyRecord anomalyRecord = new AnomalyRecord(jobId, new Date(randomNonNegativeLong()), randomNonNegativeLong()); anomalyRecord.setActual(Collections.singletonList(randomDouble())); anomalyRecord.setTypical(Collections.singletonList(randomDouble())); diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketInfluencerTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketInfluencerTests.java similarity index 99% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketInfluencerTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketInfluencerTests.java index 7e4c166d1fd..7b8ba138398 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketInfluencerTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketInfluencerTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketTests.java similarity index 97% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketTests.java index 28b1893afe1..b9fac88facc 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; @@ -35,7 +35,7 @@ public class BucketTests extends AbstractXContentTestCase { return createTestInstance("foo"); } - public Bucket createTestInstance(String jobId) { + public static Bucket createTestInstance(String jobId) { Bucket bucket = new Bucket(jobId, new Date(randomNonNegativeLong()), randomNonNegativeLong()); if (randomBoolean()) { bucket.setAnomalyScore(randomDouble()); @@ -70,7 +70,7 @@ public class BucketTests extends AbstractXContentTestCase { int size = randomInt(10); List records = new ArrayList<>(size); for (int i = 0; i < size; i++) { - AnomalyRecord anomalyRecord = new AnomalyRecordTests().createTestInstance(jobId); + AnomalyRecord anomalyRecord = AnomalyRecordTests.createTestInstance(jobId); records.add(anomalyRecord); } bucket.setRecords(records); diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/CategoryDefinitionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/CategoryDefinitionTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/CategoryDefinitionTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/CategoryDefinitionTests.java index 28ef4a5ecb2..27e15a1600d 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/CategoryDefinitionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/CategoryDefinitionTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
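The `createTestInstance(String jobId)` helpers above are made `static` so other tests can compose fixtures without instantiating a test class, which is exactly what the `BucketTests` hunk now does. A sketch of the resulting call pattern (the surrounding test body and the `bucket` variable are assumed):

```java
// Build child records for a Bucket without constructing an AnomalyRecordTests instance.
List<AnomalyRecord> records = new ArrayList<>();
for (int i = 0; i < 3; i++) {
    records.add(AnomalyRecordTests.createTestInstance("shared-job"));
}
bucket.setRecords(records); // 'bucket' is a Bucket under construction in the test
```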
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/InfluenceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/InfluenceTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/InfluenceTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/InfluenceTests.java index b029997d015..89b2e5dbcbb 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/InfluenceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/InfluenceTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/InfluencerTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/InfluencerTests.java similarity index 95% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/InfluencerTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/InfluencerTests.java index 8125a1a5c72..33d1a33e9f1 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/InfluencerTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/InfluencerTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; @@ -29,7 +29,7 @@ import java.util.Date; public class InfluencerTests extends AbstractXContentTestCase { - public Influencer createTestInstance(String jobId) { + public static Influencer createTestInstance(String jobId) { Influencer influencer = new Influencer(jobId, randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20), new Date(randomNonNegativeLong()), randomNonNegativeLong()); influencer.setInterim(randomBoolean()); diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/OverallBucketTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/OverallBucketTests.java similarity index 94% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/OverallBucketTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/OverallBucketTests.java index babd7410d57..7f1af91d4df 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/OverallBucketTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/OverallBucketTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
  */
-package org.elasticsearch.protocol.xpack.ml.job.results;
+package org.elasticsearch.client.ml.job.results;
 
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.test.AbstractXContentTestCase;
@@ -32,6 +32,10 @@ public class OverallBucketTests extends AbstractXContentTestCase<OverallBucket>
 
     @Override
     protected OverallBucket createTestInstance() {
+        return createRandom();
+    }
+
+    public static OverallBucket createRandom() {
         int jobCount = randomIntBetween(0, 10);
         List<OverallBucket.JobInfo> jobs = new ArrayList<>(jobCount);
         for (int i = 0; i < jobCount; ++i) {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/ForecastStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/ForecastStatsTests.java
new file mode 100644
index 00000000000..16dfa305479
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/ForecastStatsTests.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml.job.stats;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Predicate;
+
+public class ForecastStatsTests extends AbstractXContentTestCase<ForecastStats> {
+
+    @Override
+    public ForecastStats createTestInstance() {
+        if (randomBoolean()) {
+            return createRandom(1, 22);
+        }
+        return new ForecastStats(0, null, null, null, null);
+    }
+
+    @Override
+    protected ForecastStats doParseInstance(XContentParser parser) throws IOException {
+        return ForecastStats.PARSER.parse(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return field -> !field.isEmpty();
+    }
+
+    public static ForecastStats createRandom(long minTotal, long maxTotal) {
+        return new ForecastStats(
+            randomLongBetween(minTotal, maxTotal),
+            SimpleStatsTests.createRandom(),
+            SimpleStatsTests.createRandom(),
+            SimpleStatsTests.createRandom(),
+            createCountStats());
+    }
+
+    private static Map<String, Long> createCountStats() {
+        Map<String, Long> countStats = new HashMap<>();
+        for (int i = 0; i < randomInt(10); ++i) {
+            countStats.put(randomAlphaOfLengthBetween(1, 20), randomLongBetween(1L, 100L));
+        }
+        return countStats;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java
new file mode 100644
index 00000000000..5d00f879140
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml.job.stats;
+
+import org.elasticsearch.client.ml.NodeAttributes;
+import org.elasticsearch.client.ml.NodeAttributesTests;
+import org.elasticsearch.client.ml.job.process.DataCounts;
+import org.elasticsearch.client.ml.job.process.DataCountsTests;
+import org.elasticsearch.client.ml.job.process.ModelSizeStats;
+import org.elasticsearch.client.ml.job.process.ModelSizeStatsTests;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.client.ml.job.config.JobState;
+import org.elasticsearch.client.ml.job.config.JobTests;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.function.Predicate;
+
+
+public class JobStatsTests extends AbstractXContentTestCase<JobStats> {
+
+    public static JobStats createRandomInstance() {
+        String jobId = JobTests.randomValidJobId();
+        JobState state = randomFrom(JobState.CLOSING, JobState.CLOSED, JobState.OPENED, JobState.FAILED, JobState.OPENING);
+        DataCounts dataCounts = DataCountsTests.createTestInstance(jobId);
+
+        ModelSizeStats modelSizeStats = randomBoolean() ? ModelSizeStatsTests.createRandomized() : null;
+        ForecastStats forecastStats = randomBoolean() ? ForecastStatsTests.createRandom(1, 22) : null;
+        NodeAttributes nodeAttributes = randomBoolean() ? NodeAttributesTests.createRandom() : null;
+        String assignmentExplanation = randomBoolean() ? randomAlphaOfLength(10) : null;
+        TimeValue openTime = randomBoolean() ? TimeValue.timeValueMillis(randomIntBetween(1, 10000)) : null;
+
+        return new JobStats(jobId, dataCounts, state, modelSizeStats, forecastStats, nodeAttributes, assignmentExplanation, openTime);
+    }
+
+    @Override
+    protected JobStats createTestInstance() {
+        return createRandomInstance();
+    }
+
+    @Override
+    protected JobStats doParseInstance(XContentParser parser) throws IOException {
+        return JobStats.PARSER.parse(parser, null);
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return field -> !field.isEmpty();
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/SimpleStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/SimpleStatsTests.java
new file mode 100644
index 00000000000..eb9e47af9ba
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/SimpleStatsTests.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml.job.stats;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+
+public class SimpleStatsTests extends AbstractXContentTestCase<SimpleStats> {
+
+    @Override
+    protected SimpleStats createTestInstance() {
+        return createRandom();
+    }
+
+    @Override
+    protected SimpleStats doParseInstance(XContentParser parser) throws IOException {
+        return SimpleStats.PARSER.parse(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    public static SimpleStats createRandom() {
+        return new SimpleStats(randomDouble(), randomDouble(), randomDouble(), randomDouble());
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/util/PageParamsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/util/PageParamsTests.java
new file mode 100644
index 00000000000..f74cedf1437
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/util/PageParamsTests.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml.util;
+
+import org.elasticsearch.client.ml.job.util.PageParams;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+public class PageParamsTests extends AbstractXContentTestCase<PageParams> {
+
+    @Override
+    protected PageParams doParseInstance(XContentParser parser) {
+        return PageParams.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+
+    @Override
+    protected PageParams createTestInstance() {
+        Integer from = randomBoolean() ? randomInt() : null;
+        Integer size = randomBoolean() ?
randomInt() : null; + return new PageParams(from, size); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/EmptyResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/EmptyResponseTests.java new file mode 100644 index 00000000000..37e2e6bb515 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/EmptyResponseTests.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; + +public class EmptyResponseTests extends ESTestCase { + + public void testParseFromXContent() throws IOException { + try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{}")) { + + EmptyResponse response = EmptyResponse.fromXContent(parser); + assertNotNull(response); + } + + try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{\"foo\": \"bar\"}")) { + + XContentParseException exception = + expectThrows(XContentParseException.class, () -> EmptyResponse.fromXContent(parser)); + assertThat(exception.getMessage(), containsString("field [foo]")); + } + } +} diff --git a/client/rest/build.gradle b/client/rest/build.gradle index fc2ab0bc4c0..273836a31f0 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -16,9 +18,6 @@ * specific language governing permissions and limitations * under the License. 
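`EmptyResponseTests` above pins down strict parsing: `{}` parses, while any unknown field fails with an `XContentParseException` naming the field. A hedged sketch of how such strictness is typically wired with `ObjectParser` (the diff does not show `EmptyResponse`'s internals; the constructor reference is an assumption):

```java
// ignoreUnknownFields == false makes {"foo": "bar"} throw an
// XContentParseException whose message names the offending field [foo].
ObjectParser<EmptyResponse, Void> PARSER =
        new ObjectParser<>("empty_response", false, EmptyResponse::new);
```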
*/ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -53,10 +52,9 @@ dependencies { testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}" } -forbiddenApisMain { +tasks.withType(ForbiddenApisCliTask) { //client does not depend on server, so only jdk and http signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/http-signatures.txt')] + replaceSignatureFiles ('jdk-signatures', 'http-signatures') } forbiddenPatterns { @@ -67,9 +65,6 @@ forbiddenApisTest { //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage bundledSignatures -= 'jdk-non-portable' bundledSignatures += 'jdk-internal' - //client does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/http-signatures.txt')] } // JarHell is part of es server, which we don't want to pull in diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 934b9526086..a7afbc8ffbd 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -85,7 +85,7 @@ import static java.util.Collections.singletonList; * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later * by calling {@link #setNodes(Collection)}. *
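The RestClient hunks below delete the deprecated `performRequest`/`performRequestAsync` overloads that took method, endpoint, parameter map, entity, and header varargs. A migration sketch built only from calls this diff itself uses (`client` is an existing `RestClient`; the endpoint and header are illustrative):

```java
// Before (deprecated overload, removed below):
//   client.performRequest("GET", "/_cluster/health",
//           Collections.singletonMap("pretty", "true"), new BasicHeader("X-Opaque-Id", "my-id"));

// After: parameters, body, and per-request headers hang off the Request object.
Request request = new Request("GET", "/_cluster/health");
request.addParameter("pretty", "true");
RequestOptions.Builder options = request.getOptions().toBuilder();
options.addHeader("X-Opaque-Id", "my-id");
request.setOptions(options);
Response response = client.performRequest(request);
```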

- * The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When
+ * The method {@link #performRequest(Request)} can be used to send a request to the cluster. When
  * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and
  * retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously
  * failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead nodes that
@@ -145,17 +145,6 @@ public class RestClient implements Closeable {
         return new RestClientBuilder(hostsToNodes(hosts));
     }
 
-    /**
-     * Replaces the hosts with which the client communicates.
-     *
-     * @deprecated prefer {@link #setNodes(Collection)} because it allows you
-     * to set metadata for use with {@link NodeSelector}s
-     */
-    @Deprecated
-    public void setHosts(HttpHost... hosts) {
-        setNodes(hostsToNodes(hosts));
-    }
-
     /**
      * Replaces the nodes with which the client communicates.
      */
@@ -251,234 +240,6 @@ public class RestClient implements Closeable {
         }
     }
 
-    /**
-     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
-     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters
-     * and request body.
-     *
-     * @param method the http method
-     * @param endpoint the path of the request (without host and port)
-     * @param headers the optional request headers
-     * @return the response returned by Elasticsearch
-     * @throws IOException in case of a problem or the connection was aborted
-     * @throws ClientProtocolException in case of an http protocol error
-     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
-     * @deprecated prefer {@link #performRequest(Request)}
-     */
-    @Deprecated
-    public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
-        Request request = new Request(method, endpoint);
-        addHeaders(request, headers);
-        return performRequest(request);
-    }
-
-    /**
-     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
-     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body.
-     *
-     * @param method the http method
-     * @param endpoint the path of the request (without host and port)
-     * @param params the query_string parameters
-     * @param headers the optional request headers
-     * @return the response returned by Elasticsearch
-     * @throws IOException in case of a problem or the connection was aborted
-     * @throws ClientProtocolException in case of an http protocol error
-     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
-     * @deprecated prefer {@link #performRequest(Request)}
-     */
-    @Deprecated
-    public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
-        Request request = new Request(method, endpoint);
-        addParameters(request, params);
-        addHeaders(request, headers);
-        return performRequest(request);
-    }
-
-    /**
-     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
-     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumerFactory, Header...)}
-     * which doesn't require specifying an {@link HttpAsyncResponseConsumerFactory} instance,
-     * {@link HttpAsyncResponseConsumerFactory} will be used to create the needed instances of {@link HttpAsyncResponseConsumer}.
-     *
-     * @param method the http method
-     * @param endpoint the path of the request (without host and port)
-     * @param params the query_string parameters
-     * @param entity the body of the request, null if not applicable
-     * @param headers the optional request headers
-     * @return the response returned by Elasticsearch
-     * @throws IOException in case of a problem or the connection was aborted
-     * @throws ClientProtocolException in case of an http protocol error
-     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
-     * @deprecated prefer {@link #performRequest(Request)}
-     */
-    @Deprecated
-    public Response performRequest(String method, String endpoint, Map<String, String> params,
-                                   HttpEntity entity, Header... headers) throws IOException {
-        Request request = new Request(method, endpoint);
-        addParameters(request, params);
-        request.setEntity(entity);
-        addHeaders(request, headers);
-        return performRequest(request);
-    }
-
-    /**
-     * Sends a request to the Elasticsearch cluster that the client points to. Blocks until the request is completed and returns
-     * its response or fails by throwing an exception. Selects a host out of the provided ones in a round-robin fashion. Failing hosts
-     * are marked dead and retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times
-     * they previously failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead
-     * nodes that deserve a retry) are retried until one responds or none of them does, in which case an {@link IOException} will be thrown.
-     *
-     * This method works by performing an asynchronous call and waiting
-     * for the result. If the asynchronous call throws an exception we wrap
-     * it and rethrow it so that the stack trace attached to the exception
-     * contains the call site. While we attempt to preserve the original
-     * exception this isn't always possible and likely haven't covered all of
-     * the cases. You can get the original exception from
-     * {@link Exception#getCause()}.
-     *
-     * @param method the http method
-     * @param endpoint the path of the request (without host and port)
-     * @param params the query_string parameters
-     * @param entity the body of the request, null if not applicable
-     * @param httpAsyncResponseConsumerFactory the {@link HttpAsyncResponseConsumerFactory} used to create one
-     * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the response body gets streamed from a non-blocking HTTP
-     * connection on the client side.
-     * @param headers the optional request headers
-     * @return the response returned by Elasticsearch
-     * @throws IOException in case of a problem or the connection was aborted
-     * @throws ClientProtocolException in case of an http protocol error
-     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
-     * @deprecated prefer {@link #performRequest(Request)}
-     */
-    @Deprecated
-    public Response performRequest(String method, String endpoint, Map<String, String> params,
-                                   HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
-                                   Header... headers) throws IOException {
-        Request request = new Request(method, endpoint);
-        addParameters(request, params);
-        request.setEntity(entity);
-        setOptions(request, httpAsyncResponseConsumerFactory, headers);
-        return performRequest(request);
-    }
-
-    /**
-     * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
-     * the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
-     * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without parameters and request body.
-     *
-     * @param method the http method
-     * @param endpoint the path of the request (without host and port)
-     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
-     * @param headers the optional request headers
-     * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)}
-     */
-    @Deprecated
-    public void performRequestAsync(String method, String endpoint, ResponseListener responseListener, Header... headers) {
-        Request request;
-        try {
-            request = new Request(method, endpoint);
-            addHeaders(request, headers);
-        } catch (Exception e) {
-            responseListener.onFailure(e);
-            return;
-        }
-        performRequestAsync(request, responseListener);
-    }
-
-    /**
-     * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
-     * the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
-     * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without request body.
-     *
-     * @param method the http method
-     * @param endpoint the path of the request (without host and port)
-     * @param params the query_string parameters
-     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
-     * @param headers the optional request headers
-     * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)}
-     */
-    @Deprecated
-    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
-                                    ResponseListener responseListener, Header... headers) {
-        Request request;
-        try {
-            request = new Request(method, endpoint);
-            addParameters(request, params);
-            addHeaders(request, headers);
-        } catch (Exception e) {
-            responseListener.onFailure(e);
-            return;
-        }
-        performRequestAsync(request, responseListener);
-    }
-
-    /**
-     * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
-     * the provided {@link ResponseListener} will be notified upon completion or failure.
-     * Shortcut to {@link #performRequestAsync(String, String, Map, HttpEntity, HttpAsyncResponseConsumerFactory, ResponseListener,
-     * Header...)} which doesn't require specifying an {@link HttpAsyncResponseConsumerFactory} instance,
-     * {@link HttpAsyncResponseConsumerFactory} will be used to create the needed instances of {@link HttpAsyncResponseConsumer}.
-     *
-     * @param method the http method
-     * @param endpoint the path of the request (without host and port)
-     * @param params the query_string parameters
-     * @param entity the body of the request, null if not applicable
-     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
-     * @param headers the optional request headers
-     * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)}
-     */
-    @Deprecated
-    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
-                                    HttpEntity entity, ResponseListener responseListener, Header... headers) {
-        Request request;
-        try {
-            request = new Request(method, endpoint);
-            addParameters(request, params);
-            request.setEntity(entity);
-            addHeaders(request, headers);
-        } catch (Exception e) {
-            responseListener.onFailure(e);
-            return;
-        }
-        performRequestAsync(request, responseListener);
-    }
-
-    /**
-     * Sends a request to the Elasticsearch cluster that the client points to. The request is executed asynchronously
-     * and the provided {@link ResponseListener} gets notified upon request completion or failure.
-     * Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
-     * amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures,
-     * the later they will be retried). In case of failures all of the alive nodes (or dead nodes that deserve a retry) are retried
-     * until one responds or none of them does, in which case an {@link IOException} will be thrown.
-     *
-     * @param method the http method
-     * @param endpoint the path of the request (without host and port)
-     * @param params the query_string parameters
-     * @param entity the body of the request, null if not applicable
-     * @param httpAsyncResponseConsumerFactory the {@link HttpAsyncResponseConsumerFactory} used to create one
-     * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the response body gets streamed from a non-blocking HTTP
-     * connection on the client side.
-     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
-     * @param headers the optional request headers
-     * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)}
-     */
-    @Deprecated
-    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
-                                    HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
-                                    ResponseListener responseListener, Header... headers) {
-        Request request;
-        try {
-            request = new Request(method, endpoint);
-            addParameters(request, params);
-            request.setEntity(entity);
-            setOptions(request, httpAsyncResponseConsumerFactory, headers);
-        } catch (Exception e) {
-            responseListener.onFailure(e);
-            return;
-        }
-        performRequestAsync(request, responseListener);
-    }
-
     void performRequestAsyncNoCatch(Request request, ResponseListener listener) throws IOException {
         Map<String, String> requestParams = new HashMap<>(request.getParameters());
         //ignore is a special parameter supported by the clients, shouldn't be sent to es
@@ -1035,42 +796,4 @@ public class RestClient implements Closeable {
                 itr.remove();
             }
         }
-
-    /**
-     * Add all headers from the provided varargs argument to a {@link Request}. This only exists
-     * to support methods that exist for backwards compatibility.
-     */
-    @Deprecated
-    private static void addHeaders(Request request, Header... headers) {
headers) { - setOptions(request, RequestOptions.DEFAULT.getHttpAsyncResponseConsumerFactory(), headers); - } - - /** - * Add all headers from the provided varargs argument to a {@link Request}. This only exists - * to support methods that exist for backwards compatibility. - */ - @Deprecated - private static void setOptions(Request request, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, - Header... headers) { - Objects.requireNonNull(headers, "headers cannot be null"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - for (Header header : headers) { - Objects.requireNonNull(header, "header cannot be null"); - options.addHeader(header.getName(), header.getValue()); - } - options.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory); - request.setOptions(options); - } - - /** - * Add all parameters from a map to a {@link Request}. This only exists - * to support methods that exist for backwards compatibility. - */ - @Deprecated - private static void addParameters(Request request, Map parameters) { - Objects.requireNonNull(parameters, "parameters cannot be null"); - for (Map.Entry entry : parameters.entrySet()) { - request.addParameter(entry.getKey(), entry.getValue()); - } - } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 6b5bb3c98ee..fb58f18d42a 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -45,7 +45,6 @@ import java.io.OutputStream; import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -215,9 +214,15 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { } final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final int statusCode = randomStatusCode(getRandom()); + Request request = new Request(method, "/" + statusCode); + RequestOptions.Builder options = request.getOptions().toBuilder(); + for (Header header : requestHeaders) { + options.addHeader(header.getName(), header.getValue()); + } + request.setOptions(options); Response esResponse; try { - esResponse = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), requestHeaders); + esResponse = restClient.performRequest(request); } catch (ResponseException e) { esResponse = e.getResponse(); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index cb326f4a24c..0c589e6a40c 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -59,7 +59,6 @@ import java.net.URI; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; -import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -69,7 +68,6 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static 
org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes; -import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod; import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode; import static org.elasticsearch.client.SyncResponseListenerTests.assertExceptionStackContainsCallingMethod; import static org.hamcrest.CoreMatchers.equalTo; @@ -192,7 +190,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { public void testOkStatusCodes() throws IOException { for (String method : getHttpMethods()) { for (int okStatusCode : getOkStatusCodes()) { - Response response = performRequest(method, "/" + okStatusCode); + Response response = restClient.performRequest(new Request(method, "/" + okStatusCode)); assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode)); } } @@ -223,13 +221,11 @@ public class RestClientSingleHostTests extends RestClientTestCase { //error status codes should cause an exception to be thrown for (int errorStatusCode : getAllErrorStatusCodes()) { try { - Map params; - if (ignoreParam.isEmpty()) { - params = Collections.emptyMap(); - } else { - params = Collections.singletonMap("ignore", ignoreParam); + Request request = new Request(method, "/" + errorStatusCode); + if (false == ignoreParam.isEmpty()) { + request.addParameter("ignore", ignoreParam); } - Response response = performRequest(method, "/" + errorStatusCode, params); + Response response = restClient.performRequest(request); if (expectedIgnores.contains(errorStatusCode)) { //no exception gets thrown although we got an error status code, as it was configured to be ignored assertEquals(errorStatusCode, response.getStatusLine().getStatusCode()); @@ -256,14 +252,14 @@ public class RestClientSingleHostTests extends RestClientTestCase { for (String method : getHttpMethods()) { //IOExceptions should be let bubble up try { - performRequest(method, "/coe"); + restClient.performRequest(new Request(method, "/coe")); fail("request should have failed"); } catch(IOException e) { assertThat(e, instanceOf(ConnectTimeoutException.class)); } failureListener.assertCalled(singletonList(node)); try { - performRequest(method, "/soe"); + restClient.performRequest(new Request(method, "/soe")); fail("request should have failed"); } catch(IOException e) { assertThat(e, instanceOf(SocketTimeoutException.class)); @@ -313,48 +309,6 @@ public class RestClientSingleHostTests extends RestClientTestCase { } } - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests}. - */ - @Deprecated - public void tesPerformRequestOldStyleNullHeaders() throws IOException { - String method = randomHttpMethod(getRandom()); - int statusCode = randomStatusCode(getRandom()); - try { - performRequest(method, "/" + statusCode, (Header[])null); - fail("request should have failed"); - } catch(NullPointerException e) { - assertEquals("request headers must not be null", e.getMessage()); - } - try { - performRequest(method, "/" + statusCode, (Header)null); - fail("request should have failed"); - } catch(NullPointerException e) { - assertEquals("request header must not be null", e.getMessage()); - } - } - - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}. 
- */ - @Deprecated - public void testPerformRequestOldStyleWithNullParams() throws IOException { - String method = randomHttpMethod(getRandom()); - int statusCode = randomStatusCode(getRandom()); - try { - restClient.performRequest(method, "/" + statusCode, (Map)null); - fail("request should have failed"); - } catch(NullPointerException e) { - assertEquals("parameters cannot be null", e.getMessage()); - } - try { - restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null); - fail("request should have failed"); - } catch(NullPointerException e) { - assertEquals("parameters cannot be null", e.getMessage()); - } - } - /** * End to end test for request and response headers. Exercises the mock http client ability to send back * whatever headers it has received. @@ -464,35 +418,4 @@ public class RestClientSingleHostTests extends RestClientTestCase { } return expectedRequest; } - - /** - * @deprecated prefer {@link RestClient#performRequest(Request)}. - */ - @Deprecated - private Response performRequest(String method, String endpoint, Header... headers) throws IOException { - return performRequest(method, endpoint, Collections.emptyMap(), headers); - } - - /** - * @deprecated prefer {@link RestClient#performRequest(Request)}. - */ - @Deprecated - private Response performRequest(String method, String endpoint, Map params, Header... headers) throws IOException { - int methodSelector; - if (params.isEmpty()) { - methodSelector = randomIntBetween(0, 2); - } else { - methodSelector = randomIntBetween(1, 2); - } - switch(methodSelector) { - case 0: - return restClient.performRequest(method, endpoint, headers); - case 1: - return restClient.performRequest(method, endpoint, params, headers); - case 2: - return restClient.performRequest(method, endpoint, params, (HttpEntity)null, headers); - default: - throw new UnsupportedOperationException(); - } - } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index ef94b70542f..4a037b18404 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -42,7 +42,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import static java.util.Collections.singletonList; -import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; @@ -90,88 +89,6 @@ public class RestClientTests extends RestClientTestCase { } } - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithUnsupportedMethod()}. 
- */ - @Deprecated - public void testPerformAsyncOldStyleWithUnsupportedMethod() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - try (RestClient restClient = createRestClient()) { - restClient.performRequestAsync("unsupported", randomAsciiLettersOfLength(5), new ResponseListener() { - @Override - public void onSuccess(Response response) { - throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); - } - - @Override - public void onFailure(Exception exception) { - try { - assertThat(exception, instanceOf(UnsupportedOperationException.class)); - assertEquals("http method not supported: unsupported", exception.getMessage()); - } finally { - latch.countDown(); - } - } - }); - assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); - } - } - - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}. - */ - @Deprecated - public void testPerformOldStyleAsyncWithNullParams() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - try (RestClient restClient = createRestClient()) { - restClient.performRequestAsync(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5), null, new ResponseListener() { - @Override - public void onSuccess(Response response) { - throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); - } - - @Override - public void onFailure(Exception exception) { - try { - assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("parameters cannot be null", exception.getMessage()); - } finally { - latch.countDown(); - } - } - }); - assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); - } - } - - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests}. - */ - @Deprecated - public void testPerformOldStyleAsyncWithNullHeaders() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - try (RestClient restClient = createRestClient()) { - ResponseListener listener = new ResponseListener() { - @Override - public void onSuccess(Response response) { - throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); - } - - @Override - public void onFailure(Exception exception) { - try { - assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("header cannot be null", exception.getMessage()); - } finally { - latch.countDown(); - } - } - }; - restClient.performRequestAsync("GET", randomAsciiLettersOfLength(5), listener, (Header) null); - assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); - } - } - public void testPerformAsyncWithWrongEndpoint() throws Exception { final CountDownLatch latch = new CountDownLatch(1); try (RestClient restClient = createRestClient()) { @@ -195,33 +112,6 @@ public class RestClientTests extends RestClientTestCase { } } - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithWrongEndpoint()}. 
- */ - @Deprecated - public void testPerformAsyncOldStyleWithWrongEndpoint() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - try (RestClient restClient = createRestClient()) { - restClient.performRequestAsync("GET", "::http:///", new ResponseListener() { - @Override - public void onSuccess(Response response) { - throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); - } - - @Override - public void onFailure(Exception exception) { - try { - assertThat(exception, instanceOf(IllegalArgumentException.class)); - assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); - } finally { - latch.countDown(); - } - } - }); - assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); - } - } - public void testBuildUriLeavesPathUntouched() { final Map emptyMap = Collections.emptyMap(); { @@ -259,34 +149,6 @@ public class RestClientTests extends RestClientTestCase { } } - @Deprecated - public void testSetHostsWrongArguments() throws IOException { - try (RestClient restClient = createRestClient()) { - restClient.setHosts((HttpHost[]) null); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null nor empty", e.getMessage()); - } - try (RestClient restClient = createRestClient()) { - restClient.setHosts(); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null nor empty", e.getMessage()); - } - try (RestClient restClient = createRestClient()) { - restClient.setHosts((HttpHost) null); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("host cannot be null", e.getMessage()); - } - try (RestClient restClient = createRestClient()) { - restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("host cannot be null", e.getMessage()); - } - } - public void testSetNodesWrongArguments() throws IOException { try (RestClient restClient = createRestClient()) { restClient.setNodes(null); @@ -348,23 +210,6 @@ public class RestClientTests extends RestClientTestCase { } } - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testConstructor()}. - */ - @Deprecated - public void testNullPath() throws IOException { - try (RestClient restClient = createRestClient()) { - for (String method : getHttpMethods()) { - try { - restClient.performRequest(method, null); - fail("path set to null should fail!"); - } catch (NullPointerException e) { - assertEquals("endpoint cannot be null", e.getMessage()); - } - } - } - } - public void testSelectHosts() throws IOException { Node n1 = new Node(new HttpHost("1"), null, null, "1", null, null); Node n2 = new Node(new HttpHost("2"), null, null, "2", null, null); diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index 41146e0b7ec..6ba69c5713c 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. 
*/ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -55,7 +52,7 @@ dependencies { forbiddenApisMain { //client does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } forbiddenApisTest { @@ -63,7 +60,7 @@ forbiddenApisTest { bundledSignatures -= 'jdk-non-portable' bundledSignatures += 'jdk-internal' //client does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } dependencyLicenses { diff --git a/client/test/build.gradle b/client/test/build.gradle index cc69a1828dc..e66d2be57f1 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -16,10 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks -import org.gradle.api.JavaVersion - apply plugin: 'elasticsearch.build' targetCompatibility = JavaVersion.VERSION_1_7 @@ -36,7 +32,7 @@ dependencies { forbiddenApisMain { //client does not depend on core, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } forbiddenApisTest { @@ -44,7 +40,7 @@ forbiddenApisTest { bundledSignatures -= 'jdk-non-portable' bundledSignatures += 'jdk-internal' //client does not depend on core, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } // JarHell is part of es server, which we don't want to pull in diff --git a/client/transport/build.gradle b/client/transport/build.gradle index 944a038edd9..269a37105fb 100644 --- a/client/transport/build.gradle +++ b/client/transport/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -47,8 +44,7 @@ dependencyLicenses { forbiddenApisTest { // we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to // be pulled in - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')] + replaceSignatureFiles 'jdk-signatures', 'es-all-signatures' } namingConventions { diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle index 4a6dde5fc0c..4c2ac7d1cf4 100644 --- a/distribution/archives/integ-test-zip/build.gradle +++ b/distribution/archives/integ-test-zip/build.gradle @@ -1,2 +1,23 @@ -// This file is intentionally blank. All configuration of the -// distribution is done in the parent project. +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +integTestRunner { + systemProperty 'tests.logfile', + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }.log" +} diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java new file mode 100644 index 00000000000..2d57644f9a7 --- /dev/null +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.rest; + +import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase; + +import java.io.IOException; +import java.io.BufferedReader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; + +public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase { + @Override + protected BufferedReader openReader(Path logFile) throws IOException { + return AccessController.doPrivileged((PrivilegedAction) () -> { + try { + return Files.newBufferedReader(logFile, StandardCharsets.UTF_8); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } +} diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseTests.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java similarity index 95% rename from distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseTests.java rename to distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java index fab809a51bc..ffd3a1f6c0c 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseTests.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.rest; import org.apache.http.util.EntityUtils; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Response; @@ -37,7 +38,8 @@ import java.util.Map; /** * Tests that wait for refresh is fired if the index is closed. 
 */
-public class WaitForRefreshAndCloseTests extends ESRestTestCase {
+@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33533")
+public class WaitForRefreshAndCloseIT extends ESRestTestCase {
     @Before
     public void setupIndex() throws IOException {
         try {
@@ -53,7 +55,7 @@ public class WaitForRefreshAndCloseTests extends ESRestTestCase {

     @After
     public void cleanupIndex() throws IOException {
-        client().performRequest("DELETE", indexName());
+        client().performRequest(new Request("DELETE", indexName()));
     }

     private String indexName() {
diff --git a/distribution/archives/integ-test-zip/src/test/resources/plugin-security.policy b/distribution/archives/integ-test-zip/src/test/resources/plugin-security.policy
new file mode 100644
index 00000000000..d0d865c4ede
--- /dev/null
+++ b/distribution/archives/integ-test-zip/src/test/resources/plugin-security.policy
@@ -0,0 +1,4 @@
+grant {
+  // Needed to read the log file
+  permission java.io.FilePermission "${tests.logfile}", "read";
+};
diff --git a/distribution/build.gradle b/distribution/build.gradle
index 675799c5b22..317ece6bf2b 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -23,6 +23,7 @@ import org.elasticsearch.gradle.NoticeTask
 import org.elasticsearch.gradle.test.RunTask
 import org.apache.tools.ant.filters.FixCrLfFilter

+import java.nio.file.Files
 import java.nio.file.Path

 Collection distributions = project('archives').subprojects + project('packages').subprojects
@@ -504,4 +505,16 @@ subprojects {
     }
     return result
   }
+
+  ext.assertLinesInFile = { Path path, List expectedLines ->
+    final List actualLines = Files.readAllLines(path)
+    int line = 0
+    for (final String expectedLine : expectedLines) {
+      final String actualLine = actualLines.get(line)
+      if (expectedLine != actualLine) {
+        throw new GradleException("expected line [${line + 1}] in [${path}] to be [${expectedLine}] but was [${actualLine}]")
+      }
+      line++
+    }
+  }
 }
diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle
index b515c606cc3..52334b72a76 100644
--- a/distribution/bwc/build.gradle
+++ b/distribution/bwc/build.gradle
@@ -50,8 +50,8 @@ subprojects {
   apply plugin: 'distribution'

   // Not published so no need to assemble
-  tasks.remove(assemble)
-  build.dependsOn.remove('assemble')
+  assemble.enabled = false
+  assemble.dependsOn.remove('buildBwcVersion')

   File checkoutDir = file("${buildDir}/bwc/checkout-${bwcBranch}")
@@ -196,11 +196,19 @@ subprojects {
       }
     }

-    artifacts {
-      for (File artifactFile : artifactFiles) {
-        String artifactName = artifactFile.name.contains('oss') ? 'elasticsearch-oss' : 'elasticsearch'
-        String suffix = artifactFile.toString()[-3..-1]
-        'default' file: artifactFile, name: artifactName, type: suffix, builtBy: buildBwcVersion
+    if (gradle.startParameter.taskNames == ["assemble"]) {
+      // Gradle needs the `artifacts` declaration, including `builtBy` below, to make other projects' dependencies on
+      // this project work, but it will also trigger the build of these for the `assemble` task.
+      // Since these are only used for testing, we don't want to assemble them if `assemble` is the only task being
+      // run.
+      logger.info("Skipping BWC builds since `assemble` is the only task name provided on the command line")
+    } else {
+      artifacts {
+        for (File artifactFile : artifactFiles) {
+          String artifactName = artifactFile.name.contains('oss') ?
'elasticsearch-oss' : 'elasticsearch'
+          String suffix = artifactFile.toString()[-3..-1]
+          'default' file: artifactFile, name: artifactName, type: suffix, builtBy: buildBwcVersion
+        }
       }
     }
   }
}
diff --git a/distribution/tools/java-version-checker/build.gradle b/distribution/tools/java-version-checker/build.gradle
index ad9b56fec05..6d18b79d4bd 100644
--- a/distribution/tools/java-version-checker/build.gradle
+++ b/distribution/tools/java-version-checker/build.gradle
@@ -1,11 +1,11 @@
-import org.elasticsearch.gradle.precommit.PrecommitTasks
-
 apply plugin: 'elasticsearch.build'

 targetCompatibility = JavaVersion.VERSION_1_7

 // java_version_checker do not depend on core so only JDK signatures should be checked
-forbiddenApisMain.signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
+forbiddenApisMain {
+  replaceSignatureFiles 'jdk-signatures'
+}

 test.enabled = false
 namingConventions.enabled = false
diff --git a/distribution/tools/launchers/build.gradle b/distribution/tools/launchers/build.gradle
index a774691b2eb..ca1aa6bcac9 100644
--- a/distribution/tools/launchers/build.gradle
+++ b/distribution/tools/launchers/build.gradle
@@ -17,8 +17,9 @@
  * under the License.
  */

-import org.elasticsearch.gradle.precommit.PrecommitTasks
-import org.gradle.api.JavaVersion
+
+
+import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask

 apply plugin: 'elasticsearch.build'

@@ -31,10 +32,9 @@ dependencies {

 archivesBaseName = 'elasticsearch-launchers'

-// java_version_checker do not depend on core so only JDK signatures should be checked
-List jdkSignatures = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
-forbiddenApisMain.signaturesURLs = jdkSignatures
-forbiddenApisTest.signaturesURLs = jdkSignatures
+tasks.withType(ForbiddenApisCliTask) {
+  replaceSignatureFiles 'jdk-signatures'
+}

 namingConventions {
   testClass = 'org.elasticsearch.tools.launchers.LaunchersTestCase'
diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle
index c47786299bc..38be8db42ff 100644
--- a/distribution/tools/plugin-cli/build.gradle
+++ b/distribution/tools/plugin-cli/build.gradle
@@ -39,3 +39,9 @@ test {
   // TODO: find a way to add permissions for the tests in this module
   systemProperty 'tests.security.manager', 'false'
 }
+
+if (project.inFipsJvm) {
+  // The FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit.
+  // Rather than provide a long list of exclusions, disable the check on FIPS.
+  thirdPartyAudit.enabled = false
+}
diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java
index 3c54afb92c7..dd19594d29b 100644
--- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java
+++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java
@@ -21,7 +21,7 @@ package org.elasticsearch.plugins;

 import joptsimple.OptionSet;
 import joptsimple.OptionSpec;
-import org.apache.lucene.search.spell.LevensteinDistance;
+import org.apache.lucene.search.spell.LevenshteinDistance;
 import org.apache.lucene.util.CollectionUtil;
 import org.bouncycastle.bcpg.ArmoredInputStream;
 import org.bouncycastle.jce.provider.BouncyCastleProvider;
@@ -355,7 +355,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand {

     /** Returns all the official plugin names that look similar to pluginId.
**/ private List checkMisspelledPlugin(String pluginId) { - LevensteinDistance ld = new LevensteinDistance(); + LevenshteinDistance ld = new LevenshteinDistance(); List> scoredKeys = new ArrayList<>(); for (String officialPlugin : OFFICIAL_PLUGINS) { float distance = ld.getDistance(pluginId, officialPlugin); diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 6e127a6ccfc..f0303323d85 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,7 +1,7 @@ :version: 7.0.0-alpha1 :major-version: 7.x -:lucene_version: 7.5.0 -:lucene_version_path: 7_5_0 +:lucene_version: 8.0.0 +:lucene_version_path: 8_0_0 :branch: master :jdk: 1.8.0_131 :jdk_major: 8 diff --git a/docs/build.gradle b/docs/build.gradle index 8ee5c8a8e53..c6a7a8d4837 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -19,6 +19,22 @@ apply plugin: 'elasticsearch.docs-test' +/* List of files that have snippets that will not work until platinum tests can occur ... */ +buildRestTests.expectedUnconvertedCandidates = [ + 'reference/ml/transforms.asciidoc', + 'reference/ml/apis/delete-calendar-event.asciidoc', + 'reference/ml/apis/get-bucket.asciidoc', + 'reference/ml/apis/get-category.asciidoc', + 'reference/ml/apis/get-influencer.asciidoc', + 'reference/ml/apis/get-job-stats.asciidoc', + 'reference/ml/apis/get-overall-buckets.asciidoc', + 'reference/ml/apis/get-record.asciidoc', + 'reference/ml/apis/get-snapshot.asciidoc', + 'reference/ml/apis/post-data.asciidoc', + 'reference/ml/apis/revert-snapshot.asciidoc', + 'reference/ml/apis/update-snapshot.asciidoc', +] + integTestCluster { /* Enable regexes in painless so our tests don't complain about example * snippets that use them. */ @@ -41,9 +57,6 @@ integTestCluster { // TODO: remove this for 7.0, this exists to allow the doc examples in 6.x to continue using the defaults systemProperty 'es.scripting.use_java_time', 'false' systemProperty 'es.scripting.update.ctx_in_params', 'false' - - // TODO: remove this deprecation compatibility setting for 7.0 - systemProperty 'es.aggregations.enable_scripted_metric_agg_param', 'false' } // remove when https://github.com/elastic/elasticsearch/issues/31305 is fixed @@ -77,6 +90,17 @@ buildRestTests.docs = fileTree(projectDir) { exclude 'build' // Just syntax examples exclude 'README.asciidoc' + // Broken code snippet tests + exclude 'reference/rollup/rollup-getting-started.asciidoc' + exclude 'reference/rollup/apis/rollup-job-config.asciidoc' + exclude 'reference/rollup/apis/rollup-index-caps.asciidoc' + exclude 'reference/rollup/apis/put-job.asciidoc' + exclude 'reference/rollup/apis/stop-job.asciidoc' + exclude 'reference/rollup/apis/start-job.asciidoc' + exclude 'reference/rollup/apis/rollup-search.asciidoc' + exclude 'reference/rollup/apis/delete-job.asciidoc' + exclude 'reference/rollup/apis/get-job.asciidoc' + exclude 'reference/rollup/apis/rollup-caps.asciidoc' } listSnippets.docs = buildRestTests.docs @@ -597,3 +621,480 @@ buildRestTests.setups['library'] = ''' {"name": "The Moon is a Harsh Mistress", "author": "Robert A. 
Heinlein", "release_date": "1966-04-01", "page_count": 288} ''' +buildRestTests.setups['sensor_rollup_job'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + - do: + xpack.rollup.put_job: + id: "sensor" + body: > + { + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] + } +''' +buildRestTests.setups['sensor_started_rollup_job'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + + - do: + bulk: + index: sensor-1 + type: _doc + refresh: true + body: | + {"index":{}} + {"timestamp": 1516729294000, "temperature": 200, "voltage": 5.2, "node": "a"} + {"index":{}} + {"timestamp": 1516642894000, "temperature": 201, "voltage": 5.8, "node": "b"} + {"index":{}} + {"timestamp": 1516556494000, "temperature": 202, "voltage": 5.1, "node": "a"} + {"index":{}} + {"timestamp": 1516470094000, "temperature": 198, "voltage": 5.6, "node": "b"} + {"index":{}} + {"timestamp": 1516383694000, "temperature": 200, "voltage": 4.2, "node": "c"} + {"index":{}} + {"timestamp": 1516297294000, "temperature": 202, "voltage": 4.0, "node": "c"} + + - do: + xpack.rollup.put_job: + id: "sensor" + body: > + { + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "* * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] + } + - do: + xpack.rollup.start_job: + id: "sensor" +''' + +buildRestTests.setups['sensor_index'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + load: + type: double + net_in: + type: long + net_out: + type: long + hostname: + type: keyword + datacenter: + type: keyword +''' + +buildRestTests.setups['sensor_prefab_data'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + - do: + indices.create: + index: sensor_rollup + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + node.terms.value: + type: keyword + temperature.sum.value: + type: double + temperature.max.value: + type: double + temperature.min.value: + type: double + timestamp.date_histogram.time_zone: + type: keyword + timestamp.date_histogram.interval: + type: keyword + timestamp.date_histogram.timestamp: + type: date + timestamp.date_histogram._count: + type: long + 
voltage.avg.value: + type: double + voltage.avg._count: + type: long + _rollup.id: + type: keyword + _rollup.version: + type: long + _meta: + _rollup: + sensor: + cron: "* * * * * ?" + rollup_index: "sensor_rollup" + index_pattern: "sensor-*" + timeout: "20s" + page_size: 1000 + groups: + date_histogram: + delay: "7d" + field: "timestamp" + interval: "1h" + time_zone: "UTC" + terms: + fields: + - "node" + id: sensor + metrics: + - field: "temperature" + metrics: + - min + - max + - sum + - field: "voltage" + metrics: + - avg + + - do: + bulk: + index: sensor_rollup + type: _doc + refresh: true + body: | + {"index":{}} + {"node.terms.value":"b","temperature.sum.value":201.0,"temperature.max.value":201.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":201.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.800000190734863,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516640400000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"c","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516381200000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"a","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.099999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516554000000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"a","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516726800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"b","temperature.sum.value":198.0,"temperature.max.value":198.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":198.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.599999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516467600000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + 
{"node.terms.value":"c","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.0,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516294800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + +''' +buildRestTests.setups['sample_job'] = ''' + - do: + xpack.ml.put_job: + job_id: "sample_job" + body: > + { + "description" : "Very basic job", + "analysis_config" : { + "bucket_span":"10m", + "detectors" :[ + { + "function": "count" + } + ]}, + "data_description" : { + "time_field":"timestamp", + "time_format": "epoch_ms" + } + } +''' +buildRestTests.setups['farequote_index'] = ''' + - do: + indices.create: + index: farequote + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + metric: + properties: + time: + type: date + responsetime: + type: float + airline: + type: keyword + doc_count: + type: integer +''' +buildRestTests.setups['farequote_data'] = buildRestTests.setups['farequote_index'] + ''' + - do: + bulk: + index: farequote + type: metric + refresh: true + body: | + {"index": {"_id":"1"}} + {"airline":"JZA","responsetime":990.4628,"time":"2016-02-07T00:00:00+0000", "doc_count": 5} + {"index": {"_id":"2"}} + {"airline":"JBU","responsetime":877.5927,"time":"2016-02-07T00:00:00+0000", "doc_count": 23} + {"index": {"_id":"3"}} + {"airline":"KLM","responsetime":1355.4812,"time":"2016-02-07T00:00:00+0000", "doc_count": 42} +''' +buildRestTests.setups['farequote_job'] = buildRestTests.setups['farequote_data'] + ''' + - do: + xpack.ml.put_job: + job_id: "farequote" + body: > + { + "analysis_config": { + "bucket_span": "60m", + "detectors": [{ + "function": "mean", + "field_name": "responsetime", + "by_field_name": "airline" + }], + "summary_count_field_name": "doc_count" + }, + "data_description": { + "time_field": "time" + } + } +''' +buildRestTests.setups['farequote_datafeed'] = buildRestTests.setups['farequote_job'] + ''' + - do: + xpack.ml.put_datafeed: + datafeed_id: "datafeed-farequote" + body: > + { + "job_id":"farequote", + "indexes":"farequote" + } +''' +buildRestTests.setups['server_metrics_index'] = ''' + - do: + indices.create: + index: server-metrics + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + metric: + properties: + timestamp: + type: date + total: + type: long +''' +buildRestTests.setups['server_metrics_data'] = buildRestTests.setups['server_metrics_index'] + ''' + - do: + bulk: + index: server-metrics + type: metric + refresh: true + body: | + {"index": {"_id":"1177"}} + {"timestamp":"2017-03-23T13:00:00","total":40476} + {"index": {"_id":"1178"}} + {"timestamp":"2017-03-23T13:00:00","total":15287} + {"index": {"_id":"1179"}} + {"timestamp":"2017-03-23T13:00:00","total":-776} + {"index": {"_id":"1180"}} + {"timestamp":"2017-03-23T13:00:00","total":11366} + {"index": {"_id":"1181"}} + {"timestamp":"2017-03-23T13:00:00","total":3606} + {"index": {"_id":"1182"}} + {"timestamp":"2017-03-23T13:00:00","total":19006} + {"index": {"_id":"1183"}} + {"timestamp":"2017-03-23T13:00:00","total":38613} + {"index": {"_id":"1184"}} + {"timestamp":"2017-03-23T13:00:00","total":19516} + {"index": {"_id":"1185"}} + {"timestamp":"2017-03-23T13:00:00","total":-258} + {"index": {"_id":"1186"}} + 
{"timestamp":"2017-03-23T13:00:00","total":9551} + {"index": {"_id":"1187"}} + {"timestamp":"2017-03-23T13:00:00","total":11217} + {"index": {"_id":"1188"}} + {"timestamp":"2017-03-23T13:00:00","total":22557} + {"index": {"_id":"1189"}} + {"timestamp":"2017-03-23T13:00:00","total":40508} + {"index": {"_id":"1190"}} + {"timestamp":"2017-03-23T13:00:00","total":11887} + {"index": {"_id":"1191"}} + {"timestamp":"2017-03-23T13:00:00","total":31659} +''' +buildRestTests.setups['server_metrics_job'] = buildRestTests.setups['server_metrics_data'] + ''' + - do: + xpack.ml.put_job: + job_id: "total-requests" + body: > + { + "description" : "Total sum of requests", + "analysis_config" : { + "bucket_span":"10m", + "detectors" :[ + { + "detector_description": "Sum of total", + "function": "sum", + "field_name": "total" + } + ]}, + "data_description" : { + "time_field":"timestamp", + "time_format": "epoch_ms" + } + } +''' +buildRestTests.setups['server_metrics_datafeed'] = buildRestTests.setups['server_metrics_job'] + ''' + - do: + xpack.ml.put_datafeed: + datafeed_id: "datafeed-total-requests" + body: > + { + "job_id":"total-requests", + "indexes":"server-metrics" + } +''' +buildRestTests.setups['server_metrics_openjob'] = buildRestTests.setups['server_metrics_datafeed'] + ''' + - do: + xpack.ml.open_job: + job_id: "total-requests" +''' +buildRestTests.setups['server_metrics_startdf'] = buildRestTests.setups['server_metrics_openjob'] + ''' + - do: + xpack.ml.start_datafeed: + datafeed_id: "datafeed-total-requests" +''' +buildRestTests.setups['calendar_outages'] = ''' + - do: + xpack.ml.put_calendar: + calendar_id: "planned-outages" +''' +buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages'] + ''' + - do: + xpack.ml.post_calendar_events: + calendar_id: "planned-outages" + body: > + { "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "planned-outages" } + + +''' +buildRestTests.setups['calendar_outages_openjob'] = buildRestTests.setups['server_metrics_openjob'] + ''' + - do: + xpack.ml.put_calendar: + calendar_id: "planned-outages" +''' +buildRestTests.setups['calendar_outages_addjob'] = buildRestTests.setups['server_metrics_openjob'] + ''' + - do: + xpack.ml.put_calendar: + calendar_id: "planned-outages" + body: > + { + "job_ids": ["total-requests"] + } +''' +buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages_addjob'] + ''' + - do: + xpack.ml.post_calendar_events: + calendar_id: "planned-outages" + body: > + { "events" : [ + { "description": "event 1", "start_time": "1513641600000", "end_time": "1513728000000"}, + { "description": "event 2", "start_time": "1513814400000", "end_time": "1513900800000"}, + { "description": "event 3", "start_time": "1514160000000", "end_time": "1514246400000"} + ]} +''' + + diff --git a/docs/community-clients/index.asciidoc b/docs/community-clients/index.asciidoc index e28ec84f087..3266d3e365c 100644 --- a/docs/community-clients/index.asciidoc +++ b/docs/community-clients/index.asciidoc @@ -132,6 +132,9 @@ The following project appears to be abandoned: * https://github.com/mbuhot/eskotlin[ES Kotlin]: Elasticsearch Query DSL for kotlin based on the {client}/java-api/current/index.html[official Elasticsearch Java client]. 
+
+* https://github.com/jillesvangurp/es-kotlin-wrapper-client[ES Kotlin Wrapper Client]:
+  Kotlin extension functions and abstractions for the {client}/java-api/current/index.html[official Elasticsearch High Level Client]. Aims to reduce the amount of boilerplate needed to do searches, bulk indexing and other common things users do with the client.

 [[lua]]
 == Lua
diff --git a/docs/java-rest/high-level/document/delete-by-query.asciidoc b/docs/java-rest/high-level/document/delete-by-query.asciidoc
new file mode 100644
index 00000000000..5ec246a9121
--- /dev/null
+++ b/docs/java-rest/high-level/document/delete-by-query.asciidoc
@@ -0,0 +1,163 @@
+[[java-rest-high-document-delete-by-query]]
+=== Delete By Query API
+
+[[java-rest-high-document-delete-by-query-request]]
+==== Delete By Query Request
+
+A `DeleteByQueryRequest` can be used to delete documents from an index. It requires an existing index (or a set of indices)
+on which deletion is to be performed.
+
+The simplest form of a `DeleteByQueryRequest` looks like this:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request]
+--------------------------------------------------
+<1> Creates the `DeleteByQueryRequest` on a set of indices.
+
+By default version conflicts abort the `DeleteByQueryRequest` process, but you can count them instead by setting
+`conflicts` to `proceed` on the request:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-conflicts]
+--------------------------------------------------
+<1> Set `proceed` on version conflict
+
+You can limit the documents by adding a type to the source or by adding a query.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-typeOrQuery]
+--------------------------------------------------
+<1> Only delete documents of `doc` type
+<2> Only delete documents which have the field `user` set to `kimchy`
+
+It’s also possible to limit the number of processed documents by setting the size.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-size]
+--------------------------------------------------
+<1> Only process 10 documents
+
+By default `DeleteByQueryRequest` uses batches of 1000. You can change the batch size with `setBatchSize`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-scrollSize]
+--------------------------------------------------
+<1> Use batches of 100 documents
+
+`DeleteByQueryRequest` can also be parallelized automatically, using `sliced-scroll` to
+slice on `_uid`. Use `setSlices` to specify the number of slices to use.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-slices]
+--------------------------------------------------
+<1> set number of slices to use
+
+`DeleteByQueryRequest` uses the `scroll` parameter to control how long it keeps the "search context" alive.
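+For illustration, a hand-written sketch of such a call is shown here; the ten minute keep-alive is a
+hypothetical value, and it assumes the `setScroll(TimeValue)` setter that the tested snippet below
+exercises:
+
+["source","java"]
+--------------------------------------------------
+// sketch only: keep the search context alive for 10 minutes between batches
+request.setScroll(TimeValue.timeValueMinutes(10));
+--------------------------------------------------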
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-scroll] +-------------------------------------------------- +<1> set scroll time + +If you provide routing then the routing is copied to the scroll query, limiting the process to the shards that match +that routing value. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-routing] +-------------------------------------------------- +<1> set routing + + +==== Optional arguments +In addition to the options above the following arguments can optionally be also provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for the delete by query request to be performed as a `TimeValue` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-refresh] +-------------------------------------------------- +<1> Refresh index after calling delete by query + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-indicesOptions] +-------------------------------------------------- +<1> Set indices options + + +[[java-rest-high-document-delete-by-query-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-execute] +-------------------------------------------------- + +[[java-rest-high-document-delete-by-query-async]] +==== Asynchronous Execution + +The asynchronous execution of an delete by query request requires both the `DeleteByQueryRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-execute-async] +-------------------------------------------------- +<1> The `DeleteByQueryRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `BulkByScrollResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument and contains a list of individual results for each +operation that was executed. Note that one or more operations might have +failed while the others have been successfully executed. +<2> Called when the whole `DeleteByQueryRequest` fails. 
In this case the raised
+exception is provided as an argument and no operation has been executed.
+
+[[java-rest-high-document-delete-by-query-execute-listener-response]]
+==== Delete By Query Response
+
+The returned `BulkByScrollResponse` contains information about the executed operations and
+allows iterating over each result as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-response]
+--------------------------------------------------
+<1> Get total time taken
+<2> Check if the request timed out
+<3> Get total number of docs processed
+<4> Number of docs that were deleted
+<5> Number of batches that were executed
+<6> Number of skipped docs
+<7> Number of version conflicts
+<8> Number of times request had to retry bulk index operations
+<9> Number of times request had to retry search operations
+<10> The total time this request has throttled itself not including the current throttle time if it is currently sleeping
+<11> Remaining delay of any current throttle sleep or 0 if not sleeping
+<12> Failures during search phase
+<13> Failures during bulk index operation
diff --git a/docs/java-rest/high-level/document/reindex.asciidoc b/docs/java-rest/high-level/document/reindex.asciidoc
new file mode 100644
index 00000000000..b6d98b42dc5
--- /dev/null
+++ b/docs/java-rest/high-level/document/reindex.asciidoc
@@ -0,0 +1,215 @@
+[[java-rest-high-document-reindex]]
+=== Reindex API
+
+[[java-rest-high-document-reindex-request]]
+==== Reindex Request
+
+A `ReindexRequest` can be used to copy documents from one or more indexes into a destination index.
+
+It requires an existing source index and a target index which may or may not exist before the request. Reindex does not
+attempt to set up the destination index. It does not copy the settings of the source index. You should set up the
+destination index prior to running a `_reindex` action, including setting up mappings, shard counts, replicas, etc.
+
+The simplest form of a `ReindexRequest` looks as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request]
+--------------------------------------------------
+<1> Creates the `ReindexRequest`
+<2> Adds a list of sources to copy from
+<3> Adds the destination index
+
+The `dest` element can be configured like the index API to control optimistic concurrency control. Just leaving out
+`versionType` (as above) or setting it to `internal` will cause Elasticsearch to blindly dump documents into the target.
+Setting `versionType` to `external` will cause Elasticsearch to preserve the version from the source, create any documents
+that are missing, and update any documents that have an older version in the destination index than they do in the
+source index.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-versionType]
+--------------------------------------------------
+<1> Set the versionType to `EXTERNAL`
+
+Setting `opType` to `create` will cause `_reindex` to only create missing documents in the target index. All existing
+documents will cause a version conflict. The default `opType` is `index`.
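+As a rough sketch of that call (assuming the `setDestOpType` setter exercised by the tested snippet
+below):
+
+["source","java"]
+--------------------------------------------------
+// sketch only: create missing documents in the destination, conflict on existing ones
+request.setDestOpType("create");
+--------------------------------------------------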
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-opType]
+--------------------------------------------------
+<1> Set the `opType` to `create`
+
+By default version conflicts abort the `_reindex` process, but you can just count them by setting `conflicts` to `proceed`
+in the request body:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-conflicts]
+--------------------------------------------------
+<1> Set `proceed` on version conflict
+
+You can limit the documents by adding a type to the source or by adding a query.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-typeOrQuery]
+--------------------------------------------------
+<1> Only copy the `doc` type
+<2> Only copy documents which have the field `user` set to `kimchy`
+
+It’s also possible to limit the number of processed documents by setting `size`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-size]
+--------------------------------------------------
+<1> Only copy 10 documents
+
+By default `_reindex` uses batches of 1000. You can change the batch size with `sourceBatchSize`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-sourceSize]
+--------------------------------------------------
+<1> Use batches of 100 documents
+
+Reindex can also use the ingest feature by specifying a `pipeline`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-pipeline]
+--------------------------------------------------
+<1> Set the pipeline to `my_pipeline`
+
+If you want a particular set of documents from the source index, you’ll need to use `sort`. If possible, prefer a more
+selective query to `size` and `sort`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-sort]
+--------------------------------------------------
+<1> Add a descending sort on `field1`
+<2> Add an ascending sort on `field2`
+
+`ReindexRequest` also supports a `script` that modifies the document. It also allows you to change the document's
+metadata. The following example illustrates that.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-script]
+--------------------------------------------------
+<1> `setScript` to increment the `likes` field on all documents with user `kimchy`.
+
+`ReindexRequest` supports reindexing from a remote Elasticsearch cluster. When using a remote cluster, the query should be
+specified inside the `RemoteInfo` object and not by using `setSourceQuery`. If both the remote info and the source query are
+set, it results in a validation error during the request. The reason for this is that the remote Elasticsearch cluster may not
+understand queries built by the modern query builders.
+The remote cluster support works all the way back to Elasticsearch 0.90 and the query language has changed since then.
+When reaching older versions, it is safer to write the query by hand in JSON.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-remote]
+--------------------------------------------------
+<1> Set the remote Elasticsearch cluster information
+
+`ReindexRequest` can also parallelize automatically, using `sliced-scroll` to
+slice on `_uid`. Use `setSlices` to specify the number of slices to use.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-slices]
+--------------------------------------------------
+<1> Set the number of slices to use
+
+`ReindexRequest` uses the `scroll` parameter to control how long it keeps the "search context" alive.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-scroll]
+--------------------------------------------------
+<1> Set the scroll time
+
+
+==== Optional arguments
+In addition to the options above, the following arguments can also optionally be provided:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-timeout]
+--------------------------------------------------
+<1> Timeout to wait for the reindex request to be performed as a `TimeValue`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-refresh]
+--------------------------------------------------
+<1> Refresh index after calling reindex
+
+
+[[java-rest-high-document-reindex-sync]]
+==== Synchronous Execution
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-execute]
+--------------------------------------------------
+
+[[java-rest-high-document-reindex-async]]
+==== Asynchronous Execution
+
+The asynchronous execution of a reindex request requires both the `ReindexRequest`
+instance and an `ActionListener` instance to be passed to the asynchronous
+method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-execute-async]
+--------------------------------------------------
+<1> The `ReindexRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed, the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for `BulkByScrollResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument and contains a list of individual results for each
+operation that was executed. Note that one or more operations might have
+failed while the others have been successfully executed.
+<2> Called when the whole `ReindexRequest` fails. In this case the raised
+exception is provided as an argument and no operation has been executed.
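+
+As a compact, hand-written sketch of how the pieces above fit together (the index
+names and the pre-built `RestHighLevelClient` named `client` are illustrative
+assumptions, not part of the tested snippets):
+
+["source","java"]
+--------------------------------------------------
+// Classes come from org.elasticsearch.index.reindex, org.elasticsearch.index,
+// and org.elasticsearch.index.query.
+// Copy matching documents from `source1` into `dest`, preserving external versions.
+ReindexRequest request = new ReindexRequest();
+request.setSourceIndices("source1");
+request.setDestIndex("dest");
+request.setDestVersionType(VersionType.EXTERNAL);
+request.setSourceQuery(new TermQueryBuilder("user", "kimchy"));
+request.setSourceBatchSize(100); // scroll batches of 100 instead of the default 1000
+
+BulkByScrollResponse response = client.reindex(request, RequestOptions.DEFAULT);
+long created = response.getCreated(); // documents newly created in `dest`
+--------------------------------------------------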
+
+[[java-rest-high-document-reindex-response]]
+==== Reindex Response
+
+The returned `BulkByScrollResponse` contains information about the executed operations and
+allows iterating over each result, as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-response]
+--------------------------------------------------
+<1> Get total time taken
+<2> Check if the request timed out
+<3> Get total number of docs processed
+<4> Number of docs that were updated
+<5> Number of docs that were created
+<6> Number of docs that were deleted
+<7> Number of batches that were executed
+<8> Number of skipped docs
+<9> Number of version conflicts
+<10> Number of times request had to retry bulk index operations
+<11> Number of times request had to retry search operations
+<12> The total time this request has throttled itself not including the current throttle time if it is currently sleeping
+<13> Remaining delay of any current throttle sleep or 0 if not sleeping
+<14> Failures during search phase
+<15> Failures during bulk index operation
diff --git a/docs/java-rest/high-level/document/update-by-query.asciidoc b/docs/java-rest/high-level/document/update-by-query.asciidoc
new file mode 100644
index 00000000000..324385a442b
--- /dev/null
+++ b/docs/java-rest/high-level/document/update-by-query.asciidoc
@@ -0,0 +1,181 @@
+[[java-rest-high-document-update-by-query]]
+=== Update By Query API
+
+[[java-rest-high-document-update-by-query-request]]
+==== Update By Query Request
+
+An `UpdateByQueryRequest` can be used to update documents in an index.
+
+It requires an existing index (or a set of indices) on which the update is to be performed.
+
+The simplest form of an `UpdateByQueryRequest` looks as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request]
+--------------------------------------------------
+<1> Creates the `UpdateByQueryRequest` on a set of indices.
+
+By default version conflicts abort the `UpdateByQueryRequest` process, but you can just count them by setting `conflicts`
+to `proceed` in the request body:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-conflicts]
+--------------------------------------------------
+<1> Set `proceed` on version conflict
+
+You can limit the documents by adding a type to the source or by adding a query.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-typeOrQuery]
+--------------------------------------------------
+<1> Only update documents of type `doc`
+<2> Only update documents which have the field `user` set to `kimchy`
+
+It’s also possible to limit the number of processed documents by setting `size`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-size]
+--------------------------------------------------
+<1> Only update 10 documents
+
+By default `UpdateByQueryRequest` uses batches of 1000. You can change the batch size with `setBatchSize`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-scrollSize]
+--------------------------------------------------
+<1> Use batches of 100 documents
+
+Update by query can also use the ingest feature by specifying a `pipeline`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-pipeline]
+--------------------------------------------------
+<1> Set the pipeline to `my_pipeline`
+
+`UpdateByQueryRequest` also supports a `script` that modifies the document. The following example illustrates that.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-script]
+--------------------------------------------------
+<1> `setScript` to increment the `likes` field on all documents with user `kimchy`.
+
+`UpdateByQueryRequest` can also parallelize automatically, using `sliced-scroll` to
+slice on `_uid`. Use `setSlices` to specify the number of slices to use.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-slices]
+--------------------------------------------------
+<1> Set the number of slices to use
+
+`UpdateByQueryRequest` uses the `scroll` parameter to control how long it keeps the "search context" alive.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-scroll]
+--------------------------------------------------
+<1> Set the scroll time
+
+If you provide routing then the routing is copied to the scroll query, limiting the process to the shards that match
+that routing value.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-routing]
+--------------------------------------------------
+<1> Set the routing
+
+
+==== Optional arguments
+In addition to the options above, the following arguments can also optionally be provided:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-timeout]
+--------------------------------------------------
+<1> Timeout to wait for the update by query request to be performed as a `TimeValue`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-refresh]
+--------------------------------------------------
+<1> Refresh index after calling update by query
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-indicesOptions]
+--------------------------------------------------
+<1> Set indices options
+
+
+[[java-rest-high-document-update-by-query-sync]]
+==== Synchronous Execution
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute]
+--------------------------------------------------
+
+[[java-rest-high-document-update-by-query-async]]
+==== Asynchronous Execution
+
+The asynchronous execution of an update by query request requires both the `UpdateByQueryRequest`
+instance and an `ActionListener` instance to be passed to the asynchronous
+method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute-async]
+--------------------------------------------------
+<1> The `UpdateByQueryRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed, the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for `BulkByScrollResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument and contains a list of individual results for each
+operation that was executed. Note that one or more operations might have
+failed while the others have been successfully executed.
+<2> Called when the whole `UpdateByQueryRequest` fails. In this case the raised
+exception is provided as an argument and no operation has been executed.
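+
+As a minimal end-to-end sketch, the pieces above can be combined as follows. This is
+an illustration rather than a tested snippet: the index name `source1`, the field
+values, and the pre-built `RestHighLevelClient` named `client` are all assumptions.
+
+["source","java"]
+--------------------------------------------------
+// Classes come from org.elasticsearch.index.reindex, org.elasticsearch.index.query,
+// and org.elasticsearch.script.
+// Update documents in `source1` where user == kimchy, counting
+// version conflicts instead of aborting on them.
+UpdateByQueryRequest request = new UpdateByQueryRequest("source1");
+request.setConflicts("proceed");
+request.setQuery(new TermQueryBuilder("user", "kimchy"));
+request.setScript(new Script(ScriptType.INLINE, "painless",
+        "ctx._source.likes++", Collections.emptyMap()));
+
+// Execute asynchronously; the listener receives the BulkByScrollResponse.
+client.updateByQueryAsync(request, RequestOptions.DEFAULT,
+        new ActionListener<BulkByScrollResponse>() {
+            @Override
+            public void onResponse(BulkByScrollResponse response) {
+                long updated = response.getUpdated(); // number of documents updated
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                // the whole request failed; no documents were modified
+            }
+        });
+--------------------------------------------------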
+
+[[java-rest-high-document-update-by-query-execute-listener-response]]
+==== Update By Query Response
+
+The returned `BulkByScrollResponse` contains information about the executed operations and
+allows iterating over each result, as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-response]
+--------------------------------------------------
+<1> Get total time taken
+<2> Check if the request timed out
+<3> Get total number of docs processed
+<4> Number of docs that were updated
+<5> Number of docs that were deleted
+<6> Number of batches that were executed
+<7> Number of skipped docs
+<8> Number of version conflicts
+<9> Number of times request had to retry bulk index operations
+<10> Number of times request had to retry search operations
+<11> The total time this request has throttled itself not including the current throttle time if it is currently sleeping
+<12> Remaining delay of any current throttle sleep or 0 if not sleeping
+<13> Failures during search phase
+<14> Failures during bulk index operation
diff --git a/docs/java-rest/high-level/graph/explore.asciidoc b/docs/java-rest/high-level/graph/explore.asciidoc
new file mode 100644
index 00000000000..f2718209f4b
--- /dev/null
+++ b/docs/java-rest/high-level/graph/explore.asciidoc
@@ -0,0 +1,53 @@
+[[java-rest-high-x-pack-graph-explore]]
+=== X-Pack Graph explore API
+
+[[java-rest-high-x-pack-graph-explore-execution]]
+==== Initial request
+
+Graph queries are executed using the `explore()` method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-request]
+--------------------------------------------------
+<1> In this example we seed the exploration with a query to find messages mentioning the mysterious `projectx`.
+<2> What we want to discover in these messages are the IDs of `participants` in the communications and the MD5 hashes
+of any attached files. In each case, we want to find people or files that have had at least one document connecting them
+to projectx.
+<3> The next "hop" in the graph exploration is to find the people who have shared several messages with the people or files
+discovered in the previous hop (the projectx conspirators). The `minDocCount` control is used here to ensure the people
+discovered have had at least 5 communications with projectx entities. Note that we could also supply a "guiding query" here,
+e.g. a date range to consider only recent communications, but we pass `null` to consider all connections.
+<4> Finally, we call the graph explore API with the `GraphExploreRequest` object.
+
+
+==== Response
+
+Graph responses consist of `Vertex` and `Connection` objects (aka "nodes" and "edges" respectively):
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-response]
+--------------------------------------------------
+<1> Each `Vertex` is a unique term (a combination of field name and term value). The `hopDepth` property tells us at which point in the
+requested exploration this term was first discovered.
+<2> Each `Connection` is a pair of `Vertex` objects and includes a `docCount` property telling us how many times these two
+`Vertex` terms have been sighted together.
+
+
+[[java-rest-high-x-pack-graph-expand-execution]]
+==== Expanding a client-side Graph
+
+Typically, once an application has rendered an initial `GraphExploreResponse` as a collection of vertices and connecting lines
+(graph visualization toolkits such as D3, sigma.js or Keylines help here), the next step a user may want to take is to "expand".
+This involves finding new vertices that might be connected to the existing ones currently shown.
+
+To do this, we use the same `explore` method but our request contains details about which vertices to expand from and which
+vertices to avoid re-discovering.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-expand]
+--------------------------------------------------
+<1> Unlike the initial request, we do not need to pass a starting query.
+<2> In the first hop, which represents our "from" vertices, we explicitly list the terms that we already have on-screen and
+want to expand by using the `addInclude` filter.
+We can supply a boost for those terms that are considered more important to follow than others, but here we select a common
+value of 1 for all.
+<3> When defining the second hop, which represents the "to" vertices we hope to discover, we explicitly list the terms that
+we already know about using the `addExclude` filter.
+
diff --git a/docs/java-rest/high-level/licensing/put-license.asciidoc b/docs/java-rest/high-level/licensing/put-license.asciidoc
index a270d658ddd..945d447317b 100644
--- a/docs/java-rest/high-level/licensing/put-license.asciidoc
+++ b/docs/java-rest/high-level/licensing/put-license.asciidoc
@@ -10,7 +10,7 @@ The license can be added or updated using the `putLicense()` method:
--------------------------------------------------
include-tagged::{doc-tests}/LicensingDocumentationIT.java[put-license-execute]
--------------------------------------------------
-<1> Set the categories of information to retrieve. The the default is to
+<1> Set the categories of information to retrieve. The default is to
return no information which is useful for checking if {xpack} is installed
but not much else.
<2> A JSON document containing the license information.
diff --git a/docs/java-rest/high-level/migration.asciidoc b/docs/java-rest/high-level/migration.asciidoc
index ad4e0613fc1..662df0f5640 100644
--- a/docs/java-rest/high-level/migration.asciidoc
+++ b/docs/java-rest/high-level/migration.asciidoc
@@ -270,7 +270,7 @@ include-tagged::{doc-tests}/MigrationDocumentationIT.java[migration-cluster-health]
helper requires the content type of the response to be passed as an argument and returns
a `Map` of objects. Values in the map can be of any type, including inner `Map` that
are used to represent the JSON object hierarchy.
-<5> Retrieve the value of the `status` field in the response map, casts it as a a `String`
+<5> Retrieve the value of the `status` field in the response map, casts it as a `String`
object and use the `ClusterHealthStatus.fromString()` method to convert it as a
`ClusterHealthStatus` object. This method throws an exception if the value does not
corresponds to a valid cluster health status.
diff --git a/docs/java-rest/high-level/miscellaneous/x-pack-info.asciidoc b/docs/java-rest/high-level/miscellaneous/x-pack-info.asciidoc
index f877ed720db..b432b10d3b8 100644
--- a/docs/java-rest/high-level/miscellaneous/x-pack-info.asciidoc
+++ b/docs/java-rest/high-level/miscellaneous/x-pack-info.asciidoc
@@ -13,7 +13,7 @@ include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[x-pack-info-execut
--------------------------------------------------
<1> Enable verbose mode. The default is `false` but `true` will return
more information.
-<2> Set the categories of information to retrieve. The the default is to
+<2> Set the categories of information to retrieve. The default is to
return no information which is useful for checking if {xpack} is
installed but not much else.
diff --git a/docs/java-rest/high-level/ml/close-job.asciidoc b/docs/java-rest/high-level/ml/close-job.asciidoc
new file mode 100644
index 00000000000..edadb9f40a2
--- /dev/null
+++ b/docs/java-rest/high-level/ml/close-job.asciidoc
@@ -0,0 +1,59 @@
+[[java-rest-high-x-pack-ml-close-job]]
+=== Close Job API
+
+The Close Job API provides the ability to close {ml} jobs in the cluster.
+It accepts a `CloseJobRequest` object and responds
+with a `CloseJobResponse` object.
+
+[[java-rest-high-x-pack-ml-close-job-request]]
+==== Close Job Request
+
+A `CloseJobRequest` object is created with an existing non-null `jobId`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-request]
+--------------------------------------------------
+<1> Constructing a new request referencing existing job IDs
+<2> Optionally used to close a failed job, or to forcefully close a job
+which has not responded to its initial close request.
+<3> Optionally set to ignore if a wildcard expression matches no jobs.
+ (This includes the `_all` string or when no jobs have been specified.)
+<4> Optionally set the `timeout` value for how long the
+execution should wait for the job to be closed.
+
+[[java-rest-high-x-pack-ml-close-job-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-execute]
+--------------------------------------------------
+<1> `isClosed()` from the `CloseJobResponse` indicates if the job was successfully
+closed or not.
+
+[[java-rest-high-x-pack-ml-close-job-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-execute-async]
+--------------------------------------------------
+<1> The `CloseJobRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The method does not block and returns immediately. The passed `ActionListener` is used
+to notify the caller of completion. A typical `ActionListener` for `CloseJobResponse` may
+look like
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
diff --git a/docs/java-rest/high-level/ml/flush-job.asciidoc b/docs/java-rest/high-level/ml/flush-job.asciidoc
new file mode 100644
index 00000000000..1f815bba0d5
--- /dev/null
+++ b/docs/java-rest/high-level/ml/flush-job.asciidoc
@@ -0,0 +1,83 @@
+[[java-rest-high-x-pack-ml-flush-job]]
+=== Flush Job API
+
+The Flush Job API provides the ability to flush a {ml} job's
+datafeed in the cluster.
+It accepts a `FlushJobRequest` object and responds
+with a `FlushJobResponse` object.
+
+[[java-rest-high-x-pack-ml-flush-job-request]]
+==== Flush Job Request
+
+A `FlushJobRequest` object is created with an existing non-null `jobId`.
+All other fields are optional for the request.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-request]
+--------------------------------------------------
+<1> Constructing a new request referencing an existing `jobId`
+
+==== Optional Arguments
+
+The following arguments are optional.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-request-options]
+--------------------------------------------------
+<1> Set request to calculate the interim results
+<2> Set the advance time to flush to the particular time value
+<3> Set the start time for the range of buckets on which
+to calculate the interim results (requires `calc_interim` to be `true`)
+<4> Set the end time for the range of buckets on which
+to calculate interim results (requires `calc_interim` to be `true`)
+<5> Set the skip time to skip a particular time value
+
+[[java-rest-high-x-pack-ml-flush-job-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-execute]
+--------------------------------------------------
+
+[[java-rest-high-x-pack-ml-flush-job-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-execute-async]
+--------------------------------------------------
+<1> The `FlushJobRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The method does not block and returns immediately. The passed `ActionListener` is used
+to notify the caller of completion. A typical `ActionListener` for `FlushJobResponse` may
+look like
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
+
+[[java-rest-high-x-pack-ml-flush-job-response]]
+==== Flush Job Response
+
+A `FlushJobResponse` contains an acknowledgement and an optional end date for the
+last finalized bucket.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-response]
+--------------------------------------------------
+<1> `isFlushed()` indicates if the job was successfully flushed or not.
+<2> `getLastFinalizedBucketEnd()` provides the timestamp
+(in milliseconds-since-the-epoch) of the end of the last bucket that was processed.
\ No newline at end of file
diff --git a/docs/java-rest/high-level/ml/forecast-job.asciidoc b/docs/java-rest/high-level/ml/forecast-job.asciidoc
new file mode 100644
index 00000000000..88bd5fdb532
--- /dev/null
+++ b/docs/java-rest/high-level/ml/forecast-job.asciidoc
@@ -0,0 +1,76 @@
+[[java-rest-high-x-pack-ml-forecast-job]]
+=== Forecast Job API
+
+The Forecast Job API provides the ability to forecast a {ml} job's behavior based
+on historical data.
+It accepts a `ForecastJobRequest` object and responds
+with a `ForecastJobResponse` object.
+
+[[java-rest-high-x-pack-ml-forecast-job-request]]
+==== Forecast Job Request
+
+A `ForecastJobRequest` object is created with an existing non-null `jobId`.
+All other fields are optional for the request.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-forecast-job-request]
+--------------------------------------------------
+<1> Constructing a new request referencing an existing `jobId`
+
+==== Optional Arguments
+
+The following arguments are optional.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-forecast-job-request-options]
+--------------------------------------------------
+<1> Set when the forecast for the job should expire
+<2> Set how far into the future the forecast should predict
+
+[[java-rest-high-x-pack-ml-forecast-job-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-forecast-job-execute]
+--------------------------------------------------
+
+[[java-rest-high-x-pack-ml-forecast-job-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-forecast-job-execute-async]
+--------------------------------------------------
+<1> The `ForecastJobRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The method does not block and returns immediately. The passed `ActionListener` is used
+to notify the caller of completion. A typical `ActionListener` for `ForecastJobResponse` may
+look like
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-forecast-job-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
+
+[[java-rest-high-x-pack-ml-forecast-job-response]]
+==== Forecast Job Response
+
+A `ForecastJobResponse` contains an acknowledgement and the forecast ID.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-forecast-job-response]
+--------------------------------------------------
+<1> `isAcknowledged()` indicates if the forecast was successful
+<2> `getForecastId()` provides the ID of the forecast that was created
\ No newline at end of file
diff --git a/docs/java-rest/high-level/ml/get-buckets.asciidoc b/docs/java-rest/high-level/ml/get-buckets.asciidoc
new file mode 100644
index 00000000000..33a3059166c
--- /dev/null
+++ b/docs/java-rest/high-level/ml/get-buckets.asciidoc
@@ -0,0 +1,125 @@
+[[java-rest-high-x-pack-ml-get-buckets]]
+=== Get Buckets API
+
+The Get Buckets API retrieves one or more bucket results.
+It accepts a `GetBucketsRequest` object and responds
+with a `GetBucketsResponse` object.
+
+[[java-rest-high-x-pack-ml-get-buckets-request]]
+==== Get Buckets Request
+
+A `GetBucketsRequest` object is created with an existing non-null `jobId`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-request]
+--------------------------------------------------
+<1> Constructing a new request referencing an existing `jobId`
+
+==== Optional Arguments
+The following arguments are optional:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-timestamp]
+--------------------------------------------------
+<1> The timestamp of the single bucket to get. If not set, all buckets will be returned.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-anomaly-score]
+--------------------------------------------------
+<1> Buckets with anomaly scores greater than or equal to this value will be returned.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-desc]
+--------------------------------------------------
+<1> If `true`, the buckets are sorted in descending order. Defaults to `false`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-end]
+--------------------------------------------------
+<1> Buckets with timestamps earlier than this time will be returned.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-exclude-interim]
+--------------------------------------------------
+<1> If `true`, interim results will be excluded. Defaults to `false`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-expand]
+--------------------------------------------------
+<1> If `true`, buckets will include their anomaly records. Defaults to `false`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-page]
+--------------------------------------------------
+<1> The page parameters `from` and `size`. `from` specifies the number of buckets to skip.
+`size` specifies the maximum number of buckets to get. Defaults to `0` and `100` respectively.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-sort]
+--------------------------------------------------
+<1> The field to sort buckets on. Defaults to `timestamp`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-start]
+--------------------------------------------------
+<1> Buckets with timestamps on or after this time will be returned.
+
+[[java-rest-high-x-pack-ml-get-buckets-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-execute]
+--------------------------------------------------
+
+
+[[java-rest-high-x-pack-ml-get-buckets-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-execute-async]
+--------------------------------------------------
+<1> The `GetBucketsRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed, the `ActionListener` is called back with the `onResponse` method
+if the execution is successful or the `onFailure` method if the execution
+failed.
+
+A typical listener for `GetBucketsResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
+
+[[java-rest-high-snapshot-ml-get-buckets-response]]
+==== Get Buckets Response
+
+The returned `GetBucketsResponse` contains the requested buckets:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-response]
+--------------------------------------------------
+<1> The count of buckets that were matched
+<2> The buckets retrieved
\ No newline at end of file
diff --git a/docs/java-rest/high-level/ml/get-influencers.asciidoc b/docs/java-rest/high-level/ml/get-influencers.asciidoc
new file mode 100644
index 00000000000..e53e92ff1df
--- /dev/null
+++ b/docs/java-rest/high-level/ml/get-influencers.asciidoc
@@ -0,0 +1,112 @@
+[[java-rest-high-x-pack-ml-get-influencers]]
+=== Get Influencers API
+
+The Get Influencers API retrieves one or more influencer results.
+It accepts a `GetInfluencersRequest` object and responds
+with a `GetInfluencersResponse` object.
+
+[[java-rest-high-x-pack-ml-get-influencers-request]]
+==== Get Influencers Request
+
+A `GetInfluencersRequest` object is created with an existing non-null `jobId`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-influencers-request]
+--------------------------------------------------
+<1> Constructing a new request referencing an existing `jobId`
+
+==== Optional Arguments
+The following arguments are optional:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-influencers-desc]
+--------------------------------------------------
+<1> If `true`, the influencers are sorted in descending order. Defaults to `false`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-influencers-end]
+--------------------------------------------------
+<1> Influencers with timestamps earlier than this time will be returned.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-influencers-exclude-interim]
+--------------------------------------------------
+<1> If `true`, interim results will be excluded. Defaults to `false`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-influencers-influencer-score]
+--------------------------------------------------
+<1> Influencers with an `influencer_score` greater than or equal to this value will be returned.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-influencers-page]
+--------------------------------------------------
+<1> The page parameters `from` and `size`. `from` specifies the number of influencers to skip.
+`size` specifies the maximum number of influencers to get. Defaults to `0` and `100` respectively.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-influencers-sort]
+--------------------------------------------------
+<1> The field to sort influencers on. Defaults to `influencer_score`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-influencers-start]
+--------------------------------------------------
+<1> Influencers with timestamps on or after this time will be returned.
+
+[[java-rest-high-x-pack-ml-get-influencers-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-influencers-execute]
+--------------------------------------------------
+
+[[java-rest-high-x-pack-ml-get-influencers-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-influencers-execute-async]
+--------------------------------------------------
+<1> The `GetInfluencersRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed, the `ActionListener` is called back with the `onResponse` method
+if the execution is successful or the `onFailure` method if the execution
+failed.
+
+A typical listener for `GetInfluencersResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-influencers-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
+
+[[java-rest-high-snapshot-ml-get-influencers-response]]
+==== Get Influencers Response
+
+The returned `GetInfluencersResponse` contains the requested influencers:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-influencers-response]
+--------------------------------------------------
+<1> The count of influencers that were matched
+<2> The influencers retrieved
\ No newline at end of file
diff --git a/docs/java-rest/high-level/ml/get-job-stats.asciidoc b/docs/java-rest/high-level/ml/get-job-stats.asciidoc
new file mode 100644
index 00000000000..90f7794ae76
--- /dev/null
+++ b/docs/java-rest/high-level/ml/get-job-stats.asciidoc
@@ -0,0 +1,67 @@
+[[java-rest-high-x-pack-ml-get-job-stats]]
+=== Get Job Stats API
+
+The Get Job Stats API provides the ability to get statistics for any number of
+{ml} jobs in the cluster.
+It accepts a `GetJobStatsRequest` object and responds
+with a `GetJobStatsResponse` object.
+
+[[java-rest-high-x-pack-ml-get-job-stats-request]]
+==== Get Job Stats Request
+
+A `GetJobStatsRequest` object can have any number of `jobId`
+entries. However, they all must be non-null. An empty list is the same as
+requesting statistics for all jobs.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-request]
+--------------------------------------------------
+<1> Constructing a new request referencing existing `jobIds`, can contain wildcards
+<2> Whether to ignore if a wildcard expression matches no jobs.
+ (This includes the `_all` string or when no jobs have been specified.)
+
+[[java-rest-high-x-pack-ml-get-job-stats-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-execute]
+--------------------------------------------------
+
+[[java-rest-high-x-pack-ml-get-job-stats-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-execute-async]
+--------------------------------------------------
+<1> The `GetJobStatsRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The method does not block and returns immediately. The passed `ActionListener` is used
+to notify the caller of completion. A typical `ActionListener` for `GetJobStatsResponse` may
+look like
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
+
+[[java-rest-high-x-pack-ml-get-job-stats-response]]
+==== Get Job Stats Response
+The returned `GetJobStatsResponse` contains the requested job statistics:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-response]
+--------------------------------------------------
+<1> `getCount()` indicates the number of job statistics found
+<2> `getJobStats()` is the collection of {ml} `JobStats` objects found
\ No newline at end of file
diff --git a/docs/java-rest/high-level/ml/get-job.asciidoc b/docs/java-rest/high-level/ml/get-job.asciidoc
new file mode 100644
index 00000000000..4ecf70e8e65
--- /dev/null
+++ b/docs/java-rest/high-level/ml/get-job.asciidoc
@@ -0,0 +1,57 @@
+[[java-rest-high-x-pack-ml-get-job]]
+=== Get Job API
+
+The Get Job API provides the ability to get {ml} jobs in the cluster.
+It accepts a `GetJobRequest` object and responds
+with a `GetJobResponse` object.
+
+[[java-rest-high-x-pack-ml-get-job-request]]
+==== Get Job Request
+
+A `GetJobRequest` object can have any number of `jobId` or `groupName`
+entries. However, they all must be non-null. An empty list is the same as
+requesting all jobs.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-request]
+--------------------------------------------------
+<1> Constructing a new request referencing existing `jobIds`, can contain wildcards
+<2> Whether to ignore if a wildcard expression matches no jobs.
+ (This includes the `_all` string or when no jobs have been specified.)
+
+[[java-rest-high-x-pack-ml-get-job-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-execute]
+--------------------------------------------------
+<1> `getCount()` from the `GetJobResponse` indicates the number of jobs found
+<2> `getJobs()` is the collection of {ml} `Job` objects found
+
+[[java-rest-high-x-pack-ml-get-job-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-execute-async]
+--------------------------------------------------
+<1> The `GetJobRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The method does not block and returns immediately. The passed `ActionListener` is used
+to notify the caller of completion. A typical `ActionListener` for `GetJobResponse` may
+look like
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
diff --git a/docs/java-rest/high-level/ml/get-overall-buckets.asciidoc b/docs/java-rest/high-level/ml/get-overall-buckets.asciidoc
new file mode 100644
index 00000000000..832eb8f2514
--- /dev/null
+++ b/docs/java-rest/high-level/ml/get-overall-buckets.asciidoc
@@ -0,0 +1,107 @@
+[[java-rest-high-x-pack-ml-get-overall-buckets]]
+=== Get Overall Buckets API
+
+The Get Overall Buckets API retrieves overall bucket results that
+summarize the bucket results of multiple jobs.
+It accepts a `GetOverallBucketsRequest` object and responds
+with a `GetOverallBucketsResponse` object.
+
+[[java-rest-high-x-pack-ml-get-overall-buckets-request]]
+==== Get Overall Buckets Request
+
+A `GetOverallBucketsRequest` object is created with one or more `jobId` entries.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-overall-buckets-request]
+--------------------------------------------------
+<1> Constructing a new request referencing job IDs `jobId1` and `jobId2`.
+
+==== Optional Arguments
+The following arguments are optional:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-overall-buckets-bucket-span]
+--------------------------------------------------
+<1> The span of the overall buckets. Must be greater than or equal to the jobs' largest `bucket_span`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-overall-buckets-end]
+--------------------------------------------------
+<1> Overall buckets with timestamps earlier than this time will be returned.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-overall-buckets-exclude-interim]
+--------------------------------------------------
+<1> If `true`, interim results will be excluded. Overall buckets are interim if any of the job buckets
+within the overall bucket interval are interim. Defaults to `false`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-overall-buckets-overall-score]
+--------------------------------------------------
+<1> Overall buckets with overall scores greater than or equal to this value will be returned.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-overall-buckets-start]
+--------------------------------------------------
+<1> Overall buckets with timestamps on or after this time will be returned.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-overall-buckets-top-n]
+--------------------------------------------------
+<1> The number of top job bucket scores to be used in the `overall_score` calculation. Defaults to `1`.
+
+[[java-rest-high-x-pack-ml-get-overall-buckets-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-overall-buckets-execute]
+--------------------------------------------------
+
+[[java-rest-high-x-pack-ml-get-overall-buckets-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-overall-buckets-execute-async]
+--------------------------------------------------
+<1> The `GetOverallBucketsRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed, the `ActionListener` is called back with the `onResponse` method
+if the execution is successful or the `onFailure` method if the execution
+failed.
+
+A typical listener for `GetOverallBucketsResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-overall-buckets-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
+
+[[java-rest-high-snapshot-ml-get-overall-buckets-response]]
+==== Get Overall Buckets Response
+
+The returned `GetOverallBucketsResponse` contains the requested buckets:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-overall-buckets-response]
+--------------------------------------------------
+<1> The count of overall buckets that were matched
+<2> The overall buckets retrieved
\ No newline at end of file
diff --git a/docs/java-rest/high-level/ml/get-records.asciidoc b/docs/java-rest/high-level/ml/get-records.asciidoc
new file mode 100644
index 00000000000..f8a88f34d33
--- /dev/null
+++ b/docs/java-rest/high-level/ml/get-records.asciidoc
@@ -0,0 +1,113 @@
+[[java-rest-high-x-pack-ml-get-records]]
+=== Get Records API
+
+The Get Records API retrieves one or more record results.
+It accepts a `GetRecordsRequest` object and responds
+with a `GetRecordsResponse` object.
+
+[[java-rest-high-x-pack-ml-get-records-request]]
+==== Get Records Request
+
+A `GetRecordsRequest` object is created with an existing non-null `jobId`.
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-request] +-------------------------------------------------- +<1> Constructing a new request referencing an existing `jobId` + +==== Optional Arguments +The following arguments are optional: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-desc] +-------------------------------------------------- +<1> If `true`, the records are sorted in descending order. Defaults to `false`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-end] +-------------------------------------------------- +<1> Records with timestamps earlier than this time will be returned. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-exclude-interim] +-------------------------------------------------- +<1> If `true`, interim results will be excluded. Defaults to `false`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-page] +-------------------------------------------------- +<1> The page parameters `from` and `size`. `from` specifies the number of records to skip. +`size` specifies the maximum number of records to get. Defaults to `0` and `100` respectively. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-record-score] +-------------------------------------------------- +<1> Records with record_score greater or equal than this value will be returned. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-sort] +-------------------------------------------------- +<1> The field to sort records on. Defaults to `record_score`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-start] +-------------------------------------------------- +<1> Records with timestamps on or after this time will be returned. + +[[java-rest-high-x-pack-ml-get-records-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-execute] +-------------------------------------------------- + + +[[java-rest-high-x-pack-ml-get-records-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-execute-async] +-------------------------------------------------- +<1> The `GetRecordsRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back with the `onResponse` method +if the execution is successful or the `onFailure` method if the execution +failed. + +A typical listener for `GetRecordsResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs + +[[java-rest-high-snapshot-ml-get-records-response]] +==== Get Records Response + +The returned `GetRecordsResponse` contains the requested records: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-response] +-------------------------------------------------- +<1> The count of records that were matched +<2> The records retrieved \ No newline at end of file diff --git a/docs/java-rest/high-level/ml/open-job.asciidoc b/docs/java-rest/high-level/ml/open-job.asciidoc index ad575121818..be6a518df19 100644 --- a/docs/java-rest/high-level/ml/open-job.asciidoc +++ b/docs/java-rest/high-level/ml/open-job.asciidoc @@ -44,7 +44,7 @@ include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-open-job-exec the execution completes The method does not block and returns immediately. The passed `ActionListener` is used -to notify the caller of completion. A typical `ActionListner` for `OpenJobResponse` may +to notify the caller of completion. A typical `ActionListener` for `OpenJobResponse` may look like ["source","java",subs="attributes,callouts,macros"] diff --git a/docs/java-rest/high-level/ml/post-data.asciidoc b/docs/java-rest/high-level/ml/post-data.asciidoc new file mode 100644 index 00000000000..2c8ca8f18a3 --- /dev/null +++ b/docs/java-rest/high-level/ml/post-data.asciidoc @@ -0,0 +1,86 @@ +[[java-rest-high-x-pack-ml-post-data]] +=== Post Data API + +The Post Data API provides the ability to post data to an open + {ml} job in the cluster. +It accepts a `PostDataRequest` object and responds +with a `PostDataResponse` object. + +[[java-rest-high-x-pack-ml-post-data-request]] +==== Post Data Request + +A `PostDataRequest` object gets created with an existing non-null `jobId` +and the `XContentType` being sent. Individual docs can be added +incrementally via the `PostDataRequest.JsonBuilder#addDoc` method. +These are then serialized and sent in bulk when passed to the `PostDataRequest`. 
+
+Alternatively, the serialized bulk content can be set manually, along with its `XContentType`,
+through one of the other `PostDataRequest` constructors.
+
+Only `XContentType.JSON` and `XContentType.SMILE` are supported.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-post-data-request]
+--------------------------------------------------
+<1> Create a new `PostDataRequest.JsonBuilder` object for incrementally adding documents
+<2> Add a new document as a `Map` object
+<3> Add a new document as a serialized JSON-formatted `String`
+<4> Constructing a new request referencing an opened `jobId`, and a `JsonBuilder`
+
+==== Optional Arguments
+
+The following arguments are optional.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-post-data-request-options]
+--------------------------------------------------
+<1> Set the start of the bucket resetting time
+<2> Set the end of the bucket resetting time
+
+[[java-rest-high-x-pack-ml-post-data-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-post-data-execute]
+--------------------------------------------------
+
+[[java-rest-high-x-pack-ml-post-data-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-post-data-execute-async]
+--------------------------------------------------
+<1> The `PostDataRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The method does not block and returns immediately. The passed `ActionListener` is used
+to notify the caller of completion. A typical `ActionListener` for `PostDataResponse` may
+look like
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-post-data-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
+
+[[java-rest-high-x-pack-ml-post-data-response]]
+==== Post Data Response
+
+A `PostDataResponse` contains current data processing statistics.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-post-data-response]
+--------------------------------------------------
+<1> `getDataCounts()` returns a `DataCounts` object containing the current
+data processing counts.
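+
+As a brief usage sketch, some of the counts can be read back as shown below
+(the getter names follow the `DataCounts` field names but should be treated
+as assumptions rather than a definitive API listing):
+
+["source","java"]
+--------------------------------------------------
+DataCounts counts = postDataResponse.getDataCounts();
+// Number of records that the job has processed so far.
+long processed = counts.getProcessedRecordCount();
+// Number of records with a missing or unparseable date field.
+long invalidDates = counts.getInvalidDateCount();
+--------------------------------------------------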
diff --git a/docs/java-rest/high-level/ml/update-job.asciidoc b/docs/java-rest/high-level/ml/update-job.asciidoc
new file mode 100644
index 00000000000..3e1d1e2313b
--- /dev/null
+++ b/docs/java-rest/high-level/ml/update-job.asciidoc
@@ -0,0 +1,93 @@
+[[java-rest-high-x-pack-ml-update-job]]
+=== Update Job API
+
+The Update Job API provides the ability to update a {ml} job.
+It accepts an `UpdateJobRequest` object and responds
+with a `PutJobResponse` object.
+
+[[java-rest-high-x-pack-ml-update-job-request]]
+==== Update Job Request
+
+An `UpdateJobRequest` object gets created with a `JobUpdate` object.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-request]
+--------------------------------------------------
+<1> Constructing a new request referencing a `JobUpdate` object
+
+==== Optional Arguments
+
+The `JobUpdate` object has many optional arguments with which to update an existing {ml}
+job. An existing, non-null `jobId` must be referenced in its creation.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-options]
+--------------------------------------------------
+<1> Mandatory, non-null `jobId` referencing an existing {ml} job
+<2> Updated description
+<3> Updated analysis limits
+<4> Updated background persistence interval
+<5> Updated analysis config's categorization filters
+<6> Updated detectors through the `JobUpdate.DetectorUpdate` object
+<7> Updated group membership
+<8> Updated result retention
+<9> Updated model plot configuration
+<10> Updated model snapshot retention setting
+<11> Updated custom settings
+<12> Updated renormalization window
+
+Included with these options are the following optional `JobUpdate.DetectorUpdate` arguments.
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-detector-options]
+--------------------------------------------------
+<1> The index of the detector. `0` means unknown
+<2> The optional description of the detector
+<3> The `DetectionRule` rules that apply to this detector
+
+[[java-rest-high-x-pack-ml-update-job-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-execute]
+--------------------------------------------------
+
+[[java-rest-high-x-pack-ml-update-job-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-execute-async]
+--------------------------------------------------
+<1> The `UpdateJobRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The method does not block and returns immediately. The passed `ActionListener` is used
+to notify the caller of completion.
A typical `ActionListener` for `PutJobResponse` may
+look like
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
+
+[[java-rest-high-x-pack-ml-update-job-response]]
+==== Update Job Response
+
+A `PutJobResponse` contains the updated `Job` object:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-response]
+--------------------------------------------------
+<1> `getResponse()` returns the updated `Job` object
diff --git a/docs/java-rest/high-level/script/put_script.asciidoc b/docs/java-rest/high-level/script/put_script.asciidoc
new file mode 100644
index 00000000000..acc80e82d11
--- /dev/null
+++ b/docs/java-rest/high-level/script/put_script.asciidoc
@@ -0,0 +1,106 @@
+[[java-rest-high-put-stored-script]]
+=== Put Stored Script API
+
+[[java-rest-high-put-stored-script-request]]
+==== Put Stored Script Request
+
+A `PutStoredScriptRequest` requires an `id` and `content`:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[put-stored-script-request]
+--------------------------------------------------
+<1> The id of the script
+<2> The content of the script
+
+[[java-rest-high-put-stored-script-content]]
+==== Content
+The content of a script can be written in different languages and provided in
+different ways:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[put-stored-script-content-painless]
+--------------------------------------------------
+<1> Specify a painless script provided as an `XContentBuilder` object.
+Note that the builder needs to be passed as a `BytesReference` object
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[put-stored-script-content-mustache]
+--------------------------------------------------
+<1> Specify a mustache script provided as an `XContentBuilder` object.
+Note that the value of `source` can be directly provided as a JSON string
+
+==== Optional arguments
+The following arguments can optionally be provided:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[put-stored-script-context]
+--------------------------------------------------
+<1> The context the script should be executed in.
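+
+For reference, a complete request might be assembled as in the following
+sketch before the remaining optional arguments below are applied (the script
+id and source here are hypothetical):
+
+["source","java"]
+--------------------------------------------------
+PutStoredScriptRequest request = new PutStoredScriptRequest();
+request.id("calculate-score"); // hypothetical script id
+// The content is passed as a BytesReference together with its XContentType.
+request.content(new BytesArray(
+    "{\"script\": {\"lang\": \"painless\", " +
+    "\"source\": \"Math.log(_score * 2) + params.my_modifier\"}}"),
+    XContentType.JSON);
+--------------------------------------------------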
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[put-stored-script-timeout] +-------------------------------------------------- +<1> Timeout to wait for the all the nodes to acknowledge the script creation as a `TimeValue` +<2> Timeout to wait for the all the nodes to acknowledge the script creation as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[put-stored-script-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +[[java-rest-high-put-stored-script-sync]] +==== Synchronous Execution +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[put-stored-script-execute] +-------------------------------------------------- + +[[java-rest-high-put-stored-script-async]] +==== Asynchronous Execution + +The asynchronous execution of a put stored script request requires both the `PutStoredScriptRequest` +instance and an `ActionListener` instance to be passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[put-stored-script-execute-async] +-------------------------------------------------- +<1> The `PutStoredScriptRequest` to execute and the `ActionListener` to use when +the execution completes + +[[java-rest-high-put-stored-script-listener]] +===== Action Listener + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `AcknowledgedResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[put-stored-script-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. 
The raised exception is provided as an argument
+
+[[java-rest-high-put-stored-script-response]]
+==== Put Stored Script Response
+
+The returned `AcknowledgedResponse` allows you to retrieve information about the
+executed operation as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[put-stored-script-response]
+--------------------------------------------------
+<1> Indicates whether all of the nodes have acknowledged the request
\ No newline at end of file
diff --git a/docs/java-rest/high-level/security/disable-user.asciidoc b/docs/java-rest/high-level/security/disable-user.asciidoc
new file mode 100644
index 00000000000..8bb2299946c
--- /dev/null
+++ b/docs/java-rest/high-level/security/disable-user.asciidoc
@@ -0,0 +1,46 @@
+[[java-rest-high-security-disable-user]]
+=== Disable User API
+
+[[java-rest-high-security-disable-user-execution]]
+==== Execution
+
+Disabling a user can be performed using the `security().disableUser()`
+method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[disable-user-execute]
+--------------------------------------------------
+
+[[java-rest-high-security-disable-user-response]]
+==== Response
+
+The returned `EmptyResponse` does not contain any fields. The return of this
+response indicates a successful request.
+
+[[java-rest-high-security-disable-user-async]]
+==== Asynchronous Execution
+
+This request can be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[disable-user-execute-async]
+--------------------------------------------------
+<1> The `DisableUserRequest` to execute and the `ActionListener` to use when
+the execution completes.
+
+The asynchronous method does not block and returns immediately. Once the request
+has completed the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for an `EmptyResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[disable-user-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument.
+<2> Called in case of failure. The raised exception is provided as an argument.
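+
+As a sketch, such a listener could be declared as follows; since
+`EmptyResponse` carries no fields, `onResponse` simply signals success:
+
+["source","java"]
+--------------------------------------------------
+ActionListener<EmptyResponse> listener = new ActionListener<EmptyResponse>() {
+    @Override
+    public void onResponse(EmptyResponse response) {
+        // The user was successfully disabled; there are no fields to inspect.
+    }
+
+    @Override
+    public void onFailure(Exception e) {
+        // The request failed; the exception describes why.
+    }
+};
+--------------------------------------------------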
diff --git a/docs/java-rest/high-level/security/enable-user.asciidoc b/docs/java-rest/high-level/security/enable-user.asciidoc
new file mode 100644
index 00000000000..76016532697
--- /dev/null
+++ b/docs/java-rest/high-level/security/enable-user.asciidoc
@@ -0,0 +1,46 @@
+[[java-rest-high-security-enable-user]]
+=== Enable User API
+
+[[java-rest-high-security-enable-user-execution]]
+==== Execution
+
+Enabling a disabled user can be performed using the `security().enableUser()`
+method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[enable-user-execute]
+--------------------------------------------------
+
+[[java-rest-high-security-enable-user-response]]
+==== Response
+
+The returned `EmptyResponse` does not contain any fields. The return of this
+response indicates a successful request.
+
+[[java-rest-high-security-enable-user-async]]
+==== Asynchronous Execution
+
+This request can be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[enable-user-execute-async]
+--------------------------------------------------
+<1> The `EnableUserRequest` to execute and the `ActionListener` to use when
+the execution completes.
+
+The asynchronous method does not block and returns immediately. Once the request
+has completed the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for an `EmptyResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[enable-user-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument.
+<2> Called in case of failure. The raised exception is provided as an argument.
diff --git a/docs/java-rest/high-level/security/put-user.asciidoc b/docs/java-rest/high-level/security/put-user.asciidoc
new file mode 100644
index 00000000000..aca69b81828
--- /dev/null
+++ b/docs/java-rest/high-level/security/put-user.asciidoc
@@ -0,0 +1,52 @@
+[[java-rest-high-security-put-user]]
+=== Put User API
+
+[[java-rest-high-security-put-user-execution]]
+==== Execution
+
+Creating and updating a user can be performed using the `security().putUser()`
+method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[put-user-execute]
+--------------------------------------------------
+
+[[java-rest-high-security-put-user-response]]
+==== Response
+
+The returned `PutUserResponse` contains a single field, `created`. This field
+indicates whether the user was created or an existing entry was updated.
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SecurityDocumentationIT.java[put-user-response] +-------------------------------------------------- +<1> `created` is a boolean indicating whether the user was created or updated + +[[java-rest-high-security-put-user-async]] +==== Asynchronous Execution + +This request can be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SecurityDocumentationIT.java[put-user-execute-async] +-------------------------------------------------- +<1> The `PutUserRequest` to execute and the `ActionListener` to use when +the execution completes. + +The asynchronous method does not block and returns immediately. Once the request +has completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for a `PutUserResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SecurityDocumentationIT.java[put-user-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument. +<2> Called in case of failure. The raised exception is provided as an argument. diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 6bcb736243a..8d92653ce57 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -15,6 +15,9 @@ Single document APIs:: Multi-document APIs:: * <> * <> +* <> +* <> +* <> include::document/index.asciidoc[] include::document/get.asciidoc[] @@ -23,6 +26,9 @@ include::document/delete.asciidoc[] include::document/update.asciidoc[] include::document/bulk.asciidoc[] include::document/multi-get.asciidoc[] +include::document/reindex.asciidoc[] +include::document/update-by-query.asciidoc[] +include::document/delete-by-query.asciidoc[] == Search APIs @@ -183,9 +189,11 @@ include::tasks/cancel_tasks.asciidoc[] The Java High Level REST Client supports the following Scripts APIs: * <> +* <> * <> include::script/get_script.asciidoc[] +include::script/put_script.asciidoc[] include::script/delete_script.asciidoc[] == Licensing APIs @@ -205,12 +213,34 @@ include::licensing/delete-license.asciidoc[] The Java High Level REST Client supports the following Machine Learning APIs: * <> +* <> * <> * <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> include::ml/put-job.asciidoc[] +include::ml/get-job.asciidoc[] include::ml/delete-job.asciidoc[] include::ml/open-job.asciidoc[] +include::ml/close-job.asciidoc[] +include::ml/update-job.asciidoc[] +include::ml/flush-job.asciidoc[] +include::ml/get-job-stats.asciidoc[] +include::ml/forecast-job.asciidoc[] +include::ml/get-buckets.asciidoc[] +include::ml/get-overall-buckets.asciidoc[] +include::ml/get-records.asciidoc[] +include::ml/post-data.asciidoc[] +include::ml/get-influencers.asciidoc[] == Migration APIs @@ -220,6 +250,18 @@ The Java High Level REST Client supports the following Migration APIs: include::migration/get-assistance.asciidoc[] +== Security APIs + +The Java High Level REST Client supports the following Security APIs: + +* <> +* <> +* <> + 
+include::security/put-user.asciidoc[]
+include::security/enable-user.asciidoc[]
+include::security/disable-user.asciidoc[]
+
== Watcher APIs

The Java High Level REST Client supports the following Watcher APIs:
@@ -229,3 +271,11 @@ The Java High Level REST Client supports the following Watcher APIs:

include::watcher/put-watch.asciidoc[]
include::watcher/delete-watch.asciidoc[]
+
+== Graph APIs
+
+The Java High Level REST Client supports the following Graph APIs:
+
+* <>
+
+include::graph/explore.asciidoc[]
diff --git a/docs/painless/painless-contexts/index.asciidoc b/docs/painless/painless-contexts/index.asciidoc
index a9d3982133e..a71fde0be32 100644
--- a/docs/painless/painless-contexts/index.asciidoc
+++ b/docs/painless/painless-contexts/index.asciidoc
@@ -30,6 +30,8 @@ include::painless-metric-agg-reduce-context.asciidoc[]

include::painless-bucket-agg-context.asciidoc[]

+include::painless-analysis-predicate-context.asciidoc[]
+
include::painless-watcher-condition-context.asciidoc[]

include::painless-watcher-transform-context.asciidoc[]
diff --git a/docs/painless/painless-contexts/painless-analysis-predicate-context.asciidoc b/docs/painless/painless-contexts/painless-analysis-predicate-context.asciidoc
new file mode 100644
index 00000000000..07914b671e7
--- /dev/null
+++ b/docs/painless/painless-contexts/painless-analysis-predicate-context.asciidoc
@@ -0,0 +1,43 @@
+[[painless-analysis-predicate-context]]
+=== Analysis Predicate Context
+
+Use a painless script to determine whether or not the current token in an
+analysis chain matches a predicate.
+
+*Variables*
+
+`params` (`Map`, read-only)::
+        User-defined parameters passed in as part of the query.
+
+`token.term` (`CharSequence`, read-only)::
+        The characters of the current token
+
+`token.position` (`int`, read-only)::
+        The position of the current token
+
+`token.positionIncrement` (`int`, read-only)::
+        The position increment of the current token
+
+`token.positionLength` (`int`, read-only)::
+        The position length of the current token
+
+`token.startOffset` (`int`, read-only)::
+        The start offset of the current token
+
+`token.endOffset` (`int`, read-only)::
+        The end offset of the current token
+
+`token.type` (`String`, read-only)::
+        The type of the current token
+
+`token.keyword` (`boolean`, read-only)::
+        Whether or not the current token is marked as a keyword
+
+*Return*
+
+`boolean`::
+        Whether or not the current token matches the predicate
+
+*API*
+
+The standard <> is available.
\ No newline at end of file
diff --git a/docs/painless/painless-debugging.asciidoc b/docs/painless/painless-debugging.asciidoc
index 8523116616d..c141cbc5325 100644
--- a/docs/painless/painless-debugging.asciidoc
+++ b/docs/painless/painless-debugging.asciidoc
@@ -5,7 +5,7 @@

Painless doesn't have a
https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop[REPL]
-and while it'd be nice for it to have one one day, it wouldn't tell you the
+and while it'd be nice for it to have one some day, it wouldn't tell you the
whole story around debugging painless scripts embedded in Elasticsearch
because the data that the scripts have access to or "context" is so important.
For now the best way to debug embedded scripts is by throwing exceptions at choice diff --git a/docs/painless/painless-execute-script.asciidoc b/docs/painless/painless-execute-script.asciidoc index 2aca9597786..30320def79b 100644 --- a/docs/painless/painless-execute-script.asciidoc +++ b/docs/painless/painless-execute-script.asciidoc @@ -26,7 +26,7 @@ The only variable that is available is `params`, which can be used to access use The result of the script is always converted to a string. If no context is specified then this context is used by default. -====== Example +*Example* Request: @@ -67,7 +67,7 @@ The following parameters may be specified in `context_setup` for a filter contex document:: Contains the document that will be temporarily indexed in-memory and is accessible from the script. index:: The name of an index containing a mapping that is compatable with the document being indexed. -====== Example +*Example* [source,js] ---------------------------------------------------------------- @@ -125,7 +125,7 @@ document:: Contains the document that will be temporarily indexed in-memory and index:: The name of an index containing a mapping that is compatable with the document being indexed. query:: If `_score` is used in the script then a query can specified that will be used to compute a score. -====== Example +*Example* [source,js] ---------------------------------------------------------------- diff --git a/docs/painless/painless-operators-array.asciidoc b/docs/painless/painless-operators-array.asciidoc index acfb87d30af..e80a863df27 100644 --- a/docs/painless/painless-operators-array.asciidoc +++ b/docs/painless/painless-operators-array.asciidoc @@ -254,7 +254,7 @@ and `]` tokens. *Errors* * If a value other than an `int` type value or a value that is castable to an - `int` type value is specified for for a dimension's size. + `int` type value is specified for a dimension's size. *Grammar* diff --git a/docs/plugins/analysis-phonetic.asciidoc b/docs/plugins/analysis-phonetic.asciidoc index a75c21fdac6..9d9df4827fd 100644 --- a/docs/plugins/analysis-phonetic.asciidoc +++ b/docs/plugins/analysis-phonetic.asciidoc @@ -38,7 +38,6 @@ PUT phonetic_sample "my_analyzer": { "tokenizer": "standard", "filter": [ - "standard", "lowercase", "my_metaphone" ] diff --git a/docs/plugins/discovery-file.asciidoc b/docs/plugins/discovery-file.asciidoc index ad06cfc0cc5..4f2182da056 100644 --- a/docs/plugins/discovery-file.asciidoc +++ b/docs/plugins/discovery-file.asciidoc @@ -1,71 +1,14 @@ [[discovery-file]] === File-Based Discovery Plugin -The file-based discovery plugin uses a list of hosts/ports in a `unicast_hosts.txt` file -in the `config/discovery-file` directory for unicast discovery. +The functionality provided by the `discovery-file` plugin is now available in +Elasticsearch without requiring a plugin. This plugin still exists to ensure +backwards compatibility, but it will be removed in a future version. + +On installation, this plugin creates a file at +`$ES_PATH_CONF/discovery-file/unicast_hosts.txt` that comprises comments that +describe how to use it. It is preferable not to install this plugin and instead +to create this file, and its containing directory, using standard tools. 
:plugin_name: discovery-file include::install_remove.asciidoc[] - -[[discovery-file-usage]] -[float] -==== Using the file-based discovery plugin - -The file-based discovery plugin provides the ability to specify the -unicast hosts list through a simple `unicast_hosts.txt` file that can -be dynamically updated at any time. To enable, add the following in `elasticsearch.yml`: - -[source,yaml] ----- -discovery.zen.hosts_provider: file ----- - -This plugin simply provides a facility to supply the unicast hosts list for -zen discovery through an external file that can be updated at any time by a side process. - -For example, this gives a convenient mechanism for an Elasticsearch instance -that is run in docker containers to be dynamically supplied a list of IP -addresses to connect to for zen discovery when those IP addresses may not be -known at node startup. - -Note that the file-based discovery plugin is meant to augment the unicast -hosts list in `elasticsearch.yml` (if specified), not replace it. Therefore, -if there are valid unicast host entries in `discovery.zen.ping.unicast.hosts`, -they will be used in addition to those supplied in `unicast_hosts.txt`. - -Anytime a change is made to the `unicast_hosts.txt` file, even as Elasticsearch -continues to run, the new changes will be picked up by the plugin and the -new hosts list will be used for the next pinging round for master election. - -Upon installation of the plugin, a default `unicast_hosts.txt` file will -be found in the `$CONFIG_DIR/discovery-file` directory. This default file -will contain some comments about what the file should contain. All comments -for this file must appear on their lines starting with `#` (i.e. comments -cannot start in the middle of a line). - -[[discovery-file-format]] -[float] -==== unicast_hosts.txt file format - -The format of the file is to specify one unicast host entry per line. -Each unicast host entry consists of the host (host name or IP address) and -an optional transport port number. If the port number is specified, is must -come immediately after the host (on the same line) separated by a `:`. -If the port number is not specified, a default value of 9300 is used. - -For example, this is an example of `unicast_hosts.txt` for a cluster with -four nodes that participate in unicast discovery, some of which are not -running on the default port: - -[source,txt] ----------------------------------------------------------------- -10.10.10.5 -10.10.10.6:9305 -10.10.10.5:10005 -# an IPv6 address -[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301 ----------------------------------------------------------------- - -Host names are allowed instead of IP addresses (similar to -`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be -specified in brackets with the port coming after the brackets. diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc index 82923257385..e3978e65f44 100644 --- a/docs/plugins/repository-gcs.asciidoc +++ b/docs/plugins/repository-gcs.asciidoc @@ -10,71 +10,66 @@ include::install_remove.asciidoc[] [[repository-gcs-usage]] ==== Getting started -The plugin uses the https://cloud.google.com/storage/docs/json_api/[Google Cloud Storage JSON API] (v1) -to connect to the Storage service. If this is the first time you use Google Cloud Storage, you first -need to connect to the https://console.cloud.google.com/[Google Cloud Platform Console] and create a new -project. 
Once your project is created, you must enable the Cloud Storage Service for your project. +The plugin uses the https://github.com/GoogleCloudPlatform/google-cloud-java/tree/master/google-cloud-clients/google-cloud-storage[Google Cloud Java Client for Storage] +to connect to the Storage service. If you are using +https://cloud.google.com/storage/[Google Cloud Storage] for the first time, you +must connect to the https://console.cloud.google.com/[Google Cloud Platform Console] +and create a new project. After your project is created, you must enable the +Cloud Storage Service for your project. [[repository-gcs-creating-bucket]] ===== Creating a Bucket -Google Cloud Storage service uses the concept of https://cloud.google.com/storage/docs/key-terms[Bucket] -as a container for all the data. Buckets are usually created using the -https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin will not automatically -create buckets. +The Google Cloud Storage service uses the concept of a +https://cloud.google.com/storage/docs/key-terms[bucket] as a container for all +the data. Buckets are usually created using the +https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin +does not automatically create buckets. To create a new bucket: -1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] -2. Select your project -3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser] -4. Click the "Create Bucket" button -5. Enter the name of the new bucket -6. Select a storage class -7. Select a location -8. Click the "Create" button +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]. +2. Select your project. +3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser]. +4. Click the *Create Bucket* button. +5. Enter the name of the new bucket. +6. Select a storage class. +7. Select a location. +8. Click the *Create* button. -The bucket should now be created. +For more detailed instructions, see the +https://cloud.google.com/storage/docs/quickstart-console#create_a_bucket[Google Cloud documentation]. [[repository-gcs-service-authentication]] ===== Service Authentication -The plugin supports two authentication modes: - -* The built-in <>. This mode is -recommended if your Elasticsearch node is running on a Compute Engine virtual machine. - -* Specifying <> credentials. - -[[repository-gcs-using-compute-engine]] -===== Using Compute Engine -When running on Compute Engine, the plugin use Google's built-in authentication mechanism to -authenticate on the Storage service. Compute Engine virtual machines are usually associated to a -default service account. This service account can be found in the VM instance details in the -https://console.cloud.google.com/compute/[Compute Engine console]. - -This is the default authentication mode and requires no configuration. - -NOTE: The Compute Engine VM must be allowed to use the Storage service. This can be done only at VM -creation time, when "Storage" access can be configured to "Read/Write" permission. Check your -instance details at the section "Cloud API access scopes". +The plugin must authenticate the requests it makes to the Google Cloud Storage +service. It is common for Google client libraries to employ a strategy named https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application[application default credentials]. +However, that strategy is **not** supported for use with Elasticsearch. 
The
+plugin operates under the Elasticsearch process, which runs with the security
+manager enabled. The security manager obstructs the "automatic" credential discovery.
+Therefore, you must configure <>
+credentials even if you are using an environment that does not normally require
+this configuration (such as Compute Engine, Kubernetes Engine or App Engine).

[[repository-gcs-using-service-account]]
===== Using a Service Account
-If your Elasticsearch node is not running on Compute Engine, or if you don't want to use Google's
-built-in authentication mechanism, you can authenticate on the Storage service using a
-https://cloud.google.com/iam/docs/overview#service_account[Service Account] file.
+You have to obtain and provide https://cloud.google.com/iam/docs/overview#service_account[service account credentials]
+manually.

-To create a service account file:
+For detailed information about generating JSON service account files, see the https://cloud.google.com/storage/docs/authentication?hl=en#service_accounts[Google Cloud documentation].
+Note that the PKCS12 format is not supported by this plugin.

-1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]
-2. Select your project
-3. Got to the https://console.cloud.google.com/permissions[Permission] tab
-4. Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab
-5. Click on "Create service account"
-6. Once created, select the new service account and download a JSON key file
+Here is a summary of the steps:

-A service account file looks like this:
+1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console].
+2. Select your project.
+3. Go to the https://console.cloud.google.com/permissions[Permission] tab.
+4. Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab.
+5. Click *Create service account*.
+6. After the account is created, select it and download a JSON key file.
+
+A JSON service account file looks like this:

[source,js]
----
@@ -84,19 +79,26 @@ A service account file looks like this:
  "private_key_id": "...",
  "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
  "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com",
-  "client_id": "..."
+  "client_id": "...",
+  "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+  "token_uri": "https://accounts.google.com/o/oauth2/token",
+  "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+  "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/your-bucket@your-project-id.iam.gserviceaccount.com"
}
----
// NOTCONSOLE

-This file must be stored in the {ref}/secure-settings.html[elasticsearch keystore], under a setting name
-of the form `gcs.client.NAME.credentials_file`, where `NAME` is the name of the client configuration.
-The default client name is `default`, but a different client name can be specified in repository
-settings using `client`.
+To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore]. You must add a setting name of the form `gcs.client.NAME.credentials_file`, where `NAME`
+is the name of the client configuration for the repository. The implicit client
+name is `default`, but a different client name can be specified in the
+repository settings with the `client` key.
-For example, if specifying the credentials file in the keystore under
-`gcs.client.my_alternate_client.credentials_file`, you can configure a repository to use these
-credentials like this:
+NOTE: Passing the file path via the `GOOGLE_APPLICATION_CREDENTIALS` environment
+variable is **not** supported.
+
+For example, if you added a `gcs.client.my_alternate_client.credentials_file`
+setting in the keystore, you can configure a repository to use those credentials
+like this:

[source,js]
----
@@ -113,19 +115,18 @@ PUT _snapshot/my_gcs_repository
// TEST[skip:we don't have gcs setup while testing this]

The `credentials_file` settings are {ref}/secure-settings.html#reloadable-secure-settings[reloadable].
-After you reload the settings, the internal `gcs` clients, used to transfer the
-snapshot contents, will utilize the latest settings from the keystore.
+After you reload the settings, the internal `gcs` clients, which are used to
+transfer the snapshot contents, utilize the latest settings from the keystore.

-
-NOTE: In progress snapshot/restore jobs will not be preempted by a *reload*
-of the client's `credentials_file` settings. They will complete using the client
-as it was built when the operation started.
+NOTE: Snapshot or restore jobs that are in progress are not preempted by a *reload*
+of the client's `credentials_file` settings. They complete using the client as
+it was built when the operation started.

[[repository-gcs-client]]
==== Client Settings

The client used to connect to Google Cloud Storage has a number of settings available.
-Client setting names are of the form `gcs.client.CLIENT_NAME.SETTING_NAME` and specified
+Client setting names are of the form `gcs.client.CLIENT_NAME.SETTING_NAME` and are specified
inside `elasticsearch.yml`. The default client name looked up by a `gcs` repository is
called `default`, but can be customized with the repository setting `client`.

@@ -146,7 +147,7 @@ PUT _snapshot/my_gcs_repository
// TEST[skip:we don't have gcs setup while testing this]

Some settings are sensitive and must be stored in the
-{ref}/secure-settings.html[elasticsearch keystore]. This is the case for the service account file:
+{ref}/secure-settings.html[Elasticsearch keystore]. This is the case for the service account file:

[source,sh]
----
@@ -185,7 +186,7 @@ are marked as `Secure`.

`project_id`::

-    The Google Cloud project id. This will be automatically infered from the credentials file but
+    The Google Cloud project id. This will be automatically inferred from the credentials file but
    can be specified explicitly. For example, it can be used to switch between projects when the
    same credentials are usable for both the production and the development projects.

@@ -248,8 +249,8 @@ The following settings are supported:

The service account used to access the bucket must have the "Writer" access to the bucket:

-1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]
-2. Select your project
-3. Got to the https://console.cloud.google.com/storage/browser[Storage Browser]
-4. Select the bucket and "Edit bucket permission"
-5. The service account must be configured as a "User" with "Writer" access
+1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console].
+2. Select your project.
+3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser].
+4. Select the bucket and "Edit bucket permission".
+5. The service account must be configured as a "User" with "Writer" access.
diff --git a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc
index 3bd430d03d5..e371674228b 100644
--- a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc
@@ -81,7 +81,8 @@ Response:
                    "key": 1425168000000,
                    "doc_count": 2
                }
-            ]
+            ],
+            "interval": "1M"
        }
    }
}
@@ -174,7 +175,8 @@ starting at midnight UTC on 1 October 2015:
                    "key": 1443664800000,
                    "doc_count": 1
                }
-            ]
+            ],
+            "interval": "1h"
        }
    }
}
@@ -229,7 +231,8 @@ the specified time zone.
                    "key": 1443664800000,
                    "doc_count": 1
                }
-            ]
+            ],
+            "interval": "1h"
        }
    }
}
diff --git a/docs/reference/aggregations/bucket/children-aggregation.asciidoc b/docs/reference/aggregations/bucket/children-aggregation.asciidoc
index 3805b2e564c..e2b3c8ec591 100644
--- a/docs/reference/aggregations/bucket/children-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/children-aggregation.asciidoc
@@ -144,7 +144,7 @@ Possible response:
  },
  "hits": {
    "total": 3,
-    "max_score": 0.0,
+    "max_score": null,
    "hits": []
  },
  "aggregations": {
diff --git a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc
index b6595c0d05c..0a8a46a0b67 100644
--- a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc
@@ -433,8 +433,8 @@ Scripts can be inline (as in above example), indexed or stored on disk. For deta
Available parameters in the script are

[horizontal]
-`_subset_freq`:: Number of documents the term appears in in the subset.
-`_superset_freq`:: Number of documents the term appears in in the superset.
+`_subset_freq`:: Number of documents the term appears in the subset.
+`_superset_freq`:: Number of documents the term appears in the superset.
`_subset_size`:: Number of documents in the subset.
`_superset_size`:: Number of documents in the superset.
diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc
index 5eeb3a4605a..958f48d835c 100644
--- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc
+++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc
@@ -320,7 +320,7 @@ Top hits response snippet with a nested hit, which resides in the first slot of
      "by_nested": {
         "hits": {
            "total": 1,
-           "max_score": 0.2876821,
+           "max_score": 0.3616575,
            "hits": [
               {
                  "_index": "sales",
@@ -330,7 +330,7 @@ Top hits response snippet with a nested hit, which resides in the first slot of
                     "field": "comments",  <1>
                     "offset": 0  <2>
                  },
-                 "_score": 0.2876821,
+                 "_score": 0.3616575,
                  "_source": {
                     "comment": "This car could have better brakes", <3>
                     "username": "baddriver007"
diff --git a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc
index b05c56b8805..09fa707c5db 100644
--- a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc
+++ b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc
@@ -307,7 +307,7 @@ POST /_search

===== stdDev Function

-This function accepts a collection of doubles and and average, then returns the standard deviation of the values in that window.
+This function accepts a collection of doubles and an average, then returns the standard deviation of the values in that window.
`null` and `NaN` values are ignored; the sum is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `0.0` is returned as the result. diff --git a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc index 20aa072066b..3097ece21db 100644 --- a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc @@ -273,7 +273,6 @@ Tokenizer:: * <> Token Filters:: -* <> * <> * <> (disabled by default) @@ -292,7 +291,6 @@ PUT /standard_example "rebuilt_standard": { "tokenizer": "standard", "filter": [ - "standard", "lowercase" <1> ] } diff --git a/docs/reference/analysis/tokenfilters.asciidoc b/docs/reference/analysis/tokenfilters.asciidoc index ee891fdd09a..f531bc5d0e9 100644 --- a/docs/reference/analysis/tokenfilters.asciidoc +++ b/docs/reference/analysis/tokenfilters.asciidoc @@ -9,8 +9,6 @@ or add tokens (eg synonyms). Elasticsearch has a number of built in token filters which can be used to build <>. -include::tokenfilters/standard-tokenfilter.asciidoc[] - include::tokenfilters/asciifolding-tokenfilter.asciidoc[] include::tokenfilters/flatten-graph-tokenfilter.asciidoc[] @@ -37,6 +35,8 @@ include::tokenfilters/word-delimiter-graph-tokenfilter.asciidoc[] include::tokenfilters/multiplexer-tokenfilter.asciidoc[] +include::tokenfilters/condition-tokenfilter.asciidoc[] + include::tokenfilters/stemmer-tokenfilter.asciidoc[] include::tokenfilters/stemmer-override-tokenfilter.asciidoc[] diff --git a/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc index 73d35549da8..bd22b013334 100644 --- a/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc @@ -15,7 +15,7 @@ PUT /asciifold_example "analyzer" : { "default" : { "tokenizer" : "standard", - "filter" : ["standard", "asciifolding"] + "filter" : ["asciifolding"] } } } @@ -37,7 +37,7 @@ PUT /asciifold_example "analyzer" : { "default" : { "tokenizer" : "standard", - "filter" : ["standard", "my_ascii_folding"] + "filter" : ["my_ascii_folding"] } }, "filter" : { diff --git a/docs/reference/analysis/tokenfilters/condition-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/condition-tokenfilter.asciidoc new file mode 100644 index 00000000000..cff05559ab9 --- /dev/null +++ b/docs/reference/analysis/tokenfilters/condition-tokenfilter.asciidoc @@ -0,0 +1,90 @@ +[[analysis-condition-tokenfilter]] +=== Conditional Token Filter + +The conditional token filter takes a predicate script and a list of subfilters, and +only applies the subfilters to the current token if it matches the predicate. + +[float] +=== Options +[horizontal] +filter:: a chain of token filters to apply to the current token if the predicate + matches. These can be any token filters defined elsewhere in the index mappings. + +script:: a predicate script that determines whether or not the filters will be applied + to the current token. 
Note that only inline scripts are supported.
+
+[float]
+=== Settings example
+
+You can set it up like:
+
+[source,js]
+--------------------------------------------------
+PUT /condition_example
+{
+    "settings" : {
+        "analysis" : {
+            "analyzer" : {
+                "my_analyzer" : {
+                    "tokenizer" : "standard",
+                    "filter" : [ "my_condition" ]
+                }
+            },
+            "filter" : {
+                "my_condition" : {
+                    "type" : "condition",
+                    "filter" : [ "lowercase" ],
+                    "script" : {
+                        "source" : "token.getTerm().length() < 5"  <1>
+                    }
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+// CONSOLE
+
+<1> This will only apply the lowercase filter to terms that are less than 5
+characters in length
+
+And test it like:
+
+[source,js]
+--------------------------------------------------
+POST /condition_example/_analyze
+{
+  "analyzer" : "my_analyzer",
+  "text" : "What Flapdoodle"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+And it'd respond:
+
+[source,js]
+--------------------------------------------------
+{
+  "tokens": [
+    {
+      "token": "what",              <1>
+      "start_offset": 0,
+      "end_offset": 4,
+      "type": "<ALPHANUM>",
+      "position": 0
+    },
+    {
+      "token": "Flapdoodle",        <2>
+      "start_offset": 5,
+      "end_offset": 15,
+      "type": "<ALPHANUM>",
+      "position": 1
+    }
+  ]
+}
+--------------------------------------------------
+// TESTRESPONSE
+<1> The term `What` has been lowercased, because it is only 4 characters long
+<2> The term `Flapdoodle` has been left in its original case, because it doesn't pass
+    the predicate
\ No newline at end of file
diff --git a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc
index 956c5ad13d0..924903b9f65 100644
--- a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc
@@ -16,7 +16,7 @@ PUT /elision_example
        "analyzer" : {
            "default" : {
                "tokenizer" : "standard",
-                "filter" : ["standard", "elision"]
+                "filter" : ["elision"]
            }
        },
        "filter" : {
diff --git a/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc
index 05687f86691..33a927c4b98 100644
--- a/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc
@@ -26,7 +26,7 @@ PUT /keep_types_example
        "analyzer" : {
            "my_analyzer" : {
                "tokenizer" : "standard",
-                "filter" : ["standard", "lowercase", "extract_numbers"]
+                "filter" : ["lowercase", "extract_numbers"]
            }
        },
        "filter" : {
@@ -87,7 +87,7 @@ PUT /keep_types_exclude_example
        "analyzer" : {
            "my_analyzer" : {
                "tokenizer" : "standard",
-                "filter" : ["standard", "lowercase", "remove_numbers"]
+                "filter" : ["lowercase", "remove_numbers"]
            }
        },
        "filter" : {
diff --git a/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc
index 50c74942a01..b7385379be9 100644
--- a/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc
@@ -27,11 +27,11 @@ PUT /keep_words_example
        "analyzer" : {
            "example_1" : {
                "tokenizer" : "standard",
-                "filter" : ["standard", "lowercase", "words_till_three"]
+                "filter" : ["lowercase", "words_till_three"]
            },
            "example_2" : {
                "tokenizer" : "standard",
-                "filter" : ["standard", "lowercase", "words_in_file"]
+                "filter" : ["lowercase", "words_in_file"]
            }
        },
        "filter"
: { diff --git a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc index 93e1eed26b4..99ed03649ff 100644 --- a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc @@ -19,7 +19,7 @@ PUT /my_index "analyzer" : { "my_analyzer" : { "tokenizer" : "standard", - "filter" : ["standard", "lowercase", "my_snow"] + "filter" : ["lowercase", "my_snow"] } }, "filter" : { diff --git a/docs/reference/analysis/tokenfilters/standard-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/standard-tokenfilter.asciidoc deleted file mode 100644 index 0270bf71b4b..00000000000 --- a/docs/reference/analysis/tokenfilters/standard-tokenfilter.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -[[analysis-standard-tokenfilter]] -=== Standard Token Filter - -A token filter of type `standard` that normalizes tokens extracted with -the -<>. - -[TIP] -================================================== - -The `standard` token filter currently does nothing. It remains as a placeholder -in case some filtering function needs to be added in a future version. - -================================================== diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc index a13c6746d74..f59e2f3f2cf 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc @@ -13,7 +13,7 @@ PUT /my_index "analyzer" : { "my_analyzer" : { "tokenizer" : "standard", - "filter" : ["standard", "lowercase", "my_stemmer"] + "filter" : ["lowercase", "my_stemmer"] } }, "filter" : { diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index 9528b7829e3..d1ea1fad885 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -15,13 +15,14 @@ Which looks like: [source,txt] -------------------------------------------------- node-0 analyze 0 0 0 +... node-0 fetch_shard_started 0 0 0 node-0 fetch_shard_store 0 0 0 node-0 flush 0 0 0 ... node-0 write 0 0 0 -------------------------------------------------- -// TESTRESPONSE[s/\.\.\./(node-0 .+ 0 0 0\n)+/] +// TESTRESPONSE[s/\.\.\./(node-0 \\S+ 0 0 0\n)*/] // TESTRESPONSE[s/\d+/\\d+/ _cat] // The substitutions do two things: // 1. Expect any number of extra thread pools. This allows us to only list a @@ -45,6 +46,7 @@ The second column is the thread pool name -------------------------------------------------- name analyze +ccr (default distro only) fetch_shard_started fetch_shard_store flush @@ -81,6 +83,7 @@ active queue rejected 0 0 0 0 0 0 0 0 0 + 0 0 0 1 0 0 0 0 0 0 0 0 diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc index f093b6ebcfa..f92e364bae1 100644 --- a/docs/reference/cluster.asciidoc +++ b/docs/reference/cluster.asciidoc @@ -6,23 +6,70 @@ ["float",id="cluster-nodes"] == Node specification -Most cluster level APIs allow to specify which nodes to execute on (for -example, getting the node stats for a node). Nodes can be identified in -the APIs either using their internal node id, the node name, address, -custom attributes, or just the `_local` node receiving the request. For -example, here are some sample executions of nodes info: +Some cluster-level APIs may operate on a subset of the nodes which can be +specified with _node filters_. 
For example, the <>, +<>, and <> APIs +can all report results from a filtered set of nodes rather than from all nodes. + +_Node filters_ are written as a comma-separated list of individual filters, +each of which adds or removes nodes from the chosen subset. Each filter can be +one of the following: + +* `_all`, to add all nodes to the subset. +* `_local`, to add the local node to the subset. +* `_master`, to add the currently-elected master node to the subset. +* a node id or name, to add this node to the subset. +* an IP address or hostname, to add all matching nodes to the subset. +* a pattern, using `*` wildcards, which adds all nodes to the subset + whose name, address or hostname matches the pattern. +* `master:true`, `data:true`, `ingest:true` or `coordinating_only:true`, which + respectively add to the subset all master-eligible nodes, all data nodes, + all ingest nodes, and all coordinating-only nodes. +* `master:false`, `data:false`, `ingest:false` or `coordinating_only:false`, + which respectively remove from the subset all master-eligible nodes, all data + nodes, all ingest nodes, and all coordinating-only nodes. +* a pair of patterns, using `*` wildcards, of the form `attrname:attrvalue`, + which adds to the subset all nodes with a custom node attribute whose name + and value match the respective patterns. Custom node attributes are + configured by setting properties in the configuration file of the form + `node.attr.attrname: attrvalue`. + +NOTE: node filters run in the order in which they are given, which is important +if using filters that remove nodes from the set. For example +`_all,master:false` means all the nodes except the master-eligible ones, but +`master:false,_all` means the same as `_all` because the `_all` filter runs +after the `master:false` filter. + +NOTE: if no filters are given, the default is to select all nodes. However, if +any filters are given then they run starting with an empty chosen subset. This +means that filters such as `master:false` which remove nodes from the chosen +subset are only useful if they come after some other filters. When used on its +own, `master:false` selects no nodes. + +Here are some examples of the use of node filters with the +<> APIs. [source,js] -------------------------------------------------- -# Local +# If no filters are given, the default is to select all nodes +GET /_nodes +# Explicitly select all nodes +GET /_nodes/_all +# Select just the local node GET /_nodes/_local -# Address -GET /_nodes/10.0.0.3,10.0.0.4 -GET /_nodes/10.0.0.* -# Names +# Select the elected master node +GET /_nodes/_master +# Select nodes by name, which can include wildcards GET /_nodes/node_name_goes_here GET /_nodes/node_name_goes_* -# Attributes (set something like node.attr.rack: 2 in the config) +# Select nodes by address, which can include wildcards +GET /_nodes/10.0.0.3,10.0.0.4 +GET /_nodes/10.0.0.* +# Select nodes by role +GET /_nodes/_all,master:false +GET /_nodes/data:true,ingest:true +GET /_nodes/coordinating_only:true +# Select nodes by custom attribute (e.g. 
with something like `node.attr.rack: 2` in the configuration file)
GET /_nodes/rack:2
GET /_nodes/ra*:2
GET /_nodes/ra*:2*
diff --git a/docs/reference/cluster/nodes-hot-threads.asciidoc b/docs/reference/cluster/nodes-hot-threads.asciidoc
index c8fa2c9bf7c..541ee51a58a 100644
--- a/docs/reference/cluster/nodes-hot-threads.asciidoc
+++ b/docs/reference/cluster/nodes-hot-threads.asciidoc
@@ -1,12 +1,23 @@
 [[cluster-nodes-hot-threads]]
 == Nodes hot_threads
-An API allowing to get the current hot threads on each node in the
-cluster. Endpoints are `/_nodes/hot_threads`, and
-`/_nodes/{nodesIds}/hot_threads`.
+This API yields a breakdown of the hot threads on each selected node in the
+cluster. Its endpoints are `/_nodes/hot_threads` and
+`/_nodes/{nodes}/hot_threads`:
-The output is plain text with a breakdown of each node's top hot
-threads. Parameters allowed are:
+[source,js]
+--------------------------------------------------
+GET /_nodes/hot_threads
+GET /_nodes/nodeId1,nodeId2/hot_threads
+--------------------------------------------------
+// CONSOLE
+
+The first command gets the hot threads of all the nodes in the cluster. The
+second command gets the hot threads of only `nodeId1` and `nodeId2`. Nodes can
+be selected using <>.
+
+The output is plain text with a breakdown of each node's top hot threads. The
+allowed parameters are:
 [horizontal]
 `threads`:: number of hot threads to provide, defaults to 3.
diff --git a/docs/reference/cluster/nodes-reload-secure-settings.asciidoc b/docs/reference/cluster/nodes-reload-secure-settings.asciidoc
new file mode 100644
index 00000000000..f02ac8e4657
--- /dev/null
+++ b/docs/reference/cluster/nodes-reload-secure-settings.asciidoc
@@ -0,0 +1,55 @@
+[[cluster-nodes-reload-secure-settings]]
+== Nodes Reload Secure Settings
+
+The cluster nodes reload secure settings API is used to re-read the
+local node's encrypted keystore. Specifically, it will prompt the keystore to
+be decrypted and read across the cluster. The keystore's plain content is
+used to reinitialize all compatible plugins. A compatible plugin can be
+reinitialized without restarting the node. The operation is
+complete when all compatible plugins have finished reinitializing. Subsequently,
+the keystore is closed and any changes to it will not be reflected on the node.
+
+[source,js]
+--------------------------------------------------
+POST _nodes/reload_secure_settings
+POST _nodes/nodeId1,nodeId2/reload_secure_settings
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:node]
+// TEST[s/nodeId1,nodeId2/*/]
+
+The first command reloads the keystore on every node. The second reloads it
+only on `nodeId1` and `nodeId2`. The node selection options are
+detailed <>.
+
+Note: It is an error for the secure settings to be inconsistent across the
+cluster nodes; however, this consistency is not enforced. Reloading only
+specific nodes is therefore not standard practice; it is only justifiable when
+retrying a failed reload operation.
+
+[float]
+[[rest-reload-secure-settings]]
+==== REST Reload Secure Settings Response
+
+The response contains the `nodes` object, which is a map, keyed by the
+node id. Each value has the node `name` and an optional `reload_exception`
+field. The `reload_exception` field is a serialization of the exception
+that was thrown during the reload process, if any.
+ +[source,js] +-------------------------------------------------- +{ + "_nodes": { + "total": 1, + "successful": 1, + "failed": 0 + }, + "cluster_name": "my_cluster", + "nodes": { + "pQHNt5rXTTWNvUgOrdynKg": { + "name": "node-0" + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"my_cluster"/$body.cluster_name/] +// TESTRESPONSE[s/"pQHNt5rXTTWNvUgOrdynKg"/\$node_name/] diff --git a/docs/reference/cluster/remote-info.asciidoc b/docs/reference/cluster/remote-info.asciidoc index 3dfcc201e7a..a53a26873ce 100644 --- a/docs/reference/cluster/remote-info.asciidoc +++ b/docs/reference/cluster/remote-info.asciidoc @@ -10,7 +10,7 @@ GET /_remote/info ---------------------------------- // CONSOLE -This command returns returns connection and endpoint information keyed by +This command returns connection and endpoint information keyed by the configured remote cluster alias. [float] @@ -25,7 +25,7 @@ the configured remote cluster alias. `num_nodes_connected`:: The number of connected nodes in the remote cluster. -`max_connection_per_cluster`:: +`max_connections_per_cluster`:: The maximum number of connections maintained for the remote cluster. `initial_connect_timeout`:: diff --git a/docs/reference/cluster/reroute.asciidoc b/docs/reference/cluster/reroute.asciidoc index f076a7b8358..276a43f660d 100644 --- a/docs/reference/cluster/reroute.asciidoc +++ b/docs/reference/cluster/reroute.asciidoc @@ -31,7 +31,7 @@ POST /_cluster/reroute // CONSOLE // TEST[skip:doc tests run with only a single node] -It is important to note that that after processing any reroute commands +It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. For example, if the requested allocation includes moving a diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 3de85041871..78bccc8bd69 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -213,3 +213,12 @@ Will return, for example: // 3. All of the numbers and strings on the right hand side of *every* field in // the response are ignored. So we're really only asserting things about the // the shape of this response, not the values in it. + +This API can be restricted to a subset of the nodes using the `?nodeId` +parameter, which accepts <>: + +[source,js] +-------------------------------------------------- +GET /_cluster/stats?nodeId=node1,node*,master:false +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc index 2e59da42224..d6dfa71b76b 100644 --- a/docs/reference/cluster/tasks.asciidoc +++ b/docs/reference/cluster/tasks.asciidoc @@ -127,7 +127,7 @@ might look like: The new `description` field contains human readable text that identifies the particular request that the task is performing such as identifying the search request being performed by a search task like the example above. Other kinds of -task have have different descriptions, like <> which +task have different descriptions, like <> which has the search and the destination, or <> which just has the number of requests and the destination indices. 
Many requests will only have an empty description because more detailed information about the request is not diff --git a/docs/reference/commands/certgen.asciidoc b/docs/reference/commands/certgen.asciidoc index 3a8b15fbd28..956a4637ed3 100644 --- a/docs/reference/commands/certgen.asciidoc +++ b/docs/reference/commands/certgen.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="gold+"] [[certgen]] -== certgen +== elasticsearch-certgen -deprecated[6.1,Replaced by <>.] +deprecated[6.1,Replaced by <>.] The `elasticsearch-certgen` command simplifies the creation of certificate authorities (CA), certificate signing requests (CSR), and signed certificates diff --git a/docs/reference/commands/index.asciidoc b/docs/reference/commands/index.asciidoc index 164d2fc0e84..134ac1edbd0 100644 --- a/docs/reference/commands/index.asciidoc +++ b/docs/reference/commands/index.asciidoc @@ -1,11 +1,11 @@ -[role="xpack"] -[[xpack-commands]] -= {xpack} Commands +[[commands]] += Command line tools [partintro] -- -{xpack} includes commands that help you configure security: +{es} provides the following tools for configuring security and performing other +tasks from the command line: * <> * <> diff --git a/docs/reference/commands/saml-metadata.asciidoc b/docs/reference/commands/saml-metadata.asciidoc index 069c7135c01..5309f83288f 100644 --- a/docs/reference/commands/saml-metadata.asciidoc +++ b/docs/reference/commands/saml-metadata.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="gold+"] [[saml-metadata]] -== saml-metadata +== elasticsearch-saml-metadata The `elasticsearch-saml-metadata` command can be used to generate a SAML 2.0 Service Provider Metadata file. diff --git a/docs/reference/commands/setup-passwords.asciidoc b/docs/reference/commands/setup-passwords.asciidoc index a7dcd25d65e..e2d4dfdc13d 100644 --- a/docs/reference/commands/setup-passwords.asciidoc +++ b/docs/reference/commands/setup-passwords.asciidoc @@ -4,7 +4,7 @@ == elasticsearch-setup-passwords The `elasticsearch-setup-passwords` command sets the passwords for the built-in -`elastic`, `kibana`, `logstash_system`, and `beats_system` users. +`elastic`, `kibana`, `logstash_system`, `beats_system`, and `apm_system` users. [float] === Synopsis diff --git a/docs/reference/commands/users-command.asciidoc b/docs/reference/commands/users-command.asciidoc index e53e0815c5d..cf678f2138d 100644 --- a/docs/reference/commands/users-command.asciidoc +++ b/docs/reference/commands/users-command.asciidoc @@ -1,10 +1,7 @@ [role="xpack"] [testenv="gold+"] [[users-command]] -== Users Command -++++ -users -++++ +== elasticsearch-users If you use file-based user authentication, the `elasticsearch-users` command enables you to add and remove users, assign user roles, and manage passwords. 
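For instance, a minimal sketch of what managing a file-realm user with this tool can look like (the username, password, and role here are purely illustrative, not taken from the change above):

[source,shell]
--------------------------------------------------
# add a file-realm user and assign the built-in "superuser" role
bin/elasticsearch-users useradd jacknich -p theshiningpwd -r superuser

# list the file-realm users to verify the change
bin/elasticsearch-users list
--------------------------------------------------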
diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 8229f74bdd0..c69597e74fd 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -1141,7 +1141,7 @@ And the response (partially shown): }, "hits" : { "total" : 1000, - "max_score" : 0.0, + "max_score" : null, "hits" : [ ] }, "aggregations" : { diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc index 37901cb3abe..c09922fe63f 100644 --- a/docs/reference/how-to/recipes/stemming.asciidoc +++ b/docs/reference/how-to/recipes/stemming.asciidoc @@ -143,13 +143,13 @@ GET index/_search }, "hits": { "total": 1, - "max_score": 0.80259144, + "max_score": 0.8025915, "hits": [ { "_index": "index", "_type": "_doc", "_id": "1", - "_score": 0.80259144, + "_score": 0.8025915, "_source": { "body": "Ski resort" } @@ -200,13 +200,13 @@ GET index/_search }, "hits": { "total": 1, - "max_score": 0.80259144, + "max_score": 0.8025915, "hits": [ { "_index": "index", "_type": "_doc", "_id": "1", - "_score": 0.80259144, + "_score": 0.8025915, "_source": { "body": "Ski resort" } diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index cd37d0735e9..bb5b0edd2e6 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -165,12 +165,15 @@ GET index/_search // TEST[continued] [float] -=== Mappings +=== Consider mapping identifiers as `keyword` The fact that some data is numeric does not mean it should always be mapped as a -<>. Typically, fields storing identifiers such as an `ISBN` -or any number identifying a record from another database, might benefit from -being mapped as <> rather than `integer` or `long`. +<>. The way that Elasticsearch indexes numbers optimizes +for `range` queries while `keyword` fields are better at `term` queries. Typically, +fields storing identifiers such as an `ISBN` or any number identifying a record +from another database are rarely used in `range` queries or aggregations. This is +why they might benefit from being mapped as <> rather than as +`integer` or `long`. [float] === Avoid scripts @@ -349,15 +352,6 @@ WARNING: Loading data into the filesystem cache eagerly on too many indices or too many files will make search _slower_ if the filesystem cache is not large enough to hold all the data. Use with caution. -[float] -=== Map identifiers as `keyword` - -When you have numeric identifiers in your documents, it is tempting to map them -as numbers, which is consistent with their json type. However, the way that -Elasticsearch indexes numbers optimizes for `range` queries while `keyword` -fields are better at `term` queries. Since identifiers are never used in `range` -queries, they should be mapped as a `keyword`. 
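The "Consider mapping identifiers as `keyword`" advice above lends itself to a short sketch. The index and field names below are invented for illustration, and the snippet assumes the single-type (`_doc`) mappings used elsewhere in this reference:

[source,js]
--------------------------------------------------
PUT /catalog
{
  "mappings": {
    "_doc": {
      "properties": {
        "isbn": { "type": "keyword" } <1>
      }
    }
  }
}

GET /catalog/_search
{
  "query": {
    "term": { "isbn": "9780131103627" } <2>
  }
}
--------------------------------------------------
// NOTCONSOLE
<1> The identifier is numeric-looking but is indexed as `keyword`.
<2> Exact lookups then run as `term` queries, which is what `keyword` fields are optimized for.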
- [float] === Use index sorting to speed up conjunctions diff --git a/docs/reference/images/msi_installer/msi_installer_configuration.png b/docs/reference/images/msi_installer/msi_installer_configuration.png index 058df78792f..36bae6cc519 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_configuration.png and b/docs/reference/images/msi_installer/msi_installer_configuration.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_help.png b/docs/reference/images/msi_installer/msi_installer_help.png index 9b63c512100..458b0821c1f 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_help.png and b/docs/reference/images/msi_installer/msi_installer_help.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_installing.png b/docs/reference/images/msi_installer/msi_installer_installing.png index 498d927ce2f..590c52371a7 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_installing.png and b/docs/reference/images/msi_installer/msi_installer_installing.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_locations.png b/docs/reference/images/msi_installer/msi_installer_locations.png index a8c81608974..ba7151e3714 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_locations.png and b/docs/reference/images/msi_installer/msi_installer_locations.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_no_service.png b/docs/reference/images/msi_installer/msi_installer_no_service.png index d26bb75f265..fbe9a0510c7 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_no_service.png and b/docs/reference/images/msi_installer/msi_installer_no_service.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_plugins.png b/docs/reference/images/msi_installer/msi_installer_plugins.png index 3a6901a18e2..e58f426a47d 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_plugins.png and b/docs/reference/images/msi_installer/msi_installer_plugins.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_selected_plugins.png b/docs/reference/images/msi_installer/msi_installer_selected_plugins.png index 8a817c81f8d..e58f426a47d 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_selected_plugins.png and b/docs/reference/images/msi_installer/msi_installer_selected_plugins.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_service.png b/docs/reference/images/msi_installer/msi_installer_service.png index b1445fa2d6a..c7fae13637b 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_service.png and b/docs/reference/images/msi_installer/msi_installer_service.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_success.png b/docs/reference/images/msi_installer/msi_installer_success.png index 337cbc9eb50..3a58467ae18 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_success.png and b/docs/reference/images/msi_installer/msi_installer_success.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_uninstall.png b/docs/reference/images/msi_installer/msi_installer_uninstall.png index 26c9bcb7c9a..6b5b69a5768 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_uninstall.png and b/docs/reference/images/msi_installer/msi_installer_uninstall.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_upgrade_configuration.png 
b/docs/reference/images/msi_installer/msi_installer_upgrade_configuration.png index a72fdf2ff91..7ca413bb299 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_upgrade_configuration.png and b/docs/reference/images/msi_installer/msi_installer_upgrade_configuration.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_upgrade_notice.png b/docs/reference/images/msi_installer/msi_installer_upgrade_notice.png index a4842b8d6d0..e5ee18b5208 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_upgrade_notice.png and b/docs/reference/images/msi_installer/msi_installer_upgrade_notice.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_upgrade_plugins.png b/docs/reference/images/msi_installer/msi_installer_upgrade_plugins.png index 6399c99baac..3e7496505f7 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_upgrade_plugins.png and b/docs/reference/images/msi_installer/msi_installer_upgrade_plugins.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_xpack.png b/docs/reference/images/msi_installer/msi_installer_xpack.png index 3f5f6d97594..e457a578877 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_xpack.png and b/docs/reference/images/msi_installer/msi_installer_xpack.png differ diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 54c0c1c1b15..70c3d09dc93 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -63,12 +63,6 @@ corruption is detected, it will prevent the shard from being opened. Accepts: Check for both physical and logical corruption. This is much more expensive in terms of CPU and memory usage. -`fix`:: - - Check for both physical and logical corruption. Segments that were reported - as corrupted will be automatically removed. This option *may result in data loss*. - Use with extreme caution! - WARNING: Expert only. Checking shards may take a lot of time on large indices. -- @@ -185,9 +179,9 @@ specific index module: `index.blocks.write`:: -   Set to `true` to disable data write operations against the index. Unlike `read_only', -   this setting does not affect metadata. For instance, you can close an index with a `write` -   block, but not an index with a `read_only` block. + Set to `true` to disable data write operations against the index. Unlike `read_only`, + this setting does not affect metadata. For instance, you can close an index with a `write` + block, but not an index with a `read_only` block. 
`index.blocks.metadata`::
diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc
index f5d5610ca1a..cf5cab106f8 100644
--- a/docs/reference/index-modules/similarity.asciidoc
+++ b/docs/reference/index-modules/similarity.asciidoc
@@ -295,27 +295,27 @@ Which yields:
 "details": []
 },
 {
- "value": 2.0,
+ "value": 2,
 "description": "field.docCount",
 "details": []
 },
 {
- "value": 4.0,
+ "value": 4,
 "description": "field.sumDocFreq",
 "details": []
 },
 {
- "value": 5.0,
+ "value": 5,
 "description": "field.sumTotalTermFreq",
 "details": []
 },
 {
- "value": 1.0,
+ "value": 1,
 "description": "term.docFreq",
 "details": []
 },
 {
- "value": 2.0,
+ "value": 2,
 "description": "term.totalTermFreq",
 "details": []
 },
@@ -325,7 +325,7 @@ Which yields:
 "details": []
 },
 {
- "value": 3.0,
+ "value": 3,
 "description": "doc.length",
 "details": []
 }
@@ -469,27 +469,27 @@ GET /index/_search?explain=true
 "details": []
 },
 {
- "value": 2.0,
+ "value": 2,
 "description": "field.docCount",
 "details": []
 },
 {
- "value": 4.0,
+ "value": 4,
 "description": "field.sumDocFreq",
 "details": []
 },
 {
- "value": 5.0,
+ "value": 5,
 "description": "field.sumTotalTermFreq",
 "details": []
 },
 {
- "value": 1.0,
+ "value": 1,
 "description": "term.docFreq",
 "details": []
 },
 {
- "value": 2.0,
+ "value": 2,
 "description": "term.totalTermFreq",
 "details": []
 },
@@ -499,7 +499,7 @@ GET /index/_search?explain=true
 "details": []
 },
 {
- "value": 3.0,
+ "value": 3,
 "description": "doc.length",
 "details": []
 }
diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc
index a1e00bac616..c2b3d700e9b 100644
--- a/docs/reference/index-modules/store.asciidoc
+++ b/docs/reference/index-modules/store.asciidoc
@@ -67,6 +67,13 @@ process equal to the size of the file being mapped. Before using this class,
 be sure you have allowed plenty of
 <>.
+[[allow-mmapfs]]
+You can restrict the use of the `mmapfs` store type via the setting
+`node.store.allow_mmapfs`. This is a boolean setting indicating whether or not
+`mmapfs` is allowed. The default is to allow `mmapfs`. This setting is useful,
+for example, if you are in an environment where you cannot control the creation
+of many memory maps and therefore need to disable the ability to use `mmapfs`.
+
 === Pre-loading data into the file system cache
 NOTE: This is an expert setting, the details of which may change in the future.
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc
index 7d51e4aa512..e4debd30c03 100644
--- a/docs/reference/index.asciidoc
+++ b/docs/reference/index.asciidoc
@@ -16,7 +16,7 @@ include::setup.asciidoc[]
 include::setup/setup-xes.asciidoc[]
-include::{xes-repo-dir}/monitoring/configuring-monitoring.asciidoc[]
+include::monitoring/configuring-monitoring.asciidoc[]
 include::{xes-repo-dir}/security/configuring-es.asciidoc[]
@@ -61,7 +61,7 @@ include::sql/index.asciidoc[]
 include::monitoring/index.asciidoc[]
-include::{xes-repo-dir}/rollup/index.asciidoc[]
+include::rollup/index.asciidoc[]
 include::rest-api/index.asciidoc[]
diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc
index 37c616b2349..0241751a4df 100644
--- a/docs/reference/ingest/ingest-node.asciidoc
+++ b/docs/reference/ingest/ingest-node.asciidoc
@@ -1049,6 +1049,199 @@ understands this to mean `2016-04-01` as is explained in the <>.
+
+[[dissect-processor]]
+=== Dissect Processor
+
+Similar to the <>, dissect also extracts structured fields out of a single text field
However unlike the <>, dissect does not use +https://en.wikipedia.org/wiki/Regular_expression[Regular Expressions]. This allows dissect's syntax to be simple and for +some cases faster than the <>. + +Dissect matches a single text field against a defined pattern. + +For example the following pattern: +[source,txt] +-------------------------------------------------- +%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size} +-------------------------------------------------- +will match a log line of this format: +[source,txt] +-------------------------------------------------- +1.2.3.4 - - [30/Apr/1998:22:00:52 +0000] \"GET /english/venues/cities/images/montpellier/18.gif HTTP/1.0\" 200 3171 +-------------------------------------------------- +and result in a document with the following fields: +[source,js] +-------------------------------------------------- +"doc": { + "_index": "_index", + "_type": "_type", + "_id": "_id", + "_source": { + "request": "/english/venues/cities/images/montpellier/18.gif", + "auth": "-", + "ident": "-", + "verb": "GET", + "@timestamp": "30/Apr/1998:22:00:52 +0000", + "size": "3171", + "clientip": "1.2.3.4", + "httpversion": "1.0", + "status": "200" + } +} +-------------------------------------------------- +// NOTCONSOLE + +A dissect pattern is defined by the parts of the string that will be discarded. In the example above the first part +to be discarded is a single space. Dissect finds this space, then assigns the value of `clientip` is everything up +until that space. +Later dissect matches the `[` and then `]` and then assigns `@timestamp` to everything in-between `[` and `]`. +Paying special attention the parts of the string to discard will help build successful dissect patterns. + +Successful matches require all keys in a pattern to have a value. If any of the `%{keyname}` defined in the pattern do +not have a value, then an exception is thrown and may be handled by the <> directive. +An empty key `%{}` or a <> can be used to match values, but exclude the value from +the final document. All matched values are represented as string data types. The <> +may be used to convert to expected data type. + +Dissect also supports <> that can change dissect's default +behavior. For example you can instruct dissect to ignore certain fields, append fields, skip over padding, etc. +See <> for more information. + +[[dissect-options]] +.Dissect Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to dissect +| `pattern` | yes | - | The pattern to apply to the field +| `append_separator`| no | "" (empty string) | The character(s) that separate the appended fields. +| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +| ` +|====== + +[source,js] +-------------------------------------------------- +{ + "dissect": { + "field": "message", + "pattern" : "%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size}" + } +} +-------------------------------------------------- +// NOTCONSOLE +[[dissect-key-modifiers]] +==== Dissect key modifiers +Key modifiers can change the default behavior for dissection. Key modifiers may be found on the left or right +of the `%{keyname}` always inside the `%{` and `}`. For example `%{+keyname ->}` has the append and right padding +modifiers. 
+
+.Dissect Key Modifiers
+[options="header"]
+|======
+| Modifier | Name | Position | Example | Description | Details
+| `->` | Skip right padding | (far) right | `%{keyname1->}` | Skips any repeated characters to the right | <>
+| `+` | Append | left | `%{+keyname} %{+keyname}` | Appends two or more fields together | <>
+| `+` with `/n` | Append with order | left and right | `%{+keyname/2} %{+keyname/1}` | Appends two or more fields together in the order specified | <>
+| `?` | Named skip key | left | `%{?ignoreme}` | Skips the matched value in the output. Same behavior as `%{}` | <>
+| `*` and `&` | Reference keys | left | `%{*r1} %{&r1}` | Sets the output key as value of `*` and output value of `&` | <>
+|======
+
+[[dissect-modifier-skip-right-padding]]
+===== Right padding modifier (`->`)
+
+The algorithm that performs the dissection is very strict in that it requires all characters in the pattern to match
+the source string. For example, the pattern `%{fookey} %{barkey}` (1 space) will match the string "foo{nbsp}bar"
+(1 space), but will not match the string "foo{nbsp}{nbsp}bar" (2 spaces) since the pattern has only 1 space and the
+source string has 2 spaces.
+
+The right padding modifier helps with this case. With the right padding modifier added, as in `%{fookey->} %{barkey}`,
+the pattern will now match "foo{nbsp}bar" (1 space) and "foo{nbsp}{nbsp}bar" (2 spaces)
+and even "foo{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}bar" (10 spaces).
+
+Use the right padding modifier to allow for repetition of the characters after a `%{keyname->}`.
+
+The right padding modifier may be placed on any key with any other modifiers. It should always be the furthest right
+modifier. For example: `%{+keyname/1->}` and `%{->}`.
+
+Right padding modifier example
+|======
+| *Pattern* | `%{ts->} %{level}`
+| *Input* | 1998-08-10T17:15:42,466{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}WARN
+| *Result* a|
+* ts = 1998-08-10T17:15:42,466
+* level = WARN
+|======
+
+The right padding modifier may be used with an empty key to help skip unwanted data. For example, the same input string,
+but wrapped with brackets, requires the use of an empty right-padded key to achieve the same result.
+
+Right padding modifier with empty key example
+|======
+| *Pattern* | `[%{ts}]%{->}[%{level}]`
+| *Input* | [1998-08-10T17:15:42,466]{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}[WARN]
+| *Result* a|
+* ts = 1998-08-10T17:15:42,466
+* level = WARN
+|======
+
+===== Append modifier (`+`)
+[[dissect-modifier-append-key]]
+Dissect supports appending two or more results together for the output.
+Values are appended left to right. An append separator can be specified.
+In this example, the `append_separator` is defined as a space.
+
+Append modifier example
+|======
+| *Pattern* | `%{+name} %{+name} %{+name} %{+name}`
+| *Input* | john jacob jingleheimer schmidt
+| *Result* a|
+* name = john jacob jingleheimer schmidt
+|======
+
+===== Append with order modifier (`+` and `/n`)
+[[dissect-modifier-append-key-with-order]]
+Dissect supports appending two or more results together for the output.
+Values are appended based on the order defined (`/n`). An append separator can be specified.
+In this example, the `append_separator` is defined as a comma.
+
+Append with order modifier example
+|======
+| *Pattern* | `%{+name/2} %{+name/4} %{+name/3} %{+name/1}`
+| *Input* | john jacob jingleheimer schmidt
+| *Result* a|
+* name = schmidt,john,jingleheimer,jacob
+|======
+
+===== Named skip key (`?`)
+[[dissect-modifier-named-skip-key]]
+Dissect supports ignoring matches in the final result. This can be done with an empty key `%{}`, but for readability
+it may be desired to give that empty key a name.
+
+Named skip key modifier example
+|======
+| *Pattern* | `%{clientip} %{?ident} %{?auth} [%{@timestamp}]`
+| *Input* | 1.2.3.4 - - [30/Apr/1998:22:00:52 +0000]
+| *Result* a|
+* clientip = 1.2.3.4
+* @timestamp = 30/Apr/1998:22:00:52 +0000
+|======
+
+===== Reference keys (`*` and `&`)
+[[dissect-modifier-reference-keys]]
+Dissect supports using parsed values as the key/value pairings for the structured content. Imagine a system that
+partially logs in key/value pairs. Reference keys allow you to maintain that key/value relationship.
+
+Reference key modifier example
+|======
+| *Pattern* | `[%{ts}] [%{level}] %{*p1}:%{&p1} %{*p2}:%{&p2}`
+| *Input* | [2018-08-10T17:15:42,466] [ERR] ip:1.2.3.4 error:REFUSED
+| *Result* a|
+* ts = 2018-08-10T17:15:42,466
+* level = ERR
+* ip = 1.2.3.4
+* error = REFUSED
+|======
+
 [[dot-expand-processor]]
 === Dot Expander Processor
diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc
index 31957344baf..3cfa0a96bfd 100644
--- a/docs/reference/mapping.asciidoc
+++ b/docs/reference/mapping.asciidoc
@@ -77,7 +77,8 @@ can be created manually or dynamically, in order to prevent bad documents from
 causing a mapping explosion:
 `index.mapping.total_fields.limit`::
- The maximum number of fields in an index. The default value is `1000`.
+ The maximum number of fields in an index. Field and object mappings, as well as
+ field aliases, count towards this limit. The default value is `1000`.
 `index.mapping.depth.limit`::
 The maximum depth for a field, which is measured as the number of inner
diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc
index bdb00916755..4fbed664498 100644
--- a/docs/reference/mapping/dynamic/templates.asciidoc
+++ b/docs/reference/mapping/dynamic/templates.asciidoc
@@ -38,10 +38,10 @@ Dynamic templates are specified as an array of named objects:
 <3> The mapping that the matched field should use.
-Templates are processed in order -- the first matching template wins. New
-templates can be appended to the end of the list with the
-<> API. If a new template has the same
-name as an existing template, it will replace the old version.
+Templates are processed in order -- the first matching template wins. When
+putting new dynamic templates through the <> API,
+all existing templates are overwritten. This allows for dynamic templates to be
+reordered or deleted after they were initially added.
 [[match-mapping-type]]
 ==== `match_mapping_type`
diff --git a/docs/reference/mapping/params/ignore-above.asciidoc b/docs/reference/mapping/params/ignore-above.asciidoc
index 95704c6c8bb..fe7c6881a06 100644
--- a/docs/reference/mapping/params/ignore-above.asciidoc
+++ b/docs/reference/mapping/params/ignore-above.asciidoc
@@ -2,6 +2,9 @@
 === `ignore_above`
 Strings longer than the `ignore_above` setting will not be indexed or stored.
+For arrays of strings, `ignore_above` will be applied to each array element separately, and string elements longer than `ignore_above` will not be indexed or stored.
+
+NOTE: All strings/array elements will still be present in the `_source` field, if the latter is enabled, which is the default in Elasticsearch.
 [source,js]
 --------------------------------------------------
diff --git a/docs/reference/mapping/params/normalizer.asciidoc b/docs/reference/mapping/params/normalizer.asciidoc
index 3688a0e9454..73110cd11f5 100644
--- a/docs/reference/mapping/params/normalizer.asciidoc
+++ b/docs/reference/mapping/params/normalizer.asciidoc
@@ -151,7 +151,7 @@ returns
 },
 "hits": {
 "total": 3,
- "max_score": 0.0,
+ "max_score": null,
 "hits": []
 },
 "aggregations": {
diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc
index 066d3ce1ac5..e4502d37360 100644
--- a/docs/reference/mapping/types/percolator.asciidoc
+++ b/docs/reference/mapping/types/percolator.asciidoc
@@ -446,7 +446,6 @@ PUT my_queries1
 "type": "custom",
 "tokenizer": "standard",
 "filter": [
- "standard",
 "lowercase",
 "wildcard_edge_ngram"
 ]
@@ -597,7 +596,6 @@ PUT my_queries2
 "type": "custom",
 "tokenizer": "standard",
 "filter": [
- "standard",
 "lowercase",
 "reverse",
 "wildcard_edge_ngram"
@@ -607,7 +605,6 @@ PUT my_queries2
 "type": "custom",
 "tokenizer": "standard",
 "filter": [
- "standard",
 "lowercase",
 "reverse"
 ]
diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc
index e2336bd5cb0..db64e87412e 100644
--- a/docs/reference/mapping/types/text.asciidoc
+++ b/docs/reference/mapping/types/text.asciidoc
@@ -99,7 +99,7 @@ The following parameters are accepted by `text` fields:
 `index_phrases`::
 If enabled, two-term word combinations ('shingles') are indexed into a separate
- field. This allows exact phrase queries to run more efficiently, at the expense
+ field. This allows exact phrase queries (no slop) to run more efficiently, at the expense
 of a larger index. Note that this works best when stopwords are not removed,
 as phrases containing stopwords will not use the subsidiary field and will fall back to
 a standard phrase query. Accepts `true` or `false` (default).
@@ -171,4 +171,4 @@ PUT my_index
 --------------------------------
 // CONSOLE
 <1> `min_chars` must be greater than zero, defaults to 2
-<2> `max_chars` must be greater than or equal to `min_chars` and less than 20, defaults to 5
\ No newline at end of file
+<2> `max_chars` must be greater than or equal to `min_chars` and less than 20, defaults to 5
diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc
index 42fd6b7afbe..924a6984dc0 100644
--- a/docs/reference/migration/migrate_7_0.asciidoc
+++ b/docs/reference/migration/migrate_7_0.asciidoc
@@ -39,6 +39,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x.
* <>
* <>
* <>
+* <>
 include::migrate_7_0/aggregations.asciidoc[]
 include::migrate_7_0/analysis.asciidoc[]
@@ -53,4 +54,5 @@ include::migrate_7_0/java.asciidoc[]
 include::migrate_7_0/settings.asciidoc[]
 include::migrate_7_0/scripting.asciidoc[]
 include::migrate_7_0/snapshotstats.asciidoc[]
-include::migrate_7_0/restclient.asciidoc[]
\ No newline at end of file
+include::migrate_7_0/restclient.asciidoc[]
+include::migrate_7_0/low_level_restclient.asciidoc[]
diff --git a/docs/reference/migration/migrate_7_0/aggregations.asciidoc b/docs/reference/migration/migrate_7_0/aggregations.asciidoc
index b4f29935be9..08f181b2919 100644
--- a/docs/reference/migration/migrate_7_0/aggregations.asciidoc
+++ b/docs/reference/migration/migrate_7_0/aggregations.asciidoc
@@ -21,5 +21,3 @@ has been removed. `missing_bucket` should be used instead.
 The object used to share aggregation state between the scripts in a Scripted Metric
 Aggregation is now a variable called `state` available in the script context, rather than
 being provided via the `params` object as `params._agg`.
-
-The old `params._agg` variable is still available as well.
diff --git a/docs/reference/migration/migrate_7_0/analysis.asciidoc b/docs/reference/migration/migrate_7_0/analysis.asciidoc
index db617d3301f..6e6cc5b078d 100644
--- a/docs/reference/migration/migrate_7_0/analysis.asciidoc
+++ b/docs/reference/migration/migrate_7_0/analysis.asciidoc
@@ -22,3 +22,7 @@ The `delimited_payload_filter` was deprecated and renamed to `delimited_payload`
 Using it in indices created before 7.0 will issue deprecation warnings. Using the old
 name in new indices created in 7.0 will throw an error. Use the new name `delimited_payload`
 instead.
+
+==== `standard` filter has been removed
+
+The `standard` token filter has been removed because it doesn't change anything in the stream.
diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc
index 689b941ef6b..ce2d817ac50 100644
--- a/docs/reference/migration/migrate_7_0/api.asciidoc
+++ b/docs/reference/migration/migrate_7_0/api.asciidoc
@@ -1,5 +1,5 @@
 [[breaking_70_api_changes]]
-=== Breaking API changes in 7.0
+=== API changes
 ==== Camel case and underscore parameters deprecated in 6.x have been removed
 A number of duplicate parameters deprecated in 6.x have been removed from
diff --git a/docs/reference/migration/migrate_7_0/indices.asciidoc b/docs/reference/migration/migrate_7_0/indices.asciidoc
index bab7b602220..a47cc6f4324 100644
--- a/docs/reference/migration/migrate_7_0/indices.asciidoc
+++ b/docs/reference/migration/migrate_7_0/indices.asciidoc
@@ -78,3 +78,7 @@ The parent circuit breaker defines a new setting `indices.breaker.total.use_real
 heap memory instead of only considering the reserved memory by child circuit breakers. When
 this setting is `true`, the default parent breaker limit also changes from 70% to 95% of
 the JVM heap size. The previous behavior can be restored by setting `indices.breaker.total.use_real_memory`
 to `false`.
+
+==== `fix` value for `index.shard.check_on_startup` is removed
+
+The deprecated option value `fix` for the setting `index.shard.check_on_startup` is no longer supported.
\ No newline at end of file
diff --git a/docs/reference/migration/migrate_7_0/java.asciidoc b/docs/reference/migration/migrate_7_0/java.asciidoc
index 169943a16ac..ea263283291 100644
--- a/docs/reference/migration/migrate_7_0/java.asciidoc
+++ b/docs/reference/migration/migrate_7_0/java.asciidoc
@@ -12,3 +12,9 @@
 The `prepareExecute` method which created a request builder has been
 removed from the client api. Instead, construct
 a builder for the appropriate request directly.
+
+==== Some Aggregation classes have moved packages
+
+* All classes present in `org.elasticsearch.search.aggregations.metrics.*` packages
+were moved to a single `org.elasticsearch.search.aggregations.metrics` package.
+
diff --git a/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc b/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc
new file mode 100644
index 00000000000..77f5266763f
--- /dev/null
+++ b/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc
@@ -0,0 +1,14 @@
+[[breaking_70_low_level_restclient_changes]]
+=== Low-level REST client changes
+
+==== Deprecated flavors of performRequest have been removed
+
+In 6.4.0 we deprecated the flavors of `performRequest` and `performRequestAsync`
+that do not take `Request` objects, in favor of the flavors that do, because
+those flavors can be extended without breaking backwards compatibility. The
+deprecated flavors have now been removed.
+
+==== Removed setHosts
+
+We deprecated `setHosts` in 6.4.0 in favor of `setNodes`, because `setNodes`
+supports host metadata that is used by the `NodeSelector`. `setHosts` has now
+been removed.
diff --git a/docs/reference/migration/migrate_7_0/mappings.asciidoc b/docs/reference/migration/migrate_7_0/mappings.asciidoc
index c56a0ae9b64..4983cb2da57 100644
--- a/docs/reference/migration/migrate_7_0/mappings.asciidoc
+++ b/docs/reference/migration/migrate_7_0/mappings.asciidoc
@@ -31,7 +31,7 @@ the index setting `index.mapping.nested_objects.limit`.
 This option is useless now that all indices have at most one type.
-=== The `classic` similarity has been removed
+==== The `classic` similarity has been removed
 The `classic` similarity relied on coordination factors for scoring to be good
 in presence of stopwords in the query. This feature has been removed from
@@ -39,7 +39,7 @@ Lucene, which means that the `classic` similarity now produces scores of lower
 quality. It is advised to switch to `BM25` instead, which is widely accepted
 as a better alternative.
-=== Similarities fail when unsupported options are provided
+==== Similarities fail when unsupported options are provided
 An error will now be thrown when unknown configuration options are provided
 to similarities. Such unknown parameters were ignored before.
diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc
index 094294d8530..a7d32896e97 100644
--- a/docs/reference/migration/migrate_7_0/search.asciidoc
+++ b/docs/reference/migration/migrate_7_0/search.asciidoc
@@ -54,6 +54,13 @@
 Setting `request_cache:true` on a query that creates a scroll (`scroll=1m`)
 has been deprecated in 6 and will now return a `400 - Bad request`.
 Scroll queries are not meant to be cached.
+
+==== Scroll queries cannot use `rescore` anymore
+Including a rescore clause on a query that creates a scroll (`scroll=1m`) has
+been deprecated in 6.5 and will now return a `400 - Bad request`. Allowing
+rescore on scroll queries would break the scroll sort. In the 6.x line, the
+rescore clause was silently ignored (for scroll queries), and it was allowed in
+the 5.x line.
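To illustrate the scroll-and-rescore change above, a request shaped like the following (the index name and queries are invented for illustration) was silently ignored in the 6.x line but will now be rejected with `400 - Bad request`:

[source,js]
--------------------------------------------------
GET /my_index/_search?scroll=1m
{
  "query": { "match": { "body": "ski" } },
  "rescore": {
    "window_size": 50,
    "query": {
      "rescore_query": { "match_phrase": { "body": "ski resort" } }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE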
+
 ==== Term Suggesters supported distance algorithms
 
 The following string distance algorithms were given additional names in 6.2 and
@@ -100,3 +107,8 @@ and the context is only accepted if `path` points to a field with `geo_point` type.
 
 `max_concurrent_shard_requests` used to limit the total number of concurrent shard
 requests a single high level search request can execute. In 7.0 this changed to be the
 max number of concurrent shard requests per node. The default is now `5`.
+
+==== `max_score` set to `null` when scores are not tracked
+
+`max_score` used to be set to `0` whenever scores are not tracked. `null` is now used
+instead, which is a more appropriate value for a scenario where scores are not available.
diff --git a/x-pack/docs/en/ml/aggregations.asciidoc b/docs/reference/ml/aggregations.asciidoc
similarity index 99%
rename from x-pack/docs/en/ml/aggregations.asciidoc
rename to docs/reference/ml/aggregations.asciidoc
index 07f46501569..4b873ea790b 100644
--- a/x-pack/docs/en/ml/aggregations.asciidoc
+++ b/docs/reference/ml/aggregations.asciidoc
@@ -41,7 +41,7 @@ PUT _xpack/ml/anomaly_detectors/farequote
 }
 ----------------------------------
 // CONSOLE
-// TEST[setup:farequote_data]
+// TEST[skip:setup:farequote_data]
 In this example, the `airline`, `responsetime`, and `time` fields are
 aggregations.
@@ -90,7 +90,7 @@ PUT _xpack/ml/datafeeds/datafeed-farequote
 }
 ----------------------------------
 // CONSOLE
-// TEST[setup:farequote_job]
+// TEST[skip:setup:farequote_job]
 In this example, the aggregations have names that match the fields that they
 operate on. That is to say, the `max` aggregation is named `time` and its
diff --git a/x-pack/docs/en/rest-api/ml/calendarresource.asciidoc b/docs/reference/ml/apis/calendarresource.asciidoc
similarity index 94%
rename from x-pack/docs/en/rest-api/ml/calendarresource.asciidoc
rename to docs/reference/ml/apis/calendarresource.asciidoc
index 8edb43ed7a3..4279102cd35 100644
--- a/x-pack/docs/en/rest-api/ml/calendarresource.asciidoc
+++ b/docs/reference/ml/apis/calendarresource.asciidoc
@@ -1,4 +1,5 @@
 [role="xpack"]
+[testenv="platinum"]
 [[ml-calendar-resource]]
 === Calendar Resources
diff --git a/x-pack/docs/en/rest-api/ml/close-job.asciidoc b/docs/reference/ml/apis/close-job.asciidoc
similarity index 97%
rename from x-pack/docs/en/rest-api/ml/close-job.asciidoc
rename to docs/reference/ml/apis/close-job.asciidoc
index 8e7e8eb0ce8..6dec6402c87 100644
--- a/x-pack/docs/en/rest-api/ml/close-job.asciidoc
+++ b/docs/reference/ml/apis/close-job.asciidoc
@@ -1,4 +1,5 @@
 [role="xpack"]
+[testenv="platinum"]
 [[ml-close-job]]
 === Close Jobs API
 ++++
@@ -80,7 +81,7 @@ The following example closes the `total-requests` job:
 POST _xpack/ml/anomaly_detectors/total-requests/_close
 --------------------------------------------------
 // CONSOLE
-// TEST[setup:server_metrics_openjob]
+// TEST[skip:setup:server_metrics_openjob]
 When the job is closed, you receive the following results:
 [source,js]
diff --git a/x-pack/docs/en/rest-api/ml/datafeedresource.asciidoc b/docs/reference/ml/apis/datafeedresource.asciidoc
similarity index 99%
rename from x-pack/docs/en/rest-api/ml/datafeedresource.asciidoc
rename to docs/reference/ml/apis/datafeedresource.asciidoc
index 0ffeb6bc89d..6fe0b35d951 100644
--- a/x-pack/docs/en/rest-api/ml/datafeedresource.asciidoc
+++ b/docs/reference/ml/apis/datafeedresource.asciidoc
@@ -1,4 +1,5 @@
 [role="xpack"]
+[testenv="platinum"]
 [[ml-datafeed-resource]]
 === {dfeed-cap} Resources
diff --git
a/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc b/docs/reference/ml/apis/delete-calendar-event.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc rename to docs/reference/ml/apis/delete-calendar-event.asciidoc index ef8dad39dba..8961726f573 100644 --- a/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-event.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-calendar-event]] === Delete Events from Calendar API ++++ @@ -44,7 +45,7 @@ calendar: DELETE _xpack/ml/calendars/planned-outages/events/LS8LJGEBMTCMA-qz49st -------------------------------------------------- // CONSOLE -// TEST[catch:missing] +// TEST[skip:catch:missing] When the event is removed, you receive the following results: [source,js] @@ -53,4 +54,3 @@ When the event is removed, you receive the following results: "acknowledged": true } ---- -// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/delete-calendar-job.asciidoc b/docs/reference/ml/apis/delete-calendar-job.asciidoc similarity index 93% rename from x-pack/docs/en/rest-api/ml/delete-calendar-job.asciidoc rename to docs/reference/ml/apis/delete-calendar-job.asciidoc index 94388c0c4b6..4362a82b5cb 100644 --- a/x-pack/docs/en/rest-api/ml/delete-calendar-job.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-calendar-job]] === Delete Jobs from Calendar API ++++ @@ -38,7 +39,7 @@ calendar and `total-requests` job: DELETE _xpack/ml/calendars/planned-outages/jobs/total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_addjob] +// TEST[skip:setup:calendar_outages_addjob] When the job is removed from the calendar, you receive the following results: @@ -50,4 +51,4 @@ results: "job_ids": [] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/delete-calendar.asciidoc b/docs/reference/ml/apis/delete-calendar.asciidoc similarity index 92% rename from x-pack/docs/en/rest-api/ml/delete-calendar.asciidoc rename to docs/reference/ml/apis/delete-calendar.asciidoc index f7673b54574..9f9f3457f24 100644 --- a/x-pack/docs/en/rest-api/ml/delete-calendar.asciidoc +++ b/docs/reference/ml/apis/delete-calendar.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-calendar]] === Delete Calendar API ++++ @@ -40,7 +41,7 @@ The following example deletes the `planned-outages` calendar: DELETE _xpack/ml/calendars/planned-outages -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages] +// TEST[skip:setup:calendar_outages] When the calendar is deleted, you receive the following results: [source,js] @@ -49,4 +50,4 @@ When the calendar is deleted, you receive the following results: "acknowledged": true } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/delete-datafeed.asciidoc b/docs/reference/ml/apis/delete-datafeed.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/delete-datafeed.asciidoc rename to docs/reference/ml/apis/delete-datafeed.asciidoc index db4fd5c177a..996d2c7dd2e 100644 --- a/x-pack/docs/en/rest-api/ml/delete-datafeed.asciidoc +++ b/docs/reference/ml/apis/delete-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-datafeed]] === Delete {dfeeds-cap} API ++++ @@ -47,7 +48,7 @@ The following example deletes the `datafeed-total-requests` 
{dfeed}:
 DELETE _xpack/ml/datafeeds/datafeed-total-requests
 --------------------------------------------------
 // CONSOLE
-// TEST[setup:server_metrics_datafeed]
+// TEST[skip:setup:server_metrics_datafeed]
 When the {dfeed} is deleted, you receive the following results:
 [source,js]
diff --git a/x-pack/docs/en/rest-api/ml/delete-filter.asciidoc b/docs/reference/ml/apis/delete-filter.asciidoc
similarity index 92%
rename from x-pack/docs/en/rest-api/ml/delete-filter.asciidoc
rename to docs/reference/ml/apis/delete-filter.asciidoc
index b58d2980b88..21e35b66076 100644
--- a/x-pack/docs/en/rest-api/ml/delete-filter.asciidoc
+++ b/docs/reference/ml/apis/delete-filter.asciidoc
@@ -1,4 +1,5 @@
 [role="xpack"]
+[testenv="platinum"]
 [[ml-delete-filter]]
 === Delete Filter API
 ++++
@@ -41,7 +42,7 @@ The following example deletes the `safe_domains` filter:
 DELETE _xpack/ml/filters/safe_domains
 --------------------------------------------------
 // CONSOLE
-// TEST[setup:ml_filter_safe_domains]
+// TEST[skip:setup:ml_filter_safe_domains]
 When the filter is deleted, you receive the following results:
 [source,js]
@@ -50,4 +51,4 @@ When the filter is deleted, you receive the following results:
 "acknowledged": true
 }
 ----
-//TESTRESPONSE
+// TESTRESPONSE
diff --git a/docs/reference/ml/apis/delete-forecast.asciidoc b/docs/reference/ml/apis/delete-forecast.asciidoc
new file mode 100644
index 00000000000..159dafefb0e
--- /dev/null
+++ b/docs/reference/ml/apis/delete-forecast.asciidoc
@@ -0,0 +1,78 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ml-delete-forecast]]
+=== Delete Forecast API
+++++
+Delete Forecast
+++++
+
+Deletes forecasts from a {ml} job.
+
+==== Request
+
+`DELETE _xpack/ml/anomaly_detectors/<job_id>/_forecast` +
+
+`DELETE _xpack/ml/anomaly_detectors/<job_id>/_forecast/<forecast_id>` +
+
+`DELETE _xpack/ml/anomaly_detectors/<job_id>/_forecast/_all`
+
+==== Description
+
+By default, forecasts are retained for 14 days. You can specify a different
+retention period with the `expires_in` parameter in the <>. The delete forecast API enables you to delete one or more forecasts before they expire.
+
+NOTE: When you delete a job, its associated forecasts are deleted.
+
+For more information, see {stack-ov}/ml-overview.html#ml-forecasting[Forecasting the Future].
+
+==== Path Parameters
+
+`job_id` (required)::
+  (string) Identifier for the job.
+
+`forecast_id`::
+  (string) A comma-separated list of forecast identifiers.
+  If you do not specify this optional parameter or if you specify `_all`, the
+  API deletes all forecasts from the job.
+
+==== Request Parameters
+
+`allow_no_forecasts`::
+  (boolean) Specifies whether an error occurs when there are no forecasts. In
+  particular, if this parameter is set to `false` and there are no forecasts
+  associated with the job, attempts to delete all forecasts return an error.
+  The default value is `true`.
+
+`timeout`::
+  (time units) Specifies the period of time to wait for the completion of the
+  delete operation. When this period of time elapses, the API fails and returns
+  an error. The default value is `30s`. For more information about time units,
+  see <>.
+
+==== Authorization
+
+You must have `manage_ml`, or `manage` cluster privileges to use this API.
+For more information, see {stack-ov}/security-privileges.html[Security Privileges].
+ +==== Examples + +The following example deletes all forecasts from the `total-requests` job: + +[source,js] +-------------------------------------------------- +DELETE _xpack/ml/anomaly_detectors/total-requests/_forecast/_all +-------------------------------------------------- +// CONSOLE +// TEST[skip:setup:server_metrics_openjob] + +If the request does not encounter errors, you receive the following result: +[source,js] +---- +{ + "acknowledged": true +} +---- +// NOTCONSOLE diff --git a/x-pack/docs/en/rest-api/ml/delete-job.asciidoc b/docs/reference/ml/apis/delete-job.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/delete-job.asciidoc rename to docs/reference/ml/apis/delete-job.asciidoc index c01b08545b6..d5ef120ad04 100644 --- a/x-pack/docs/en/rest-api/ml/delete-job.asciidoc +++ b/docs/reference/ml/apis/delete-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-job]] === Delete Jobs API ++++ @@ -56,7 +57,7 @@ The following example deletes the `total-requests` job: DELETE _xpack/ml/anomaly_detectors/total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] When the job is deleted, you receive the following results: [source,js] @@ -65,4 +66,4 @@ When the job is deleted, you receive the following results: "acknowledged": true } ---- -// TESTRESPONSE +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/delete-snapshot.asciidoc b/docs/reference/ml/apis/delete-snapshot.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/delete-snapshot.asciidoc rename to docs/reference/ml/apis/delete-snapshot.asciidoc index 2ab0116fe74..96a35900545 100644 --- a/x-pack/docs/en/rest-api/ml/delete-snapshot.asciidoc +++ b/docs/reference/ml/apis/delete-snapshot.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-snapshot]] === Delete Model Snapshots API ++++ @@ -32,7 +33,6 @@ the `model_snapshot_id` in the results from the get jobs API. You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. 
==== Examples @@ -53,3 +53,4 @@ When the snapshot is deleted, you receive the following results: "acknowledged": true } ---- +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/eventresource.asciidoc b/docs/reference/ml/apis/eventresource.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/eventresource.asciidoc rename to docs/reference/ml/apis/eventresource.asciidoc index c9ab7896421..a1e96f5c25a 100644 --- a/x-pack/docs/en/rest-api/ml/eventresource.asciidoc +++ b/docs/reference/ml/apis/eventresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-event-resource]] === Scheduled Event Resources diff --git a/x-pack/docs/en/rest-api/ml/filterresource.asciidoc b/docs/reference/ml/apis/filterresource.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/filterresource.asciidoc rename to docs/reference/ml/apis/filterresource.asciidoc index e942447c1ee..e67c92dc8d0 100644 --- a/x-pack/docs/en/rest-api/ml/filterresource.asciidoc +++ b/docs/reference/ml/apis/filterresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-filter-resource]] === Filter Resources diff --git a/x-pack/docs/en/rest-api/ml/flush-job.asciidoc b/docs/reference/ml/apis/flush-job.asciidoc similarity index 92% rename from x-pack/docs/en/rest-api/ml/flush-job.asciidoc rename to docs/reference/ml/apis/flush-job.asciidoc index 934a2d81b17..f19d2aa648f 100644 --- a/x-pack/docs/en/rest-api/ml/flush-job.asciidoc +++ b/docs/reference/ml/apis/flush-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-flush-job]] === Flush Jobs API ++++ @@ -74,7 +75,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_flush } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] When the operation succeeds, you receive the following results: [source,js] @@ -84,7 +85,7 @@ When the operation succeeds, you receive the following results: "last_finalized_bucket_end": 1455234900000 } ---- -// TESTRESPONSE[s/"last_finalized_bucket_end": 1455234900000/"last_finalized_bucket_end": $body.last_finalized_bucket_end/] +//TESTRESPONSE[s/"last_finalized_bucket_end": 1455234900000/"last_finalized_bucket_end": $body.last_finalized_bucket_end/] The `last_finalized_bucket_end` provides the timestamp (in milliseconds-since-the-epoch) of the end of the last bucket that was processed. 
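As a quick sanity check on the sample value above (assuming the standard Unix epoch):

----
1455234900000 ms = 1455234900 s after 1970-01-01T00:00:00Z = 2016-02-11T23:55:00Z
----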
@@ -101,7 +102,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_flush } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] When the operation succeeds, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/forecast.asciidoc b/docs/reference/ml/apis/forecast.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/forecast.asciidoc rename to docs/reference/ml/apis/forecast.asciidoc index 99647ecae1b..197876f3f04 100644 --- a/x-pack/docs/en/rest-api/ml/forecast.asciidoc +++ b/docs/reference/ml/apis/forecast.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-forecast]] === Forecast Jobs API ++++ diff --git a/x-pack/docs/en/rest-api/ml/get-bucket.asciidoc b/docs/reference/ml/apis/get-bucket.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/get-bucket.asciidoc rename to docs/reference/ml/apis/get-bucket.asciidoc index 95b05ff7f5d..3a276c13e89 100644 --- a/x-pack/docs/en/rest-api/ml/get-bucket.asciidoc +++ b/docs/reference/ml/apis/get-bucket.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-bucket]] === Get Buckets API ++++ @@ -81,7 +82,6 @@ that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these privileges. For more information, see {xpack-ref}/security-privileges.html[Security Privileges] and {xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>. ==== Examples diff --git a/x-pack/docs/en/rest-api/ml/get-calendar-event.asciidoc b/docs/reference/ml/apis/get-calendar-event.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/get-calendar-event.asciidoc rename to docs/reference/ml/apis/get-calendar-event.asciidoc index e89173c3382..43dd74e47c9 100644 --- a/x-pack/docs/en/rest-api/ml/get-calendar-event.asciidoc +++ b/docs/reference/ml/apis/get-calendar-event.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-calendar-event]] === Get Scheduled Events API ++++ @@ -66,7 +67,7 @@ The following example gets information about the scheduled events in the GET _xpack/ml/calendars/planned-outages/events -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_addevent] +// TEST[skip:setup:calendar_outages_addevent] The API returns the following results: diff --git a/x-pack/docs/en/rest-api/ml/get-calendar.asciidoc b/docs/reference/ml/apis/get-calendar.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/get-calendar.asciidoc rename to docs/reference/ml/apis/get-calendar.asciidoc index ae95fd99688..f86875f326c 100644 --- a/x-pack/docs/en/rest-api/ml/get-calendar.asciidoc +++ b/docs/reference/ml/apis/get-calendar.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-calendar]] === Get Calendars API ++++ @@ -62,7 +63,7 @@ calendar: GET _xpack/ml/calendars/planned-outages -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_addjob] +// TEST[skip:setup:calendar_outages_addjob] The API returns the following results: [source,js] @@ -79,4 +80,4 @@ The API returns the following results: ] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/get-category.asciidoc b/docs/reference/ml/apis/get-category.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/get-category.asciidoc rename to docs/reference/ml/apis/get-category.asciidoc index 13f274133c0..e5d6fe16802 100644 ---
a/x-pack/docs/en/rest-api/ml/get-category.asciidoc +++ b/docs/reference/ml/apis/get-category.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-category]] === Get Categories API ++++ @@ -18,7 +19,6 @@ Retrieves job results for one or more categories. For more information about categories, see {xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. -//<>. ==== Path Parameters @@ -56,7 +56,6 @@ that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these privileges. For more information, see {xpack-ref}/security-privileges.html[Security Privileges] and {xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>. ==== Examples diff --git a/x-pack/docs/en/rest-api/ml/get-datafeed-stats.asciidoc b/docs/reference/ml/apis/get-datafeed-stats.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/get-datafeed-stats.asciidoc rename to docs/reference/ml/apis/get-datafeed-stats.asciidoc index 2869e8222f8..9ca67cc17fb 100644 --- a/x-pack/docs/en/rest-api/ml/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/apis/get-datafeed-stats.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-datafeed-stats]] === Get {dfeed-cap} Statistics API ++++ @@ -66,7 +67,7 @@ The following example gets usage information for the GET _xpack/ml/datafeeds/datafeed-total-requests/_stats -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_startdf] +// TEST[skip:setup:server_metrics_startdf] The API returns the following results: [source,js] @@ -97,4 +98,4 @@ The API returns the following results: // TESTRESPONSE[s/"node-0"/$body.$_path/] // TESTRESPONSE[s/"hoXMLZB0RWKfR9UPPUCxXX"/$body.$_path/] // TESTRESPONSE[s/"127.0.0.1:9300"/$body.$_path/] -// TESTRESPONSE[s/"17179869184"/$body.datafeeds.0.node.attributes.ml\\.machine_memory/] +// TESTRESPONSE[s/"17179869184"/$body.datafeeds.0.node.attributes.ml\\.machine_memory/] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/get-datafeed.asciidoc b/docs/reference/ml/apis/get-datafeed.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/get-datafeed.asciidoc rename to docs/reference/ml/apis/get-datafeed.asciidoc index 0fa51773fd1..db5f4249669 100644 --- a/x-pack/docs/en/rest-api/ml/get-datafeed.asciidoc +++ b/docs/reference/ml/apis/get-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-datafeed]] === Get {dfeeds-cap} API ++++ @@ -60,7 +61,7 @@ The following example gets configuration information for the GET _xpack/ml/datafeeds/datafeed-total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_datafeed] +// TEST[skip:setup:server_metrics_datafeed] The API returns the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/get-filter.asciidoc b/docs/reference/ml/apis/get-filter.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/get-filter.asciidoc rename to docs/reference/ml/apis/get-filter.asciidoc index b4699e9d622..2dbb5d16cc5 100644 --- a/x-pack/docs/en/rest-api/ml/get-filter.asciidoc +++ b/docs/reference/ml/apis/get-filter.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-filter]] === Get Filters API ++++ @@ -62,7 +63,7 @@ filter: GET _xpack/ml/filters/safe_domains -------------------------------------------------- // CONSOLE -// TEST[setup:ml_filter_safe_domains] +// TEST[skip:setup:ml_filter_safe_domains] The API returns the following results: [source,js] @@ -81,4 +82,4
@@ The API returns the following results: ] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/get-influencer.asciidoc b/docs/reference/ml/apis/get-influencer.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-influencer.asciidoc rename to docs/reference/ml/apis/get-influencer.asciidoc index bffd2b8e096..182cca7aa99 100644 --- a/x-pack/docs/en/rest-api/ml/get-influencer.asciidoc +++ b/docs/reference/ml/apis/get-influencer.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-influencer]] === Get Influencers API ++++ diff --git a/x-pack/docs/en/rest-api/ml/get-job-stats.asciidoc b/docs/reference/ml/apis/get-job-stats.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-job-stats.asciidoc rename to docs/reference/ml/apis/get-job-stats.asciidoc index bd59ee8b258..509d9448a69 100644 --- a/x-pack/docs/en/rest-api/ml/get-job-stats.asciidoc +++ b/docs/reference/ml/apis/get-job-stats.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-job-stats]] === Get Job Statistics API ++++ diff --git a/x-pack/docs/en/rest-api/ml/get-job.asciidoc b/docs/reference/ml/apis/get-job.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/get-job.asciidoc rename to docs/reference/ml/apis/get-job.asciidoc index 2e95d8e01bb..c669ac6034e 100644 --- a/x-pack/docs/en/rest-api/ml/get-job.asciidoc +++ b/docs/reference/ml/apis/get-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-job]] === Get Jobs API ++++ @@ -59,7 +60,7 @@ The following example gets configuration information for the `total-requests` jo GET _xpack/ml/anomaly_detectors/total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] The API returns the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/get-overall-buckets.asciidoc b/docs/reference/ml/apis/get-overall-buckets.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-overall-buckets.asciidoc rename to docs/reference/ml/apis/get-overall-buckets.asciidoc index f2581f4904e..f4818f3bbbe 100644 --- a/x-pack/docs/en/rest-api/ml/get-overall-buckets.asciidoc +++ b/docs/reference/ml/apis/get-overall-buckets.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-overall-buckets]] === Get Overall Buckets API ++++ @@ -93,7 +94,6 @@ that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these privileges. For more information, see {xpack-ref}/security-privileges.html[Security Privileges] and {xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>.
==== Examples diff --git a/x-pack/docs/en/rest-api/ml/get-record.asciidoc b/docs/reference/ml/apis/get-record.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-record.asciidoc rename to docs/reference/ml/apis/get-record.asciidoc index 1870b441597..199cce15484 100644 --- a/x-pack/docs/en/rest-api/ml/get-record.asciidoc +++ b/docs/reference/ml/apis/get-record.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-record]] === Get Records API ++++ diff --git a/x-pack/docs/en/rest-api/ml/get-snapshot.asciidoc b/docs/reference/ml/apis/get-snapshot.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-snapshot.asciidoc rename to docs/reference/ml/apis/get-snapshot.asciidoc index 24e82af1f19..e194d944b63 100644 --- a/x-pack/docs/en/rest-api/ml/get-snapshot.asciidoc +++ b/docs/reference/ml/apis/get-snapshot.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-snapshot]] === Get Model Snapshots API ++++ diff --git a/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc b/docs/reference/ml/apis/jobcounts.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/jobcounts.asciidoc rename to docs/reference/ml/apis/jobcounts.asciidoc index d343cc23ae0..d0169e228d5 100644 --- a/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc +++ b/docs/reference/ml/apis/jobcounts.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-jobstats]] === Job Statistics diff --git a/x-pack/docs/en/rest-api/ml/jobresource.asciidoc b/docs/reference/ml/apis/jobresource.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/jobresource.asciidoc rename to docs/reference/ml/apis/jobresource.asciidoc index 5b109b1c21d..e0c314724e7 100644 --- a/x-pack/docs/en/rest-api/ml/jobresource.asciidoc +++ b/docs/reference/ml/apis/jobresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-job-resource]] === Job Resources diff --git a/x-pack/docs/en/rest-api/ml-api.asciidoc b/docs/reference/ml/apis/ml-api.asciidoc similarity index 60% rename from x-pack/docs/en/rest-api/ml-api.asciidoc rename to docs/reference/ml/apis/ml-api.asciidoc index b48e9f93404..961eb37e9d7 100644 --- a/x-pack/docs/en/rest-api/ml-api.asciidoc +++ b/docs/reference/ml/apis/ml-api.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-apis]] == Machine Learning APIs @@ -47,7 +48,7 @@ machine learning APIs and in advanced job configuration options in Kibana. * <> * <> * <> -* <> +* <>, <> [float] [[ml-api-snapshot-endpoint]] @@ -70,57 +71,58 @@ machine learning APIs and in advanced job configuration options in Kibana. 
* <> //ADD -include::ml/post-calendar-event.asciidoc[] -include::ml/put-calendar-job.asciidoc[] +include::post-calendar-event.asciidoc[] +include::put-calendar-job.asciidoc[] //CLOSE -include::ml/close-job.asciidoc[] +include::close-job.asciidoc[] //CREATE -include::ml/put-calendar.asciidoc[] -include::ml/put-datafeed.asciidoc[] -include::ml/put-filter.asciidoc[] -include::ml/put-job.asciidoc[] +include::put-calendar.asciidoc[] +include::put-datafeed.asciidoc[] +include::put-filter.asciidoc[] +include::put-job.asciidoc[] //DELETE -include::ml/delete-calendar.asciidoc[] -include::ml/delete-datafeed.asciidoc[] -include::ml/delete-calendar-event.asciidoc[] -include::ml/delete-filter.asciidoc[] -include::ml/delete-job.asciidoc[] -include::ml/delete-calendar-job.asciidoc[] -include::ml/delete-snapshot.asciidoc[] +include::delete-calendar.asciidoc[] +include::delete-datafeed.asciidoc[] +include::delete-calendar-event.asciidoc[] +include::delete-filter.asciidoc[] +include::delete-forecast.asciidoc[] +include::delete-job.asciidoc[] +include::delete-calendar-job.asciidoc[] +include::delete-snapshot.asciidoc[] //FLUSH -include::ml/flush-job.asciidoc[] +include::flush-job.asciidoc[] //FORECAST -include::ml/forecast.asciidoc[] +include::forecast.asciidoc[] //GET -include::ml/get-calendar.asciidoc[] -include::ml/get-bucket.asciidoc[] -include::ml/get-overall-buckets.asciidoc[] -include::ml/get-category.asciidoc[] -include::ml/get-datafeed.asciidoc[] -include::ml/get-datafeed-stats.asciidoc[] -include::ml/get-influencer.asciidoc[] -include::ml/get-job.asciidoc[] -include::ml/get-job-stats.asciidoc[] -include::ml/get-snapshot.asciidoc[] -include::ml/get-calendar-event.asciidoc[] -include::ml/get-filter.asciidoc[] -include::ml/get-record.asciidoc[] +include::get-calendar.asciidoc[] +include::get-bucket.asciidoc[] +include::get-overall-buckets.asciidoc[] +include::get-category.asciidoc[] +include::get-datafeed.asciidoc[] +include::get-datafeed-stats.asciidoc[] +include::get-influencer.asciidoc[] +include::get-job.asciidoc[] +include::get-job-stats.asciidoc[] +include::get-snapshot.asciidoc[] +include::get-calendar-event.asciidoc[] +include::get-filter.asciidoc[] +include::get-record.asciidoc[] //OPEN -include::ml/open-job.asciidoc[] +include::open-job.asciidoc[] //POST -include::ml/post-data.asciidoc[] +include::post-data.asciidoc[] //PREVIEW -include::ml/preview-datafeed.asciidoc[] +include::preview-datafeed.asciidoc[] //REVERT -include::ml/revert-snapshot.asciidoc[] +include::revert-snapshot.asciidoc[] //START/STOP -include::ml/start-datafeed.asciidoc[] -include::ml/stop-datafeed.asciidoc[] +include::start-datafeed.asciidoc[] +include::stop-datafeed.asciidoc[] //UPDATE -include::ml/update-datafeed.asciidoc[] -include::ml/update-filter.asciidoc[] -include::ml/update-job.asciidoc[] -include::ml/update-snapshot.asciidoc[] +include::update-datafeed.asciidoc[] +include::update-filter.asciidoc[] +include::update-job.asciidoc[] +include::update-snapshot.asciidoc[] //VALIDATE -//include::ml/validate-detector.asciidoc[] -//include::ml/validate-job.asciidoc[] +//include::validate-detector.asciidoc[] +//include::validate-job.asciidoc[] diff --git a/x-pack/docs/en/rest-api/ml/open-job.asciidoc b/docs/reference/ml/apis/open-job.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/open-job.asciidoc rename to docs/reference/ml/apis/open-job.asciidoc index 59d5568ac77..c1e5977b734 100644 --- a/x-pack/docs/en/rest-api/ml/open-job.asciidoc +++ b/docs/reference/ml/apis/open-job.asciidoc @@ -1,4 +1,5 @@ 
[role="xpack"] +[testenv="platinum"] [[ml-open-job]] === Open Jobs API ++++ @@ -56,7 +57,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_open } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] When the job opens, you receive the following results: [source,js] @@ -65,5 +66,4 @@ When the job opens, you receive the following results: "opened": true } ---- -//CONSOLE // TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/post-calendar-event.asciidoc b/docs/reference/ml/apis/post-calendar-event.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/post-calendar-event.asciidoc rename to docs/reference/ml/apis/post-calendar-event.asciidoc index 41af0841d2e..998db409fc7 100644 --- a/x-pack/docs/en/rest-api/ml/post-calendar-event.asciidoc +++ b/docs/reference/ml/apis/post-calendar-event.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-post-calendar-event]] === Add Events to Calendar API ++++ @@ -52,7 +53,7 @@ POST _xpack/ml/calendars/planned-outages/events } -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_addjob] +// TEST[skip:setup:calendar_outages_addjob] The API returns the following results: @@ -81,7 +82,7 @@ The API returns the following results: ] } ---- -//TESTRESPONSE +// TESTRESPONSE For more information about these properties, see <>. diff --git a/x-pack/docs/en/rest-api/ml/post-data.asciidoc b/docs/reference/ml/apis/post-data.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/post-data.asciidoc rename to docs/reference/ml/apis/post-data.asciidoc index 40354d7f6f7..6a5a3d3d6cb 100644 --- a/x-pack/docs/en/rest-api/ml/post-data.asciidoc +++ b/docs/reference/ml/apis/post-data.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-post-data]] === Post Data to Jobs API ++++ diff --git a/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc b/docs/reference/ml/apis/preview-datafeed.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc rename to docs/reference/ml/apis/preview-datafeed.asciidoc index 637b506cb9a..7b9eccd9a59 100644 --- a/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc +++ b/docs/reference/ml/apis/preview-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-preview-datafeed]] === Preview {dfeeds-cap} API ++++ @@ -53,7 +54,7 @@ The following example obtains a preview of the `datafeed-farequote` {dfeed}: GET _xpack/ml/datafeeds/datafeed-farequote/_preview -------------------------------------------------- // CONSOLE -// TEST[setup:farequote_datafeed] +// TEST[skip:setup:farequote_datafeed] The data that is returned for this example is as follows: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/put-calendar-job.asciidoc b/docs/reference/ml/apis/put-calendar-job.asciidoc similarity index 93% rename from x-pack/docs/en/rest-api/ml/put-calendar-job.asciidoc rename to docs/reference/ml/apis/put-calendar-job.asciidoc index 6940957b159..0563047043a 100644 --- a/x-pack/docs/en/rest-api/ml/put-calendar-job.asciidoc +++ b/docs/reference/ml/apis/put-calendar-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-calendar-job]] === Add Jobs to Calendar API ++++ @@ -38,7 +39,7 @@ The following example associates the `planned-outages` calendar with the PUT _xpack/ml/calendars/planned-outages/jobs/total-requests -------------------------------------------------- // CONSOLE -// 
TEST[setup:calendar_outages_openjob] +// TEST[skip:setup:calendar_outages_openjob] The API returns the following results: @@ -51,4 +52,4 @@ The API returns the following results: ] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/put-calendar.asciidoc b/docs/reference/ml/apis/put-calendar.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/put-calendar.asciidoc rename to docs/reference/ml/apis/put-calendar.asciidoc index a82da5a2c0c..06b8e55d774 100644 --- a/x-pack/docs/en/rest-api/ml/put-calendar.asciidoc +++ b/docs/reference/ml/apis/put-calendar.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-calendar]] === Create Calendar API ++++ @@ -44,6 +45,7 @@ The following example creates the `planned-outages` calendar: PUT _xpack/ml/calendars/planned-outages -------------------------------------------------- // CONSOLE +// TEST[skip:need-license] When the calendar is created, you receive the following results: [source,js] @@ -53,4 +55,4 @@ When the calendar is created, you receive the following results: "job_ids": [] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/put-datafeed.asciidoc rename to docs/reference/ml/apis/put-datafeed.asciidoc index 6b8ad932a1d..b5c99fc8e36 100644 --- a/x-pack/docs/en/rest-api/ml/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-datafeed]] === Create {dfeeds-cap} API ++++ @@ -107,7 +108,7 @@ PUT _xpack/ml/datafeeds/datafeed-total-requests } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] When the {dfeed} is created, you receive the following results: [source,js] @@ -132,4 +133,4 @@ When the {dfeed} is created, you receive the following results: } ---- // TESTRESPONSE[s/"query_delay": "83474ms"/"query_delay": $body.query_delay/] -// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] +// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/put-filter.asciidoc b/docs/reference/ml/apis/put-filter.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/put-filter.asciidoc rename to docs/reference/ml/apis/put-filter.asciidoc index d2982a56f61..165fe969758 100644 --- a/x-pack/docs/en/rest-api/ml/put-filter.asciidoc +++ b/docs/reference/ml/apis/put-filter.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-filter]] === Create Filter API ++++ @@ -55,6 +56,7 @@ PUT _xpack/ml/filters/safe_domains } -------------------------------------------------- // CONSOLE +// TEST[skip:need-licence] When the filter is created, you receive the following response: [source,js] @@ -65,4 +67,4 @@ When the filter is created, you receive the following response: "items": ["*.google.com", "wikipedia.org"] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/put-job.asciidoc b/docs/reference/ml/apis/put-job.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/put-job.asciidoc rename to docs/reference/ml/apis/put-job.asciidoc index 1c436f53d32..ce053484906 100644 --- a/x-pack/docs/en/rest-api/ml/put-job.asciidoc +++ b/docs/reference/ml/apis/put-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-job]] 
=== Create Jobs API ++++ @@ -104,6 +105,7 @@ PUT _xpack/ml/anomaly_detectors/total-requests } -------------------------------------------------- // CONSOLE +// TEST[skip:need-licence] When the job is created, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc b/docs/reference/ml/apis/resultsresource.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/resultsresource.asciidoc rename to docs/reference/ml/apis/resultsresource.asciidoc index c28ed72aedb..d3abd094be7 100644 --- a/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc +++ b/docs/reference/ml/apis/resultsresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-results-resource]] === Results Resources diff --git a/x-pack/docs/en/rest-api/ml/revert-snapshot.asciidoc b/docs/reference/ml/apis/revert-snapshot.asciidoc similarity index 67% rename from x-pack/docs/en/rest-api/ml/revert-snapshot.asciidoc rename to docs/reference/ml/apis/revert-snapshot.asciidoc index 1dc3046ac4f..48fc65edf90 100644 --- a/x-pack/docs/en/rest-api/ml/revert-snapshot.asciidoc +++ b/docs/reference/ml/apis/revert-snapshot.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-revert-snapshot]] === Revert Model Snapshots API ++++ @@ -22,33 +23,6 @@ then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. -//// -To revert to a saved snapshot, you must follow this sequence: -. Close the job -. Revert to a snapshot -. Open the job -. Send new data to the job - -When reverting to a snapshot, there is a choice to make about whether or not -you want to keep the results that were created between the time of the snapshot -and the current time. In the case of Black Friday for instance, you might want -to keep the results and carry on processing data from the current time, -though without the models learning the one-off behavior and compensating for it. -However, say in the event of a critical system failure and you decide to reset -and models to a previous known good state and process data from that time, -it makes sense to delete the intervening results for the known bad period and -resend data from that earlier time. - -Any gaps in data since the snapshot time will be treated as nulls and not modeled. -If there is a partial bucket at the end of the snapshot and/or at the beginning -of the new input data, then this will be ignored and treated as a gap. - -For jobs with many entities, the model state may be very large. -If a model state is several GB, this could take 10-20 mins to revert depending -upon machine spec and resources. If this is the case, please ensure this time -is planned for. -Model size (in bytes) is available as part of the Job Resource Model Size Stats. -//// IMPORTANT: Before you revert to a saved snapshot, you must close the job. @@ -77,7 +51,6 @@ If you want to resend data, then delete the intervening results. You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. 
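The comment block removed above outlined the required sequence: close the job, revert to the snapshot, then reopen the job and resend data. Ahead of the example below, a rough sketch of those steps against the `total-requests` job (the snapshot ID is made up; illustrative only, not a tested snippet):

[source,js]
--------------------------------------------------
POST _xpack/ml/anomaly_detectors/total-requests/_close

POST _xpack/ml/anomaly_detectors/total-requests/model_snapshots/1491852978/_revert
{
  "delete_intervening_results": true
}

POST _xpack/ml/anomaly_detectors/total-requests/_open
--------------------------------------------------
// NOTCONSOLE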
==== Examples diff --git a/x-pack/docs/en/rest-api/ml/snapshotresource.asciidoc b/docs/reference/ml/apis/snapshotresource.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/snapshotresource.asciidoc rename to docs/reference/ml/apis/snapshotresource.asciidoc index fb2e3d83de6..f068f6d94ed 100644 --- a/x-pack/docs/en/rest-api/ml/snapshotresource.asciidoc +++ b/docs/reference/ml/apis/snapshotresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-snapshot-resource]] === Model Snapshot Resources diff --git a/x-pack/docs/en/rest-api/ml/start-datafeed.asciidoc b/docs/reference/ml/apis/start-datafeed.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/start-datafeed.asciidoc rename to docs/reference/ml/apis/start-datafeed.asciidoc index fa3ea35a751..566e700dd04 100644 --- a/x-pack/docs/en/rest-api/ml/start-datafeed.asciidoc +++ b/docs/reference/ml/apis/start-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-start-datafeed]] === Start {dfeeds-cap} API ++++ @@ -79,7 +80,6 @@ of the latest processed record. You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. ==== Security Integration @@ -101,7 +101,7 @@ POST _xpack/ml/datafeeds/datafeed-total-requests/_start } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] When the {dfeed} starts, you receive the following results: [source,js] @@ -110,5 +110,4 @@ When the {dfeed} starts, you receive the following results: "started": true } ---- -// CONSOLE -// TESTRESPONSE +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/stop-datafeed.asciidoc b/docs/reference/ml/apis/stop-datafeed.asciidoc similarity index 92% rename from x-pack/docs/en/rest-api/ml/stop-datafeed.asciidoc rename to docs/reference/ml/apis/stop-datafeed.asciidoc index 27872ff5a20..7ea48974f2d 100644 --- a/x-pack/docs/en/rest-api/ml/stop-datafeed.asciidoc +++ b/docs/reference/ml/apis/stop-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-stop-datafeed]] === Stop {dfeeds-cap} API ++++ @@ -18,7 +19,6 @@ A {dfeed} can be started and stopped multiple times throughout its lifecycle. `POST _xpack/ml/datafeeds/_all/_stop` -//TBD: Can there be spaces between the items in the list? 
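On the removed TBD above: a sketch of stopping more than one {dfeed} in a single request, assuming the usual {es} convention of comma-separated identifiers with no spaces between items (the second ID is illustrative; not a tested snippet):

[source,js]
--------------------------------------------------
POST _xpack/ml/datafeeds/datafeed-total-requests,datafeed-farequote/_stop
{
  "timeout": "30s"
}
--------------------------------------------------
// NOTCONSOLE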
===== Description @@ -63,14 +63,14 @@ POST _xpack/ml/datafeeds/datafeed-total-requests/_stop } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_startdf] +// TEST[skip:setup:server_metrics_startdf] When the {dfeed} stops, you receive the following results: + [source,js] ---- { "stopped": true } ---- -// CONSOLE -// TESTRESPONSE +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/update-datafeed.asciidoc b/docs/reference/ml/apis/update-datafeed.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/update-datafeed.asciidoc rename to docs/reference/ml/apis/update-datafeed.asciidoc index bc9462347c1..be55d864c87 100644 --- a/x-pack/docs/en/rest-api/ml/update-datafeed.asciidoc +++ b/docs/reference/ml/apis/update-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-update-datafeed]] === Update {dfeeds-cap} API ++++ @@ -106,7 +107,7 @@ POST _xpack/ml/datafeeds/datafeed-total-requests/_update } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_datafeed] +// TEST[skip:setup:server_metrics_datafeed] When the {dfeed} is updated, you receive the full {dfeed} configuration with the updated values: diff --git a/x-pack/docs/en/rest-api/ml/update-filter.asciidoc b/docs/reference/ml/apis/update-filter.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/update-filter.asciidoc rename to docs/reference/ml/apis/update-filter.asciidoc index 1b6760dfed6..f551c8e923b 100644 --- a/x-pack/docs/en/rest-api/ml/update-filter.asciidoc +++ b/docs/reference/ml/apis/update-filter.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-update-filter]] === Update Filter API ++++ @@ -52,7 +53,7 @@ POST _xpack/ml/filters/safe_domains/_update } -------------------------------------------------- // CONSOLE -// TEST[setup:ml_filter_safe_domains] +// TEST[skip:setup:ml_filter_safe_domains] The API returns the following results: @@ -64,4 +65,4 @@ The API returns the following results: "items": ["*.google.com", "*.myorg.com"] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/update-job.asciidoc b/docs/reference/ml/apis/update-job.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/update-job.asciidoc rename to docs/reference/ml/apis/update-job.asciidoc index 852745e9dd9..58bfb2679d9 100644 --- a/x-pack/docs/en/rest-api/ml/update-job.asciidoc +++ b/docs/reference/ml/apis/update-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-update-job]] === Update Jobs API ++++ @@ -121,7 +122,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_update } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] When the job is updated, you receive a summary of the job configuration information, including the updated property values.
For example: } ---- // TESTRESPONSE[s/"job_version": "7.0.0-alpha1"/"job_version": $body.job_version/] -// TESTRESPONSE[s/"create_time": 1518808660505/"create_time": $body.create_time/] +// TESTRESPONSE[s/"create_time": 1518808660505/"create_time": $body.create_time/] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/update-snapshot.asciidoc b/docs/reference/ml/apis/update-snapshot.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/update-snapshot.asciidoc rename to docs/reference/ml/apis/update-snapshot.asciidoc index 8c98a7b7321..b58eebe810f 100644 --- a/x-pack/docs/en/rest-api/ml/update-snapshot.asciidoc +++ b/docs/reference/ml/apis/update-snapshot.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-update-snapshot]] === Update Model Snapshots API ++++ diff --git a/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc b/docs/reference/ml/apis/validate-detector.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/validate-detector.asciidoc rename to docs/reference/ml/apis/validate-detector.asciidoc index ab8a0de442c..e525b1a1b20 100644 --- a/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc +++ b/docs/reference/ml/apis/validate-detector.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-valid-detector]] === Validate Detectors API ++++ @@ -44,6 +45,7 @@ POST _xpack/ml/anomaly_detectors/_validate/detector } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] When the validation completes, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/validate-job.asciidoc b/docs/reference/ml/apis/validate-job.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/validate-job.asciidoc rename to docs/reference/ml/apis/validate-job.asciidoc index 0ccc5bc04e1..b8326058260 100644 --- a/x-pack/docs/en/rest-api/ml/validate-job.asciidoc +++ b/docs/reference/ml/apis/validate-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-valid-job]] === Validate Jobs API ++++ @@ -55,6 +56,7 @@ POST _xpack/ml/anomaly_detectors/_validate } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] When the validation is complete, you receive the following results: [source,js] diff --git a/x-pack/docs/en/ml/categories.asciidoc b/docs/reference/ml/categories.asciidoc similarity index 99% rename from x-pack/docs/en/ml/categories.asciidoc rename to docs/reference/ml/categories.asciidoc index 21f71b871cb..03ebc8af76e 100644 --- a/x-pack/docs/en/ml/categories.asciidoc +++ b/docs/reference/ml/categories.asciidoc @@ -44,6 +44,7 @@ PUT _xpack/ml/anomaly_detectors/it_ops_new_logs } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> The `categorization_field_name` property indicates which field will be categorized. <2> The resulting categories are used in a detector by setting `by_field_name`, @@ -127,6 +128,7 @@ PUT _xpack/ml/anomaly_detectors/it_ops_new_logs2 } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> The {ref}/analysis-pattern-replace-charfilter.html[`pattern_replace` character filter] here achieves exactly the same as the `categorization_filters` in the first @@ -193,6 +195,7 @@ PUT _xpack/ml/anomaly_detectors/it_ops_new_logs3 } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> Tokens basically consist of hyphens, digits, letters, underscores and dots. 
<2> By default, categorization ignores tokens that begin with a digit. <3> By default, categorization also ignores tokens that are hexadecimal numbers. diff --git a/x-pack/docs/en/ml/configuring.asciidoc b/docs/reference/ml/configuring.asciidoc similarity index 87% rename from x-pack/docs/en/ml/configuring.asciidoc rename to docs/reference/ml/configuring.asciidoc index e35f046a82b..9b6149d662a 100644 --- a/x-pack/docs/en/ml/configuring.asciidoc +++ b/docs/reference/ml/configuring.asciidoc @@ -36,20 +36,20 @@ The scenarios in this section describe some best practices for generating useful * <> * <> -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/customurl.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/customurl.asciidoc include::customurl.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/aggregations.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/aggregations.asciidoc include::aggregations.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/categories.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/categories.asciidoc include::categories.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/populations.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/populations.asciidoc include::populations.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/transforms.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/transforms.asciidoc include::transforms.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/detector-custom-rules.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/detector-custom-rules.asciidoc include::detector-custom-rules.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/ml/customurl.asciidoc b/docs/reference/ml/customurl.asciidoc similarity index 99% rename from x-pack/docs/en/ml/customurl.asciidoc rename to docs/reference/ml/customurl.asciidoc index 7c197084c0e..95f4f5f938f 100644 --- a/x-pack/docs/en/ml/customurl.asciidoc +++ b/docs/reference/ml/customurl.asciidoc @@ -106,7 +106,7 @@ POST _xpack/ml/anomaly_detectors/sample_job/_update } ---------------------------------- //CONSOLE -//TEST[setup:sample_job] +//TEST[skip:setup:sample_job] When you click this custom URL in the anomalies table in {kib}, it opens up the *Discover* page and displays source data for the period one hour before and diff --git a/x-pack/docs/en/ml/detector-custom-rules.asciidoc b/docs/reference/ml/detector-custom-rules.asciidoc similarity index 97% rename from x-pack/docs/en/ml/detector-custom-rules.asciidoc rename to docs/reference/ml/detector-custom-rules.asciidoc index 8513c7e4d25..02881f4cc43 100644 --- a/x-pack/docs/en/ml/detector-custom-rules.asciidoc +++ b/docs/reference/ml/detector-custom-rules.asciidoc @@ -39,6 +39,7 @@ PUT _xpack/ml/filters/safe_domains } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] Now, we can create our job specifying a scope that uses the `safe_domains` filter for the `highest_registered_domain` field: @@ -70,6 +71,7 @@ PUT _xpack/ml/anomaly_detectors/dns_exfiltration_with_rule } ---------------------------------- // 
CONSOLE +// TEST[skip:needs-licence] As time advances and we see more data and more results, we might encounter new domains that we want to add in the filter. We can do that by using the @@ -83,7 +85,7 @@ POST _xpack/ml/filters/safe_domains/_update } ---------------------------------- // CONSOLE -// TEST[setup:ml_filter_safe_domains] +// TEST[skip:setup:ml_filter_safe_domains] Note that we can use any of the `partition_field_name`, `over_field_name`, or `by_field_name` fields in the `scope`. @@ -123,6 +125,7 @@ PUT _xpack/ml/anomaly_detectors/scoping_multiple_fields } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] Such a detector will skip results when the values of all 3 scoped fields are included in the referenced filters. @@ -166,6 +169,7 @@ PUT _xpack/ml/anomaly_detectors/cpu_with_rule } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] When there are multiple conditions they are combined with a logical `and`. This is useful when we want the rule to apply to a range. We simply create @@ -205,6 +209,7 @@ PUT _xpack/ml/anomaly_detectors/rule_with_range } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] ==== Custom rules in the life-cycle of a job diff --git a/x-pack/docs/en/ml/functions.asciidoc b/docs/reference/ml/functions.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions.asciidoc rename to docs/reference/ml/functions.asciidoc diff --git a/x-pack/docs/en/ml/functions/count.asciidoc b/docs/reference/ml/functions/count.asciidoc similarity index 97% rename from x-pack/docs/en/ml/functions/count.asciidoc rename to docs/reference/ml/functions/count.asciidoc index a2dc5645b61..abbbd118ffe 100644 --- a/x-pack/docs/en/ml/functions/count.asciidoc +++ b/docs/reference/ml/functions/count.asciidoc @@ -59,6 +59,7 @@ PUT _xpack/ml/anomaly_detectors/example1 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] This example is probably the simplest possible analysis. It identifies time buckets during which the overall count of events is higher or lower than @@ -86,6 +87,7 @@ PUT _xpack/ml/anomaly_detectors/example2 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you use this `high_count` function in a detector in your job, it models the event rate for each error code. It detects users that generate an @@ -110,6 +112,7 @@ PUT _xpack/ml/anomaly_detectors/example3 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] In this example, the function detects when the count of events for a status code is lower than usual. @@ -136,6 +139,7 @@ PUT _xpack/ml/anomaly_detectors/example4 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you are analyzing an aggregated `events_per_min` field, do not use a sum function (for example, `sum(events_per_min)`). Instead, use the count function @@ -200,6 +204,7 @@ PUT _xpack/ml/anomaly_detectors/example5 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you use this `high_non_zero_count` function in a detector in your job, it models the count of events for the `signaturename` field. It ignores any buckets @@ -253,6 +258,7 @@ PUT _xpack/ml/anomaly_detectors/example6 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] This `distinct_count` function detects when a system has an unusual number of logged in users. 
When you use this function in a detector in your job, it @@ -278,6 +284,7 @@ PUT _xpack/ml/anomaly_detectors/example7 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] This example detects instances of port scanning. When you use this function in a detector in your job, it models the distinct count of ports. It also detects the diff --git a/x-pack/docs/en/ml/functions/geo.asciidoc b/docs/reference/ml/functions/geo.asciidoc similarity index 98% rename from x-pack/docs/en/ml/functions/geo.asciidoc rename to docs/reference/ml/functions/geo.asciidoc index 5bcf6c33945..461ab825ff5 100644 --- a/x-pack/docs/en/ml/functions/geo.asciidoc +++ b/docs/reference/ml/functions/geo.asciidoc @@ -47,6 +47,7 @@ PUT _xpack/ml/anomaly_detectors/example1 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you use this `lat_long` function in a detector in your job, it detects anomalies where the geographic location of a credit card transaction is @@ -98,6 +99,6 @@ PUT _xpack/ml/datafeeds/datafeed-test2 } -------------------------------------------------- // CONSOLE -// TEST[setup:farequote_job] +// TEST[skip:setup:farequote_job] For more information, see <>. diff --git a/x-pack/docs/en/ml/functions/info.asciidoc b/docs/reference/ml/functions/info.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/info.asciidoc rename to docs/reference/ml/functions/info.asciidoc diff --git a/x-pack/docs/en/ml/functions/metric.asciidoc b/docs/reference/ml/functions/metric.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/metric.asciidoc rename to docs/reference/ml/functions/metric.asciidoc diff --git a/x-pack/docs/en/ml/functions/rare.asciidoc b/docs/reference/ml/functions/rare.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/rare.asciidoc rename to docs/reference/ml/functions/rare.asciidoc diff --git a/x-pack/docs/en/ml/functions/sum.asciidoc b/docs/reference/ml/functions/sum.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/sum.asciidoc rename to docs/reference/ml/functions/sum.asciidoc diff --git a/x-pack/docs/en/ml/functions/time.asciidoc b/docs/reference/ml/functions/time.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/time.asciidoc rename to docs/reference/ml/functions/time.asciidoc diff --git a/x-pack/docs/en/ml/images/ml-category-advanced.jpg b/docs/reference/ml/images/ml-category-advanced.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-category-advanced.jpg rename to docs/reference/ml/images/ml-category-advanced.jpg diff --git a/x-pack/docs/en/ml/images/ml-category-anomalies.jpg b/docs/reference/ml/images/ml-category-anomalies.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-category-anomalies.jpg rename to docs/reference/ml/images/ml-category-anomalies.jpg diff --git a/x-pack/docs/en/ml/images/ml-categoryterms.jpg b/docs/reference/ml/images/ml-categoryterms.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-categoryterms.jpg rename to docs/reference/ml/images/ml-categoryterms.jpg diff --git a/x-pack/docs/en/ml/images/ml-create-job.jpg b/docs/reference/ml/images/ml-create-job.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-create-job.jpg rename to docs/reference/ml/images/ml-create-job.jpg diff --git a/x-pack/docs/en/ml/images/ml-create-jobs.jpg b/docs/reference/ml/images/ml-create-jobs.jpg similarity index 100% rename from 
x-pack/docs/en/ml/images/ml-create-jobs.jpg rename to docs/reference/ml/images/ml-create-jobs.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl-detail.jpg b/docs/reference/ml/images/ml-customurl-detail.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl-detail.jpg rename to docs/reference/ml/images/ml-customurl-detail.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl-discover.jpg b/docs/reference/ml/images/ml-customurl-discover.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl-discover.jpg rename to docs/reference/ml/images/ml-customurl-discover.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl-edit.jpg b/docs/reference/ml/images/ml-customurl-edit.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl-edit.jpg rename to docs/reference/ml/images/ml-customurl-edit.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl.jpg b/docs/reference/ml/images/ml-customurl.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl.jpg rename to docs/reference/ml/images/ml-customurl.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-dates.jpg b/docs/reference/ml/images/ml-data-dates.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-dates.jpg rename to docs/reference/ml/images/ml-data-dates.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-keywords.jpg b/docs/reference/ml/images/ml-data-keywords.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-keywords.jpg rename to docs/reference/ml/images/ml-data-keywords.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-metrics.jpg b/docs/reference/ml/images/ml-data-metrics.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-metrics.jpg rename to docs/reference/ml/images/ml-data-metrics.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-topmetrics.jpg b/docs/reference/ml/images/ml-data-topmetrics.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-topmetrics.jpg rename to docs/reference/ml/images/ml-data-topmetrics.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-visualizer.jpg b/docs/reference/ml/images/ml-data-visualizer.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-visualizer.jpg rename to docs/reference/ml/images/ml-data-visualizer.jpg diff --git a/x-pack/docs/en/ml/images/ml-edit-job.jpg b/docs/reference/ml/images/ml-edit-job.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-edit-job.jpg rename to docs/reference/ml/images/ml-edit-job.jpg diff --git a/x-pack/docs/en/ml/images/ml-population-anomaly.jpg b/docs/reference/ml/images/ml-population-anomaly.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-population-anomaly.jpg rename to docs/reference/ml/images/ml-population-anomaly.jpg diff --git a/x-pack/docs/en/ml/images/ml-population-job.jpg b/docs/reference/ml/images/ml-population-job.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-population-job.jpg rename to docs/reference/ml/images/ml-population-job.jpg diff --git a/x-pack/docs/en/ml/images/ml-population-results.jpg b/docs/reference/ml/images/ml-population-results.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-population-results.jpg rename to docs/reference/ml/images/ml-population-results.jpg diff --git a/x-pack/docs/en/ml/images/ml-scriptfields.jpg b/docs/reference/ml/images/ml-scriptfields.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-scriptfields.jpg rename to docs/reference/ml/images/ml-scriptfields.jpg diff --git 
a/x-pack/docs/en/ml/images/ml-start-feed.jpg b/docs/reference/ml/images/ml-start-feed.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-start-feed.jpg rename to docs/reference/ml/images/ml-start-feed.jpg diff --git a/x-pack/docs/en/ml/images/ml-stop-feed.jpg b/docs/reference/ml/images/ml-stop-feed.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-stop-feed.jpg rename to docs/reference/ml/images/ml-stop-feed.jpg diff --git a/x-pack/docs/en/ml/images/ml.jpg b/docs/reference/ml/images/ml.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml.jpg rename to docs/reference/ml/images/ml.jpg diff --git a/x-pack/docs/en/ml/populations.asciidoc b/docs/reference/ml/populations.asciidoc similarity index 94% rename from x-pack/docs/en/ml/populations.asciidoc rename to docs/reference/ml/populations.asciidoc index bf0dd2ad7d7..ed58c117f17 100644 --- a/x-pack/docs/en/ml/populations.asciidoc +++ b/docs/reference/ml/populations.asciidoc @@ -51,14 +51,11 @@ PUT _xpack/ml/anomaly_detectors/population } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> This `over_field_name` property indicates that the metrics for each user (as identified by their `username` value) are analyzed relative to other users in each bucket. -//TO-DO: Per sophiec20 "Perhaps add the datafeed config and add a query filter to -//include only workstations as servers and printers would behave differently -//from the population - If your data is stored in {es}, you can use the population job wizard in {kib} to create a job with these same properties. For example, the population job wizard provides the following job settings: diff --git a/x-pack/docs/en/ml/stopping-ml.asciidoc b/docs/reference/ml/stopping-ml.asciidoc similarity index 94% rename from x-pack/docs/en/ml/stopping-ml.asciidoc rename to docs/reference/ml/stopping-ml.asciidoc index c0be2d947cd..17505a02d15 100644 --- a/x-pack/docs/en/ml/stopping-ml.asciidoc +++ b/docs/reference/ml/stopping-ml.asciidoc @@ -28,7 +28,7 @@ request stops the `feed1` {dfeed}: POST _xpack/ml/datafeeds/datafeed-total-requests/_stop -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_startdf] +// TEST[skip:setup:server_metrics_startdf] NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. For more information, see <>. @@ -49,6 +49,7 @@ If you are upgrading your cluster, you can use the following request to stop all POST _xpack/ml/datafeeds/_all/_stop ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] [float] [[closing-ml-jobs]] @@ -67,7 +68,7 @@ example, the following request closes the `job1` job: POST _xpack/ml/anomaly_detectors/total-requests/_close -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] NOTE: You must have `manage_ml`, or `manage` cluster privileges to close {ml} jobs. For more information, see <>.
@@ -86,3 +87,4 @@ all open jobs on the cluster: POST _xpack/ml/anomaly_detectors/_all/_close ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] diff --git a/x-pack/docs/en/ml/transforms.asciidoc b/docs/reference/ml/transforms.asciidoc similarity index 97% rename from x-pack/docs/en/ml/transforms.asciidoc rename to docs/reference/ml/transforms.asciidoc index c4b4d560297..a2276895fc9 100644 --- a/x-pack/docs/en/ml/transforms.asciidoc +++ b/docs/reference/ml/transforms.asciidoc @@ -95,7 +95,7 @@ PUT /my_index/my_type/1 } ---------------------------------- // CONSOLE -// TESTSETUP +// TEST[skip:SETUP] <1> In this example, string fields are mapped as `keyword` fields to support aggregation. If you want both a full text (`text`) and a keyword (`keyword`) version of the same field, use multi-fields. For more information, see @@ -144,7 +144,7 @@ PUT _xpack/ml/datafeeds/datafeed-test1 } ---------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] <1> A script field named `total_error_count` is referenced in the detector within the job. <2> The script field is defined in the {dfeed}. @@ -163,7 +163,7 @@ You can preview the contents of the {dfeed} by using the following API: GET _xpack/ml/datafeeds/datafeed-test1/_preview ---------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] In this example, the API returns the following results, which contain a sum of the `error_count` and `aborted_count` values: @@ -177,8 +177,6 @@ the `error_count` and `aborted_count` values: } ] ---------------------------------- -// TESTRESPONSE - NOTE: This example demonstrates how to use script fields, but it contains insufficient data to generate meaningful results. For a full demonstration of @@ -254,7 +252,7 @@ PUT _xpack/ml/datafeeds/datafeed-test2 GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] <1> The script field has a rather generic name in this case, since it will be used for various tests in the subsequent examples. <2> The script field uses the plus (+) operator to concatenate strings. @@ -271,7 +269,6 @@ and "SMITH " have been concatenated and an underscore was added: } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform3]] .Example 3: Trimming strings @@ -292,7 +289,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field uses the `trim()` function to trim extra white space from a string. @@ -308,7 +305,6 @@ has been trimmed to "SMITH": } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform4]] .Example 4: Converting strings to lowercase @@ -329,7 +325,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field uses the `toLowerCase` function to convert a string to all lowercase letters. Likewise, you can use the `toUpperCase{}` function to convert a string to uppercase letters. 
@@ -346,7 +342,6 @@ has been converted to "joe": } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform5]] .Example 5: Converting strings to mixed case formats @@ -367,7 +362,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field is a more complicated example of case manipulation. It uses the `subString()` function to capitalize the first letter of a string and converts the remaining characters to lowercase. @@ -384,7 +379,6 @@ has been converted to "Joe": } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform6]] .Example 6: Replacing tokens @@ -405,7 +399,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field uses regular expressions to replace white space with underscores. @@ -421,7 +415,6 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform7]] .Example 7: Regular expression matching and concatenation @@ -442,7 +435,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field looks for a specific regular expression pattern and emits the matched groups as a concatenated string. If no match is found, it emits an empty string. @@ -459,7 +452,6 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform8]] .Example 8: Splitting strings by domain name @@ -509,7 +501,7 @@ PUT _xpack/ml/datafeeds/datafeed-test3 GET _xpack/ml/datafeeds/datafeed-test3/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] If you have a single field that contains a well-formed DNS domain name, you can use the `domainSplit()` function to split the string into its highest registered @@ -537,7 +529,6 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform9]] .Example 9: Transforming geo_point data @@ -583,7 +574,7 @@ PUT _xpack/ml/datafeeds/datafeed-test4 GET _xpack/ml/datafeeds/datafeed-test4/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] In {es}, location data can be stored in `geo_point` fields but this data type is not supported natively in {xpackml} analytics. This example of a script field @@ -602,4 +593,4 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE + diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 96867494868..bdc56999553 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -22,6 +22,27 @@ user with access to the <> API can make the cluster read-write again. +[[user-defined-data]] +==== User Defined Cluster Metadata + +User-defined metadata can be stored and retrieved using the Cluster Settings API. 
+This can be used to store arbitrary, infrequently-changing data about the cluster +without the need to create an index to store it. This data may be stored using +any key prefixed with `cluster.metadata.`. For example, to store the email +address of the administrator of a cluster under the key `cluster.metadata.administrator`, +issue this request: + +[source,js] +------------------------------- +PUT /_cluster/settings +{ + "persistent": { + "cluster.metadata.administrator": "sysadmin@example.com" + } +} +------------------------------- +// CONSOLE + [[cluster-max-tombstones]] ==== Index Tombstones diff --git a/docs/reference/modules/cross-cluster-search.asciidoc b/docs/reference/modules/cross-cluster-search.asciidoc index 21e21edc35b..d6c65eaff01 100644 --- a/docs/reference/modules/cross-cluster-search.asciidoc +++ b/docs/reference/modules/cross-cluster-search.asciidoc @@ -38,7 +38,7 @@ remote clusters that should be connected to, for instance: [source,yaml] -------------------------------- -search: +cluster: remote: cluster_one: <1> seeds: 127.0.0.1:9300 @@ -58,7 +58,7 @@ following: PUT _cluster/settings { "persistent": { - "search": { + "cluster": { "remote": { "cluster_one": { "seeds": [ @@ -94,7 +94,7 @@ because we'll use them later. { "acknowledged" : true, "persistent": { - "search": { + "cluster": { "remote": { "cluster_one": { "seeds": [ @@ -129,7 +129,7 @@ A remote cluster can be deleted from the cluster settings by setting its seeds t PUT _cluster/settings { "persistent": { - "search": { + "cluster": { "remote": { "cluster_three": { "seeds": null <1> @@ -309,7 +309,7 @@ boolean `skip_unavailable` setting, set to `false` by default. PUT _cluster/settings { "persistent": { - "search.remote.cluster_two.skip_unavailable": true <1> + "cluster.remote.cluster_two.skip_unavailable": true <1> } } -------------------------------- @@ -391,30 +391,30 @@ GET /cluster_one:twitter,cluster_two:twitter,twitter/_search <1> [[cross-cluster-search-settings]] === Cross cluster search settings -`search.remote.connections_per_cluster`:: +`cluster.remote.connections_per_cluster`:: The number of nodes to connect to per remote cluster. The default is `3`. -`search.remote.initial_connect_timeout`:: +`cluster.remote.initial_connect_timeout`:: The time to wait for remote connections to be established when the node starts. The default is `30s`. -`search.remote.node.attr`:: +`cluster.remote.node.attr`:: A node attribute to filter out nodes that are eligible as a gateway node in the remote cluster. For instance a node can have a node attribute `node.attr.gateway: true` such that only nodes with this attribute will be - connected to if `search.remote.node.attr` is set to `gateway`. + connected to if `cluster.remote.node.attr` is set to `gateway`. -`search.remote.connect`:: +`cluster.remote.connect`:: By default, any node in the cluster can act as a cross-cluster client and - connect to remote clusters. The `search.remote.connect` setting can be set + connect to remote clusters. The `cluster.remote.connect` setting can be set to `false` (defaults to `true`) to prevent certain nodes from connecting to remote clusters. Cross-cluster search requests must be sent to a node that is allowed to act as a cross-cluster client. 
-`search.remote.${cluster_alias}.skip_unavailable`:: +`cluster.remote.${cluster_alias}.skip_unavailable`:: Per cluster boolean setting that allows to skip specific clusters when no nodes belonging to them are available and they are searched as part of a diff --git a/docs/reference/modules/discovery/zen.asciidoc b/docs/reference/modules/discovery/zen.asciidoc index f0f26a46659..e9be7aa52e8 100644 --- a/docs/reference/modules/discovery/zen.asciidoc +++ b/docs/reference/modules/discovery/zen.asciidoc @@ -1,13 +1,12 @@ [[modules-discovery-zen]] === Zen Discovery -The zen discovery is the built in discovery module for Elasticsearch and -the default. It provides unicast discovery, but can be extended to -support cloud environments and other forms of discovery. +Zen discovery is the built-in, default discovery module for Elasticsearch. It +provides unicast and file-based discovery, and can be extended to support cloud +environments and other forms of discovery via plugins. -The zen discovery is integrated with other modules, for example, all -communication between nodes is done using the -<> module. +Zen discovery is integrated with other modules, for example, all communication +between nodes is done using the <> module. It is separated into several sub modules, which are explained below: @@ -15,86 +14,155 @@ It is separated into several sub modules, which are explained below: [float] [[ping]] ==== Ping -This is the process where a node uses the discovery mechanisms to find -other nodes. +This is the process where a node uses the discovery mechanisms to find other +nodes. + +[float] +[[discovery-seed-nodes]] +==== Seed nodes + +Zen discovery uses a list of _seed_ nodes in order to start off the discovery +process. At startup, or when electing a new master, Elasticsearch tries to +connect to each seed node in its list, and holds a gossip-like conversation with +them to find other nodes and to build a complete picture of the cluster. By +default, there are two methods for configuring the list of seed nodes: _unicast_ +and _file-based_. It is recommended that the list of seed nodes comprises the +list of master-eligible nodes in the cluster. [float] [[unicast]] ===== Unicast -Unicast discovery requires a list of hosts to use that will act as gossip -routers. These hosts can be specified as hostnames or IP addresses; hosts -specified as hostnames are resolved to IP addresses during each round of -pinging. Note that if you are in an environment where DNS resolutions vary with -time, you might need to adjust your <>. +Unicast discovery configures a static list of hosts for use as seed nodes. +These hosts can be specified as hostnames or IP addresses; hosts specified as +hostnames are resolved to IP addresses during each round of pinging. Note that +if you are in an environment where DNS resolutions vary with time, you might +need to adjust your <>. -It is recommended that the unicast hosts list be maintained as the list of -master-eligible nodes in the cluster. +The list of hosts is set using the `discovery.zen.ping.unicast.hosts` static +setting. This is either an array of hosts or a comma-delimited string. Each +value should be in the form of `host:port` or `host` (where `port` defaults to +the setting `transport.profiles.default.port` falling back to +`transport.tcp.port` if not set). Note that IPv6 hosts must be bracketed.
The +default for this setting is `127.0.0.1, [::1]`. -Unicast discovery provides the following settings with the `discovery.zen.ping.unicast` prefix: +Additionally, the `discovery.zen.ping.unicast.resolve_timeout` configures the +amount of time to wait for DNS lookups on each round of pinging. This is +specified as a <> and defaults to 5s. -[cols="<,<",options="header",] -|======================================================================= -|Setting |Description -|`hosts` |Either an array setting or a comma delimited setting. Each - value should be in the form of `host:port` or `host` (where `port` defaults to the setting `transport.profiles.default.port` - falling back to `transport.tcp.port` if not set). Note that IPv6 hosts must be bracketed. Defaults to `127.0.0.1, [::1]` -|`hosts.resolve_timeout` |The amount of time to wait for DNS lookups on each round of pinging. Specified as -<>. Defaults to 5s. -|======================================================================= +Unicast discovery uses the <> module to perform the +discovery. -The unicast discovery uses the <> module to perform the discovery. +[float] +[[file-based-hosts-provider]] +===== File-based + +In addition to hosts provided by the static `discovery.zen.ping.unicast.hosts` +setting, it is possible to provide a list of hosts via an external file. +Elasticsearch reloads this file when it changes, so that the list of seed nodes +can change dynamically without needing to restart each node. For example, this +gives a convenient mechanism for an Elasticsearch instance that is run in a +Docker container to be dynamically supplied with a list of IP addresses to +connect to for Zen discovery when those IP addresses may not be known at node +startup. + +To enable file-based discovery, configure the `file` hosts provider as follows: + +[source,txt] +---------------------------------------------------------------- +discovery.zen.hosts_provider: file +---------------------------------------------------------------- + +Then create a file at `$ES_PATH_CONF/unicast_hosts.txt` in the format described +below. Any time a change is made to the `unicast_hosts.txt` file, the new +changes will be picked up by Elasticsearch and the new hosts list will be used. + +Note that the file-based discovery plugin augments the unicast hosts list in +`elasticsearch.yml`: if there are valid unicast host entries in +`discovery.zen.ping.unicast.hosts` then they will be used in addition to those +supplied in `unicast_hosts.txt`. + +The `discovery.zen.ping.unicast.resolve_timeout` setting also applies to DNS +lookups for nodes specified by address via file-based discovery. This is +specified as a <> and defaults to 5s. + +The format of the file is to specify one node entry per line. Each node entry +consists of the host (host name or IP address) and an optional transport port +number. If the port number is specified, it must come immediately after the +host (on the same line) separated by a `:`. If the port number is not +specified, a default value of 9300 is used.
+ +For example, this is the content of `unicast_hosts.txt` for a cluster with four +nodes that participate in unicast discovery, some of which are not running on +the default port: + +[source,txt] +---------------------------------------------------------------- +10.10.10.5 +10.10.10.6:9305 +10.10.10.5:10005 +# an IPv6 address +[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301 +---------------------------------------------------------------- + +Host names are allowed instead of IP addresses (similar to +`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be specified in +brackets with the port coming after the brackets. + +It is also possible to add comments to this file. All comments must appear on +their own lines, starting with `#` (i.e. comments cannot start in the middle of a +line). [float] [[master-election]] ==== Master Election -As part of the ping process a master of the cluster is either -elected or joined to. This is done automatically. The -`discovery.zen.ping_timeout` (which defaults to `3s`) determines how long the node -will wait before deciding on starting an election or joining an existing cluster. -Three pings will be sent over this timeout interval. In case where no decision can be -reached after the timeout, the pinging process restarts. -In slow or congested networks, three seconds might not be enough for a node to become -aware of the other nodes in its environment before making an election decision. -Increasing the timeout should be done with care in that case, as it will slow down the -election process. -Once a node decides to join an existing formed cluster, it -will send a join request to the master (`discovery.zen.join_timeout`) -with a timeout defaulting at 20 times the ping timeout. +As part of the ping process a master of the cluster is either elected or joined +to. This is done automatically. The `discovery.zen.ping_timeout` (which defaults +to `3s`) determines how long the node will wait before deciding on starting an +election or joining an existing cluster. Three pings will be sent over this +timeout interval. If no decision can be reached after the timeout, +the pinging process restarts. In slow or congested networks, three seconds +might not be enough for a node to become aware of the other nodes in its +environment before making an election decision. Increasing the timeout should +be done with care in that case, as it will slow down the election process. Once +a node decides to join an existing formed cluster, it will send a join request +to the master (`discovery.zen.join_timeout`) with a timeout defaulting to 20 +times the ping timeout. -When the master node stops or has encountered a problem, the cluster nodes -start pinging again and will elect a new master. This pinging round also -serves as a protection against (partial) network failures where a node may unjustly -think that the master has failed. In this case the node will simply hear from -other nodes about the currently active master. +When the master node stops or has encountered a problem, the cluster nodes start +pinging again and will elect a new master. This pinging round also serves as a +protection against (partial) network failures where a node may unjustly think +that the master has failed. In this case the node will simply hear from other +nodes about the currently active master.
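+ +For reference, the following `elasticsearch.yml` sketch raises both of the timeouts discussed above for a slow or congested network. The values shown here are illustrative assumptions only, not recommendations: + +[source,yaml] +---------------------------------------------------------------- +# hypothetical values: wait longer before starting an election or joining +discovery.zen.ping_timeout: 10s +# defaults to 20 * ping_timeout when not set explicitly +discovery.zen.join_timeout: 200s +----------------------------------------------------------------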
-If `discovery.zen.master_election.ignore_non_master_pings` is `true`, pings from nodes that are not master -eligible (nodes where `node.master` is `false`) are ignored during master election; the default value is +If `discovery.zen.master_election.ignore_non_master_pings` is `true`, pings from +nodes that are not master eligible (nodes where `node.master` is `false`) are +ignored during master election; the default value is `false`. + +Nodes can be excluded from becoming a master by setting `node.master` to `false`. -Nodes can be excluded from becoming a master by setting `node.master` to `false`. - -The `discovery.zen.minimum_master_nodes` sets the minimum -number of master eligible nodes that need to join a newly elected master in order for an election to -complete and for the elected node to accept its mastership. The same setting controls the minimum number of -active master eligible nodes that should be a part of any active cluster. If this requirement is not met the -active master node will step down and a new master election will begin. +The `discovery.zen.minimum_master_nodes` setting sets the minimum number of master +eligible nodes that need to join a newly elected master in order for an election +to complete and for the elected node to accept its mastership. The same setting +controls the minimum number of active master eligible nodes that should be a +part of any active cluster. If this requirement is not met, the active master +node will step down and a new master election will begin. This setting must be set to a <> of your master eligible nodes. It is recommended to avoid having only two master eligible -nodes, since a quorum of two is two. Therefore, a loss of either master -eligible node will result in an inoperable cluster. +nodes, since a quorum of two is two. Therefore, a loss of either master eligible +node will result in an inoperable cluster. [float] [[fault-detection]] ==== Fault Detection -There are two fault detection processes running. The first is by the -master, to ping all the other nodes in the cluster and verify that they -are alive. And on the other end, each node pings to master to verify if -its still alive or an election process needs to be initiated. +There are two fault detection processes running. The first is by the master, to +ping all the other nodes in the cluster and verify that they are alive. On the +other end, each node pings the master to verify that it is still alive, or whether an +election process needs to be initiated. The following settings control the fault detection process using the `discovery.zen.fd` prefix: @@ -116,19 +184,21 @@ considered failed. Defaults to `3`. The master node is the only node in a cluster that can make changes to the cluster state. The master node processes one cluster state update at a time, -applies the required changes and publishes the updated cluster state to all -the other nodes in the cluster. Each node receives the publish message, acknowledges -it, but does *not* yet apply it. If the master does not receive acknowledgement from -at least `discovery.zen.minimum_master_nodes` nodes within a certain time (controlled by -the `discovery.zen.commit_timeout` setting and defaults to 30 seconds) the cluster state -change is rejected. +applies the required changes and publishes the updated cluster state to all the +other nodes in the cluster. Each node receives the publish message, acknowledges +it, but does *not* yet apply it.
If the master does not receive acknowledgement +from at least `discovery.zen.minimum_master_nodes` nodes within a certain time +(controlled by the `discovery.zen.commit_timeout` setting, which defaults to 30 +seconds), the cluster state change is rejected. -Once enough nodes have responded, the cluster state is committed and a message will -be sent to all the nodes. The nodes then proceed to apply the new cluster state to their -internal state. The master node waits for all nodes to respond, up to a timeout, before -going ahead processing the next updates in the queue. The `discovery.zen.publish_timeout` is -set by default to 30 seconds and is measured from the moment the publishing started. Both -timeout settings can be changed dynamically through the <> +Once enough nodes have responded, the cluster state is committed and a message +will be sent to all the nodes. The nodes then proceed to apply the new cluster +state to their internal state. The master node waits for all nodes to respond, +up to a timeout, before going ahead and processing the next updates in the queue. +The `discovery.zen.publish_timeout` is set by default to 30 seconds and is +measured from the moment the publishing started. Both timeout settings can be +changed dynamically through the <>. [float] [[no-master-block]] @@ -143,10 +213,14 @@ rejected when there is no active master. The `discovery.zen.no_master_block` setting has two valid options: [horizontal] -`all`:: All operations on the node--i.e. both read & writes--will be rejected. This also applies for api cluster state -read or write operations, like the get index settings, put mapping and cluster state api. -`write`:: (default) Write operations will be rejected. Read operations will succeed, based on the last known cluster configuration. -This may result in partial reads of stale data as this node may be isolated from the rest of the cluster. +`all`:: All operations on the node--i.e. both reads and writes--will be rejected. +This also applies to API cluster state read or write operations, like the get +index settings, put mapping, and cluster state APIs. +`write`:: (default) Write operations will be rejected. Read operations will +succeed, based on the last known cluster configuration. This may result in +partial reads of stale data as this node may be isolated from the rest of the +cluster. -The `discovery.zen.no_master_block` setting doesn't apply to nodes-based apis (for example cluster stats, node info and -node stats apis). Requests to these apis will not be blocked and can run on any available node. +The `discovery.zen.no_master_block` setting doesn't apply to node-based APIs +(for example the cluster stats, node info, and node stats APIs). Requests to these +APIs will not be blocked and can run on any available node. diff --git a/docs/reference/modules/gateway.asciidoc b/docs/reference/modules/gateway.asciidoc index 76e08407939..038a4b24a85 100644 --- a/docs/reference/modules/gateway.asciidoc +++ b/docs/reference/modules/gateway.asciidoc @@ -51,7 +51,7 @@ NOTE: These settings only take effect on a full cluster restart. === Dangling indices -When a node joins the cluster, any shards stored in its local data directory +When a node joins the cluster, any shards stored in its local data directory which do not already exist in the cluster will be imported into the cluster. This functionality is intended as a best effort to help users who lose all master nodes.
If a new master node is started which is unaware of diff --git a/docs/reference/modules/ml-node.asciidoc b/docs/reference/modules/ml-node.asciidoc index 316df743bf9..9e4413e3a0c 100644 --- a/docs/reference/modules/ml-node.asciidoc +++ b/docs/reference/modules/ml-node.asciidoc @@ -59,7 +59,7 @@ To create a dedicated ingest node when {xpack} is installed, set: node.master: false <1> node.data: false <2> node.ingest: true <3> -search.remote.connect: false <4> +cluster.remote.connect: false <4> node.ml: false <5> ------------------- <1> Disable the `node.master` role (enabled by default). @@ -75,7 +75,7 @@ To create a dedicated coordinating node when {xpack} is installed, set: node.master: false <1> node.data: false <2> node.ingest: false <3> -search.remote.connect: false <4> +cluster.remote.connect: false <4> node.ml: false <5> ------------------- <1> Disable the `node.master` role (enabled by default). @@ -105,7 +105,7 @@ To create a dedicated {ml} node, set: node.master: false <1> node.data: false <2> node.ingest: false <3> -search.remote.connect: false <4> +cluster.remote.connect: false <4> node.ml: true <5> xpack.ml.enabled: true <6> ------------------- diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index f772977e3f0..2d0cee85e29 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -93,7 +93,7 @@ To create a dedicated master-eligible node, set: node.master: true <1> node.data: false <2> node.ingest: false <3> -search.remote.connect: false <4> +cluster.remote.connect: false <4> ------------------- <1> The `node.master` role is enabled by default. <2> Disable the `node.data` role (enabled by default). @@ -192,7 +192,7 @@ To create a dedicated data node, set: node.master: false <1> node.data: true <2> node.ingest: false <3> -search.remote.connect: false <4> +cluster.remote.connect: false <4> ------------------- <1> Disable the `node.master` role (enabled by default). <2> The `node.data` role is enabled by default. @@ -220,7 +220,7 @@ To create a dedicated ingest node, set: node.master: false <1> node.data: false <2> node.ingest: true <3> -search.remote.connect: false <4> +cluster.remote.connect: false <4> ------------------- <1> Disable the `node.master` role (enabled by default). <2> Disable the `node.data` role (enabled by default). @@ -260,7 +260,7 @@ To create a dedicated coordinating node, set: node.master: false <1> node.data: false <2> node.ingest: false <3> -search.remote.connect: false <4> +cluster.remote.connect: false <4> ------------------- <1> Disable the `node.master` role (enabled by default). <2> Disable the `node.data` role (enabled by default). diff --git a/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc b/docs/reference/monitoring/configuring-monitoring.asciidoc similarity index 90% rename from x-pack/docs/en/monitoring/configuring-monitoring.asciidoc rename to docs/reference/monitoring/configuring-monitoring.asciidoc index e705100e05e..3bcfef2acbf 100644 --- a/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc +++ b/docs/reference/monitoring/configuring-monitoring.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="gold"] [[configuring-monitoring]] -== Configuring Monitoring in {es} +== Configuring monitoring in {es} ++++ -Configuring Monitoring +Configuring monitoring ++++ By default, {monitoring} is enabled but data collection is disabled. Advanced @@ -47,21 +47,19 @@ as {kib}, Beats, and Logstash to a monitoring cluster: .. 
Verify that `xpack.monitoring.collection.enabled` settings are `true` on each node in the cluster. -.. {xpack-ref}/xpack-monitoring.html[Configure {monitoring} across the Elastic Stack]. +.. {stack-ov}/xpack-monitoring.html[Configure {monitoring} across the Elastic Stack]. . Identify where to store monitoring data. + -- By default, {monitoring} uses a `local` exporter that indexes monitoring data -on the same cluster. -//See <> and <>. +on the same cluster. See <> and <>. Alternatively, you can use an `http` exporter to send data to a separate -monitoring cluster. -//See <>. +monitoring cluster. See <>. For more information about typical monitoring architectures, -see {xpack-ref}/how-monitoring-works.html[How Monitoring Works]. +see {stack-ov}/how-monitoring-works.html[How Monitoring Works]. -- . If {security} is enabled and you are using an `http` exporter to send data to @@ -82,6 +80,7 @@ POST /_xpack/security/user/remote_monitor } --------------------------------------------------------------- // CONSOLE +// TEST[skip:needs-gold+-license] -- .. On each node in the cluster that is being monitored, configure the `http` @@ -136,7 +135,7 @@ read from the monitoring indices. You set up {monitoring} UI users on the cluster where the monitoring data is stored, that is to say the monitoring cluster. To grant all of the necessary permissions, assign users the `monitoring_user` and `kibana_user` roles. For more information, see -{xpack-ref}/mapping-roles.html[Mapping users and groups to roles]. +{stack-ov}/mapping-roles.html[Mapping users and groups to roles]. -- . Optional: diff --git a/docs/reference/monitoring/exporters.asciidoc b/docs/reference/monitoring/exporters.asciidoc index 2a7729eee94..a1d4bc08ae7 100644 --- a/docs/reference/monitoring/exporters.asciidoc +++ b/docs/reference/monitoring/exporters.asciidoc @@ -105,12 +105,12 @@ route monitoring data: [options="header"] |======================= -| Template | Purpose -| `.monitoring-alerts` | All cluster alerts for monitoring data. -| `.monitoring-beats` | All Beats monitoring data. -| `.monitoring-es` | All {es} monitoring data. -| `.monitoring-kibana` | All {kib} monitoring data. -| `.monitoring-logstash` | All Logstash monitoring data. +| Template | Purpose +| `.monitoring-alerts` | All cluster alerts for monitoring data. +| `.monitoring-beats` | All Beats monitoring data. +| `.monitoring-es` | All {es} monitoring data. +| `.monitoring-kibana` | All {kib} monitoring data. +| `.monitoring-logstash` | All Logstash monitoring data. |======================= The templates are ordinary {es} templates that control the default settings and diff --git a/docs/reference/monitoring/http-export.asciidoc b/docs/reference/monitoring/http-export.asciidoc index 4dfe1a0c537..4ba93f32637 100644 --- a/docs/reference/monitoring/http-export.asciidoc +++ b/docs/reference/monitoring/http-export.asciidoc @@ -96,7 +96,7 @@ see <>. [[http-exporter-dns]] ==== Using DNS Hosts in HTTP Exporters -{monitoring} runs inside of the the JVM security manager. When the JVM has the +{monitoring} runs inside of the JVM security manager. When the JVM has the security manager enabled, the JVM changes the duration so that it caches DNS lookups indefinitely (for example, the mapping of a DNS hostname to an IP address). 
For this reason, if you are in an environment where the DNS response diff --git a/x-pack/docs/en/monitoring/indices.asciidoc b/docs/reference/monitoring/indices.asciidoc similarity index 98% rename from x-pack/docs/en/monitoring/indices.asciidoc rename to docs/reference/monitoring/indices.asciidoc index a27d91d423e..658ac389ae8 100644 --- a/x-pack/docs/en/monitoring/indices.asciidoc +++ b/docs/reference/monitoring/indices.asciidoc @@ -13,7 +13,6 @@ You can retrieve the templates through the `_template` API: GET /_template/.monitoring-* ---------------------------------- // CONSOLE -// TEST[catch:missing] By default, the template configures one shard and one replica for the monitoring indices. To override the default settings, add your own template: diff --git a/docs/reference/query-dsl/span-multi-term-query.asciidoc b/docs/reference/query-dsl/span-multi-term-query.asciidoc index 40bd1553298..de78d80284e 100644 --- a/docs/reference/query-dsl/span-multi-term-query.asciidoc +++ b/docs/reference/query-dsl/span-multi-term-query.asciidoc @@ -41,5 +41,5 @@ WARNING: `span_multi` queries will hit too many clauses failure if the number of boolean query limit (defaults to 1024).To avoid an unbounded expansion you can set the <> of the multi term query to `top_terms_*` rewrite. Or, if you use `span_multi` on `prefix` query only, you can activate the <> field option of the `text` field instead. This will -rewrite any prefix query on the field to a a single term query that matches the indexed prefix. +rewrite any prefix query on the field to a single term query that matches the indexed prefix. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index d67d8a733ac..f07d1d09747 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -531,3 +531,32 @@ native realm: * <>, <> * <> * <> + +[role="exclude",id="security-api-role-mapping"] +=== Role mapping APIs + +You can use the following APIs to add, remove, and retrieve role mappings: + +* <>, <> +* <> + +[role="exclude",id="security-api-privileges"] +=== Privilege APIs + +See <>. + +[role="exclude",id="xpack-commands"] +=== X-Pack commands + +See <>. + +[role="exclude",id="ml-api-definitions"] +=== Machine learning API definitions + +See <>. + +[role="exclude",id="analysis-standard-tokenfilter"] +=== Standard filter removed + +The standard token filter has been removed. + diff --git a/docs/reference/rest-api/defs.asciidoc b/docs/reference/rest-api/defs.asciidoc new file mode 100644 index 00000000000..4eeedc55399 --- /dev/null +++ b/docs/reference/rest-api/defs.asciidoc @@ -0,0 +1,27 @@ +[role="xpack"] +[[api-definitions]] +== Definitions + +These resource definitions are used in {ml} and {security} APIs and in {kib} +advanced {ml} job configuration options. 
+ +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +include::{es-repo-dir}/ml/apis/calendarresource.asciidoc[] +include::{es-repo-dir}/ml/apis/datafeedresource.asciidoc[] +include::{es-repo-dir}/ml/apis/filterresource.asciidoc[] +include::{es-repo-dir}/ml/apis/jobresource.asciidoc[] +include::{es-repo-dir}/ml/apis/jobcounts.asciidoc[] +include::{es-repo-dir}/ml/apis/snapshotresource.asciidoc[] +include::{xes-repo-dir}/rest-api/security/role-mapping-resources.asciidoc[] +include::{es-repo-dir}/ml/apis/resultsresource.asciidoc[] +include::{es-repo-dir}/ml/apis/eventresource.asciidoc[] diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 9ec57940dd2..b80e8badf5b 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -22,8 +22,8 @@ include::info.asciidoc[] include::{xes-repo-dir}/rest-api/graph/explore.asciidoc[] include::{es-repo-dir}/licensing/index.asciidoc[] include::{es-repo-dir}/migration/migration.asciidoc[] -include::{xes-repo-dir}/rest-api/ml-api.asciidoc[] -include::{xes-repo-dir}/rest-api/rollup-api.asciidoc[] +include::{es-repo-dir}/ml/apis/ml-api.asciidoc[] +include::{es-repo-dir}/rollup/rollup-api.asciidoc[] include::{xes-repo-dir}/rest-api/security.asciidoc[] include::{xes-repo-dir}/rest-api/watcher.asciidoc[] -include::{xes-repo-dir}/rest-api/defs.asciidoc[] +include::defs.asciidoc[] diff --git a/x-pack/docs/en/rollup/api-quickref.asciidoc b/docs/reference/rollup/api-quickref.asciidoc similarity index 97% rename from x-pack/docs/en/rollup/api-quickref.asciidoc rename to docs/reference/rollup/api-quickref.asciidoc index 5e99f1c6984..1d372a03ddc 100644 --- a/x-pack/docs/en/rollup/api-quickref.asciidoc +++ b/docs/reference/rollup/api-quickref.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-api-quickref]] == API Quick Reference diff --git a/x-pack/docs/en/rest-api/rollup/delete-job.asciidoc b/docs/reference/rollup/apis/delete-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/delete-job.asciidoc rename to docs/reference/rollup/apis/delete-job.asciidoc index b795e0b28c7..37774560848 100644 --- a/x-pack/docs/en/rest-api/rollup/delete-job.asciidoc +++ b/docs/reference/rollup/apis/delete-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-delete-job]] === Delete Job API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/get-job.asciidoc b/docs/reference/rollup/apis/get-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/get-job.asciidoc rename to docs/reference/rollup/apis/get-job.asciidoc index 96053dbfea6..794d7248012 100644 --- a/x-pack/docs/en/rest-api/rollup/get-job.asciidoc +++ b/docs/reference/rollup/apis/get-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-get-job]] === Get Rollup Jobs API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/rollup/put-job.asciidoc rename to docs/reference/rollup/apis/put-job.asciidoc index 1449acadc63..79e30ae8dc9 100644 --- a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-put-job]] === Create Job API ++++ @@ -43,6 +44,8 @@ started with the <>. `metrics`:: (object) Defines the metrics that should be collected for each grouping tuple. See <>. +For more details about the job configuration, see <>. 
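+ +As a concrete sketch of how these fields fit together, the following request mirrors the `sensor` example used elsewhere in these docs; the index and field names are illustrative only: + +[source,js] +-------------------------------------------------- +PUT _xpack/rollup/job/sensor +{ + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "*/30 * * * * ?", + "page_size": 1000, + "groups": { + "date_histogram": { + "field": "timestamp", + "interval": "60m" + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:illustrative sketch]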
+ + ==== Authorization You must have `manage` or `manage_rollup` cluster privileges to use this API. diff --git a/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc b/docs/reference/rollup/apis/rollup-caps.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc rename to docs/reference/rollup/apis/rollup-caps.asciidoc index 1f233f195a0..907efb94c17 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-caps.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-get-rollup-caps]] === Get Rollup Job Capabilities ++++ diff --git a/x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc similarity index 100% rename from x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc rename to docs/reference/rollup/apis/rollup-index-caps.asciidoc diff --git a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc b/docs/reference/rollup/apis/rollup-job-config.asciidoc similarity index 83% rename from x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc rename to docs/reference/rollup/apis/rollup-job-config.asciidoc index 2ba92b6b59e..3a917fb59f2 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc +++ b/docs/reference/rollup/apis/rollup-job-config.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-job-config]] === Rollup Job Configuration @@ -23,7 +24,7 @@ PUT _xpack/rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "interval": "60m", "delay": "7d" }, "terms": { @@ -99,7 +100,7 @@ fields will then be available later for aggregating into buckets. For example, "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "interval": "60m", "delay": "7d" }, "terms": { @@ -133,9 +134,9 @@ The `date_histogram` group has several parameters: The date field that is to be rolled up. `interval` (required):: - The interval of time buckets to be generated when rolling up. E.g. `"1h"` will produce hourly rollups. This follows standard time formatting - syntax as used elsewhere in Elasticsearch. The `interval` defines the _minimum_ interval that can be aggregated only. If hourly (`"1h"`) - intervals are configured, <> can execute aggregations with 1hr or greater (weekly, monthly, etc) intervals. + The interval of time buckets to be generated when rolling up. E.g. `"60m"` will produce 60 minute (hourly) rollups. This follows standard time formatting + syntax as used elsewhere in Elasticsearch. The `interval` defines the _minimum_ interval that can be aggregated only. If hourly (`"60m"`) + intervals are configured, <> can execute aggregations with 60m or greater (weekly, monthly, etc) intervals. So define the interval as the smallest unit that you wish to later query. Note: smaller, more granular intervals take up proportionally more space. @@ -154,6 +155,46 @@ The `date_histogram` group has several parameters: to be stored with a specific timezone. By default, rollup documents are stored in `UTC`, but this can be changed with the `time_zone` parameter. +.Calendar vs Fixed time intervals +********************************** +Elasticsearch understands both "calendar" and "fixed" time intervals. Fixed time intervals are fairly easy to understand; +`"60s"` means sixty seconds. But what does `"1M"` mean? One month of time depends on which month we are talking about; +some months are longer or shorter than others.
This is an example of "calendar" time, and the duration of that unit +depends on context. Calendar units are also affected by leap-seconds, leap-years, etc. + +This is important because the buckets generated by Rollup will be in either calendar or fixed intervals, and will limit +how you can query them later (see <>). + +We recommend sticking with "fixed" time intervals, since they are easier to understand and are more flexible at query +time. Fixed intervals will introduce some drift in your data during leap-events, and you will have to think about months in a fixed +quantity (30 days) instead of the actual calendar length... but it is often easier than dealing with calendar units +at query time. + +Multiples of units are always "fixed" (e.g. `"2h"` is always the fixed quantity `7200` seconds). Single units can be +fixed or calendar depending on the unit: + +[options="header"] +|======= +|Unit |Calendar |Fixed +|millisecond |NA |`1ms`, `10ms`, etc +|second |NA |`1s`, `10s`, etc +|minute |`1m` |`2m`, `10m`, etc +|hour |`1h` |`2h`, `10h`, etc +|day |`1d` |`2d`, `10d`, etc +|week |`1w` |NA +|month |`1M` |NA +|quarter |`1q` |NA +|year |`1y` |NA +|======= + +For units where both fixed and calendar intervals exist, you may need to express the quantity in terms of the next +smaller unit. For example, if you want a fixed day (not a calendar day), you should specify `24h` instead of `1d`. +Similarly, if you want fixed hours, specify `60m` instead of `1h`. This is because the single quantity entails +calendar time, and limits you to querying by calendar time in the future. + + +********************************** + ===== Terms The `terms` group can be used on `keyword` or numeric fields, to allow bucketing via the `terms` aggregation at a later point. The `terms`
A `max` aggregation has been used on the `temperature` field, yielding the following response: @@ -194,6 +196,7 @@ GET sensor-1,sensor_rollup/_rollup_search <1> -------------------------------------------------- // CONSOLE // TEST[continued] +// TEST[s/_rollup_search/_rollup_search?filter_path=took,timed_out,terminated_early,_shards,hits,aggregations/] <1> Note the URI now searches `sensor-1` and `sensor_rollup` at the same time When the search is executed, the Rollup Search endpoint will do two things: diff --git a/x-pack/docs/en/rest-api/rollup/start-job.asciidoc b/docs/reference/rollup/apis/start-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/start-job.asciidoc rename to docs/reference/rollup/apis/start-job.asciidoc index 9a0a0a7e4f0..cf44883895c 100644 --- a/x-pack/docs/en/rest-api/rollup/start-job.asciidoc +++ b/docs/reference/rollup/apis/start-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-start-job]] === Start Job API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/stop-job.asciidoc b/docs/reference/rollup/apis/stop-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/stop-job.asciidoc rename to docs/reference/rollup/apis/stop-job.asciidoc index 60507402705..5912b2d688b 100644 --- a/x-pack/docs/en/rest-api/rollup/stop-job.asciidoc +++ b/docs/reference/rollup/apis/stop-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-stop-job]] === Stop Job API ++++ diff --git a/x-pack/docs/en/rollup/index.asciidoc b/docs/reference/rollup/index.asciidoc similarity index 92% rename from x-pack/docs/en/rollup/index.asciidoc rename to docs/reference/rollup/index.asciidoc index 9ac89341bfe..64dc233f82f 100644 --- a/x-pack/docs/en/rollup/index.asciidoc +++ b/docs/reference/rollup/index.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[xpack-rollup]] = Rolling up historical data diff --git a/x-pack/docs/en/rollup/overview.asciidoc b/docs/reference/rollup/overview.asciidoc similarity index 99% rename from x-pack/docs/en/rollup/overview.asciidoc rename to docs/reference/rollup/overview.asciidoc index a9a983fbecc..b2570f647e7 100644 --- a/x-pack/docs/en/rollup/overview.asciidoc +++ b/docs/reference/rollup/overview.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-overview]] == Overview diff --git a/x-pack/docs/en/rollup/rollup-agg-limitations.asciidoc b/docs/reference/rollup/rollup-agg-limitations.asciidoc similarity index 90% rename from x-pack/docs/en/rollup/rollup-agg-limitations.asciidoc rename to docs/reference/rollup/rollup-agg-limitations.asciidoc index cd20622d93c..9f8b6f66ade 100644 --- a/x-pack/docs/en/rollup/rollup-agg-limitations.asciidoc +++ b/docs/reference/rollup/rollup-agg-limitations.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-agg-limitations]] == Rollup Aggregation Limitations diff --git a/x-pack/docs/en/rest-api/rollup-api.asciidoc b/docs/reference/rollup/rollup-api.asciidoc similarity index 61% rename from x-pack/docs/en/rest-api/rollup-api.asciidoc rename to docs/reference/rollup/rollup-api.asciidoc index 9a8ec00d77a..099686fb432 100644 --- a/x-pack/docs/en/rest-api/rollup-api.asciidoc +++ b/docs/reference/rollup/rollup-api.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-apis]] == Rollup APIs @@ -26,12 +27,12 @@ -include::rollup/delete-job.asciidoc[] -include::rollup/get-job.asciidoc[] -include::rollup/put-job.asciidoc[] -include::rollup/start-job.asciidoc[] -include::rollup/stop-job.asciidoc[] 
-include::rollup/rollup-caps.asciidoc[] -include::rollup/rollup-index-caps.asciidoc[] -include::rollup/rollup-search.asciidoc[] -include::rollup/rollup-job-config.asciidoc[] \ No newline at end of file +include::apis/delete-job.asciidoc[] +include::apis/get-job.asciidoc[] +include::apis/put-job.asciidoc[] +include::apis/start-job.asciidoc[] +include::apis/stop-job.asciidoc[] +include::apis/rollup-caps.asciidoc[] +include::apis/rollup-index-caps.asciidoc[] +include::apis/rollup-search.asciidoc[] +include::apis/rollup-job-config.asciidoc[] diff --git a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc b/docs/reference/rollup/rollup-getting-started.asciidoc similarity index 83% rename from x-pack/docs/en/rollup/rollup-getting-started.asciidoc rename to docs/reference/rollup/rollup-getting-started.asciidoc index 24f68dddd81..8f99bc2c010 100644 --- a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc +++ b/docs/reference/rollup/rollup-getting-started.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-getting-started]] == Getting Started @@ -37,8 +39,7 @@ PUT _xpack/rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", - "delay": "7d" + "interval": "60m" }, "terms": { "fields": ["node"] @@ -66,7 +67,7 @@ The `cron` parameter controls when and how often the job activates. When a roll from where it left off after the last activation. So if you configure the cron to run every 30 seconds, the job will process the last 30 seconds worth of data that was indexed into the `sensor-*` indices. -If instead the cron was configured to run once a day at midnight, the job would process the last 24hours worth of data. The choice is largely +If instead the cron was configured to run once a day at midnight, the job would process the last 24 hours worth of data. The choice is largely preference, based on how "realtime" you want the rollups, and if you wish to process continuously or move it to off-peak hours. Next, we define a set of `groups` and `metrics`. The metrics are fairly straightforward: we want to save the min/max/sum of the `temperature` @@ -79,7 +80,7 @@ It also allows us to run terms aggregations on the `node` field. .Date histogram interval vs cron schedule ********************************** You'll note that the job's cron is configured to run every 30 seconds, but the date_histogram is configured to -rollup at hourly intervals. How do these relate? +rollup at 60 minute intervals. How do these relate? The date_histogram controls the granularity of the saved data. Data will be rolled up into hourly intervals, and you will be unable to query with finer granularity. The cron simply controls when the process looks for new data to rollup. Every 30 seconds it will see @@ -223,70 +224,71 @@ Which returns a corresponding response: [source,js] ---- { - "took" : 93, - "timed_out" : false, - "terminated_early" : false, - "_shards" : ... 
, - "hits" : { - "total" : 0, - "max_score" : 0.0, - "hits" : [ ] - }, - "aggregations" : { - "timeline" : { - "meta" : { }, - "buckets" : [ - { - "key_as_string" : "2018-01-18T00:00:00.000Z", - "key" : 1516233600000, - "doc_count" : 6, - "nodes" : { - "doc_count_error_upper_bound" : 0, - "sum_other_doc_count" : 0, - "buckets" : [ - { - "key" : "a", - "doc_count" : 2, - "max_temperature" : { - "value" : 202.0 - }, - "avg_voltage" : { - "value" : 5.1499998569488525 - } - }, - { - "key" : "b", - "doc_count" : 2, - "max_temperature" : { - "value" : 201.0 - }, - "avg_voltage" : { - "value" : 5.700000047683716 - } - }, - { - "key" : "c", - "doc_count" : 2, - "max_temperature" : { - "value" : 202.0 - }, - "avg_voltage" : { - "value" : 4.099999904632568 - } - } - ] - } - } - ] - } - } + "took" : 93, + "timed_out" : false, + "terminated_early" : false, + "_shards" : ... , + "hits" : { + "total" : 0, + "max_score" : 0.0, + "hits" : [ ] + }, + "aggregations" : { + "timeline" : { + "meta" : { }, + "buckets" : [ + { + "key_as_string" : "2018-01-18T00:00:00.000Z", + "key" : 1516233600000, + "doc_count" : 6, + "nodes" : { + "doc_count_error_upper_bound" : 0, + "sum_other_doc_count" : 0, + "buckets" : [ + { + "key" : "a", + "doc_count" : 2, + "max_temperature" : { + "value" : 202.0 + }, + "avg_voltage" : { + "value" : 5.1499998569488525 + } + }, + { + "key" : "b", + "doc_count" : 2, + "max_temperature" : { + "value" : 201.0 + }, + "avg_voltage" : { + "value" : 5.700000047683716 + } + }, + { + "key" : "c", + "doc_count" : 2, + "max_temperature" : { + "value" : 202.0 + }, + "avg_voltage" : { + "value" : 4.099999904632568 + } + } + ] + } + } + ] + } + } } + ---- // TESTRESPONSE[s/"took" : 93/"took" : $body.$_path/] // TESTRESPONSE[s/"_shards" : \.\.\. /"_shards" : $body.$_path/] In addition to being more complicated (date histogram and a terms aggregation, plus an additional average metric), you'll notice -the date_histogram uses a `7d` interval instead of `1h`. +the date_histogram uses a `7d` interval instead of `60m`. [float] === Conclusion diff --git a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc b/docs/reference/rollup/rollup-search-limitations.asciidoc similarity index 79% rename from x-pack/docs/en/rollup/rollup-search-limitations.asciidoc rename to docs/reference/rollup/rollup-search-limitations.asciidoc index 57ba23eebcc..b61d1a74388 100644 --- a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc +++ b/docs/reference/rollup/rollup-search-limitations.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-search-limitations]] == Rollup Search Limitations @@ -80,16 +82,32 @@ The response will tell you that the field and aggregation were not possible, bec [float] === Interval Granularity -Rollups are stored at a certain granularity, as defined by the `date_histogram` group in the configuration. If data is rolled up at hourly -intervals, the <> API can aggregate on any time interval hourly or greater. Intervals that are less than an hour will throw -an exception, since the data simply doesn't exist for finer granularities. +Rollups are stored at a certain granularity, as defined by the `date_histogram` group in the configuration. This means you +can only search/aggregate the rollup data with an interval that is greater-than or equal to the configured rollup interval. + +For example, if data is rolled up at hourly intervals, the <> API can aggregate on any time interval +hourly or greater. 
Intervals that are less than an hour will throw an exception, since the data simply doesn't +exist for finer granularities. + +[[rollup-search-limitations-intervals]] +.Requests must be multiples of the config +********************************** +Perhaps not immediately apparent, but the interval specified in an aggregation request must be a whole +multiple of the configured interval. If the job was configured to rollup on `3d` intervals, you can only +query and aggregate on multiples of three (`3d`, `6d`, `9d`, etc). + +A non-multiple wouldn't work, since the rolled up data wouldn't cleanly "overlap" with the buckets generated +by the aggregation, leading to incorrect results. + +For that reason, an error is thrown if a whole multiple of the configured interval isn't found. +********************************** Because the RollupSearch endpoint can "upsample" intervals, there is no need to configure jobs with multiple intervals (hourly, daily, etc). It's recommended to just configure a single job with the smallest granularity that is needed, and allow the search endpoint to upsample as needed. That said, if multiple jobs are present in a single rollup index with varying intervals, the search endpoint will identify and use the job(s) -with the largest interval to satisfy the search reques. +with the largest interval to satisfy the search request. [float] === Limited querying components diff --git a/x-pack/docs/en/rollup/understanding-groups.asciidoc b/docs/reference/rollup/understanding-groups.asciidoc similarity index 98% rename from x-pack/docs/en/rollup/understanding-groups.asciidoc rename to docs/reference/rollup/understanding-groups.asciidoc index 803555b2d73..6321ab9b00f 100644 --- a/x-pack/docs/en/rollup/understanding-groups.asciidoc +++ b/docs/reference/rollup/understanding-groups.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-understanding-groups]] == Understanding Groups diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index f08a10aeb64..c73642c6713 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -119,10 +119,14 @@ Individual searches can have a timeout as part of the <>. Since search requests can originate from many sources, Elasticsearch has a dynamic cluster-level setting for a global search timeout that applies to all search requests that do not set a -timeout in the <>. The default value is no global -timeout. The setting key is `search.default_search_timeout` and can be -set using the <> endpoints. Setting this value -to `-1` resets the global search timeout to no timeout. +timeout in the request body. These requests will be cancelled after +the specified time using the mechanism described in the following section on +<>. Therefore the same caveats about timeout +responsiveness apply. + +The setting key is `search.default_search_timeout` and can be set using the +<> endpoints. The default value is no global timeout. +Setting this value to `-1` resets the global search timeout to no timeout. 
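+ +For example, the following sketch sets a cluster-wide default dynamically; the `50s` value is an illustrative assumption, not a recommendation: + +[source,js] +-------------------------------------------------- +PUT /_cluster/settings +{ + "transient": { + "search.default_search_timeout": "50s" + } +} +-------------------------------------------------- +// CONSOLE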
[float]
[[global-search-cancellation]]
diff --git a/docs/reference/search/explain.asciidoc b/docs/reference/search/explain.asciidoc
index fd09984f169..341c8e4802b 100644
--- a/docs/reference/search/explain.asciidoc
+++ b/docs/reference/search/explain.asciidoc
@@ -30,62 +30,67 @@ This will yield the following result:
 [source,js]
 --------------------------------------------------
 {
-   "_index": "twitter",
-   "_type": "_doc",
-   "_id": "0",
-   "matched": true,
-   "explanation": {
-      "value": 1.6943599,
-      "description": "weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:",
-      "details": [
+   "_index":"twitter",
+   "_type":"_doc",
+   "_id":"0",
+   "matched":true,
+   "explanation":{
+      "value":1.6943597,
+      "description":"weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:",
+      "details":[
       {
-         "value": 1.6943599,
-         "description": "score(doc=0,freq=1.0 = termFreq=1.0\n), product of:",
-         "details": [
+         "value":1.6943597,
+         "description":"score(freq=1.0), product of:",
+         "details":[
            {
-               "value": 1.3862944,
-               "description": "idf, computed as log(1 + (docCount - docFreq + 0.5) / (docFreq + 0.5)) from:",
-               "details": [
-                  {
-                     "value": 1.0,
-                     "description": "docFreq",
-                     "details": []
-                  },
-                  {
-                     "value": 5.0,
-                     "description": "docCount",
-                     "details": []
-                  }
-               ]
+               "value":2.2,
+               "description":"scaling factor, k1 + 1",
+               "details":[]
            },
-            {
-               "value": 1.2222223,
-               "description": "tfNorm, computed as (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength)) from:",
-               "details": [
+           {
+               "value":1.3862944,
+               "description":"idf, computed as log(1 + (N - n + 0.5) / (n + 0.5)) from:",
+               "details":[
                   {
-                     "value": 1.0,
-                     "description": "termFreq=1.0",
-                     "details": []
+                     "value":1,
+                     "description":"n, number of documents containing term",
+                     "details":[]
                   },
                   {
-                     "value": 1.2,
-                     "description": "parameter k1",
-                     "details": []
+                     "value":5,
+                     "description":"N, total number of documents with field",
+                     "details":[]
+                  }
+               ]
+           },
+           {
+               "value":0.5555555,
+               "description":"tf, computed as freq / (freq + k1 * (1 - b + b * dl / avgdl)) from:",
+               "details":[
+                  {
+                     "value":1.0,
+                     "description":"freq, occurrences of term within document",
+                     "details":[]
                   },
                   {
-                     "value": 0.75,
-                     "description": "parameter b",
-                     "details": []
+                     "value":1.2,
+                     "description":"k1, term saturation parameter",
+                     "details":[]
                   },
                   {
-                     "value": 5.4,
-                     "description": "avgFieldLength",
-                     "details": []
+                     "value":0.75,
+                     "description":"b, length normalization parameter",
+                     "details":[]
                   },
                   {
-                     "value": 3.0,
-                     "description": "fieldLength",
-                     "details": []
+                     "value":3.0,
+                     "description":"dl, length of field",
+                     "details":[]
+                  },
+                  {
+                     "value":5.4,
+                     "description":"avgdl, average length of field",
+                     "details":[]
                   }
                ]
            }
diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc
index c68cf0daaf5..8771915dee6 100644
--- a/docs/reference/search/multi-search.asciidoc
+++ b/docs/reference/search/multi-search.asciidoc
@@ -86,6 +86,16 @@ The msearch's `max_concurrent_searches` request parameter can be used to
 control the maximum number of concurrent searches the multi search api will
 execute. This default is based on the number of data nodes and the default
 search thread pool size.
 
+The request parameter `max_concurrent_shard_requests` can be used to control the
+maximum number of concurrent shard requests each sub search request will execute.
+This parameter should be used to protect a single request from overloading a cluster
+(e.g., by default a request will hit all indices in a cluster, which could cause shard request
+rejections if the number of shards per node is high). This default is based on the number of
+data nodes in the cluster but is at most `256`. In certain scenarios parallelism isn't achieved
+through concurrent requests, so this protection can result in poor performance. For
+instance, in an environment where only a very small number of concurrent search requests is
+expected, it might help to increase this value.
+
 [float]
 [[msearch-security]]
 === Security
diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc
index b2444535153..bc7edcd3a88 100644
--- a/docs/reference/search/profile.asciidoc
+++ b/docs/reference/search/profile.asciidoc
@@ -72,7 +72,11 @@ This will yield the following result:
         "next_doc": 53876,
         "next_doc_count": 5,
         "advance": 0,
-        "advance_count": 0
+        "advance_count": 0,
+        "compute_max_score": 0,
+        "compute_max_score_count": 0,
+        "shallow_advance": 0,
+        "shallow_advance_count": 0
      },
      "children": [
         {
@@ -91,7 +95,11 @@ This will yield the following result:
         "next_doc": 10111,
         "next_doc_count": 5,
         "advance": 0,
-        "advance_count": 0
+        "advance_count": 0,
+        "compute_max_score": 0,
+        "compute_max_score_count": 0,
+        "shallow_advance": 0,
+        "shallow_advance_count": 0
      }
   },
   {
@@ -110,7 +118,11 @@ This will yield the following result:
         "next_doc": 2852,
         "next_doc_count": 5,
         "advance": 0,
-        "advance_count": 0
+        "advance_count": 0,
+        "compute_max_score": 0,
+        "compute_max_score_count": 0,
+        "shallow_advance": 0,
+        "shallow_advance_count": 0
      }
   }
 ]
@@ -288,7 +300,11 @@ The `breakdown` component lists detailed timing statistics about low-level Lucen
     "next_doc": 53876,
     "next_doc_count": 5,
     "advance": 0,
-    "advance_count": 0
+    "advance_count": 0,
+    "compute_max_score": 0,
+    "compute_max_score_count": 0,
+    "shallow_advance": 0,
+    "shallow_advance_count": 0
 }
 --------------------------------------------------
 // TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.$_path",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:some message:number",\n"time_in_nanos": $body.$_path,/]
@@ -548,7 +564,11 @@ And the response:
     "score_count": 1,
     "build_scorer": 377872,
     "advance": 0,
-    "advance_count": 0
+    "advance_count": 0,
+    "compute_max_score": 0,
+    "compute_max_score_count": 0,
+    "shallow_advance": 0,
+    "shallow_advance_count": 0
 }
 },
 {
@@ -567,7 +587,11 @@ And the response:
     "score_count": 1,
     "build_scorer": 112551,
     "advance": 0,
-    "advance_count": 0
+    "advance_count": 0,
+    "compute_max_score": 0,
+    "compute_max_score_count": 0,
+    "shallow_advance": 0,
+    "shallow_advance_count": 0
 }
 }
 ],
@@ -596,7 +620,7 @@ And the response:
 ]
 },
 {
-   "name": "BucketCollector: [[my_scoped_agg, my_global_agg]]",
+   "name": "MultiBucketCollector: [[my_scoped_agg, my_global_agg]]",
    "reason": "aggregation",
    "time_in_nanos": 8273
 }
diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc
index e7c9b593af3..5be54662d01 100644
--- a/docs/reference/search/request-body.asciidoc
+++ b/docs/reference/search/request-body.asciidoc
@@ -60,7 +60,9 @@ And here is a sample response:
     A search timeout, bounding the search request to be executed within the
     specified time value and bail with the hits accumulated up to that point
-
when expired. Defaults to no timeout. See <>. + when expired. Search requests are canceled after the timeout is reached using + the <> mechanism. + Defaults to no timeout. See <>. `from`:: @@ -161,7 +163,7 @@ be set to `true` in the response. }, "hits": { "total": 1, - "max_score": 0.0, + "max_score": null, "hits": [] } } diff --git a/docs/reference/search/request/collapse.asciidoc b/docs/reference/search/request/collapse.asciidoc index 192495e5d6d..1ab79e36c7e 100644 --- a/docs/reference/search/request/collapse.asciidoc +++ b/docs/reference/search/request/collapse.asciidoc @@ -217,4 +217,4 @@ Response: -------------------------------------------------- // NOTCONSOLE -NOTE: Second level of of collapsing doesn't allow `inner_hits`. \ No newline at end of file +NOTE: Second level of collapsing doesn't allow `inner_hits`. \ No newline at end of file diff --git a/docs/reference/search/request/docvalue-fields.asciidoc b/docs/reference/search/request/docvalue-fields.asciidoc index fa5baf1db22..bcfcb20d1d5 100644 --- a/docs/reference/search/request/docvalue-fields.asciidoc +++ b/docs/reference/search/request/docvalue-fields.asciidoc @@ -30,6 +30,27 @@ GET /_search Doc value fields can work on fields that are not stored. +`*` can be used as a wild card, for example: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query" : { + "match_all": {} + }, + "docvalue_fields" : [ + { + "field": "*field", <1> + "format": "use_field_mapping" <2> + } + ] +} +-------------------------------------------------- +// CONSOLE +<1> Match all fields ending with `field` +<2> Format to be applied to all matching fields. + Note that if the fields parameter specifies fields without docvalues it will try to load the value from the fielddata cache causing the terms for that field to be loaded to memory (cached), which will result in more memory consumption. diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index 887ae2bdf14..8e719a02c75 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -265,19 +265,19 @@ Response not included in text but tested for completeness sake. ..., "hits": { "total": 1, - "max_score": 1.0444683, + "max_score": 1.0444684, "hits": [ { "_index": "test", "_type": "_doc", "_id": "1", - "_score": 1.0444683, + "_score": 1.0444684, "_source": ..., "inner_hits": { "comments": { <1> "hits": { "total": 1, - "max_score": 1.0444683, + "max_score": 1.0444684, "hits": [ { "_index": "test", @@ -287,7 +287,7 @@ Response not included in text but tested for completeness sake. 
"field": "comments", "offset": 1 }, - "_score": 1.0444683, + "_score": 1.0444684, "fields": { "comments.text.keyword": [ "words words words" diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 9f9833bde9d..c52f28bc7be 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -258,7 +258,7 @@ Which should look like: }, "hits": { "total" : 0, - "max_score" : 0.0, + "max_score" : null, "hits" : [] }, "suggest": { diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc index cba299e97cb..96d60467d10 100644 --- a/docs/reference/search/suggesters/phrase-suggest.asciidoc +++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc @@ -33,12 +33,12 @@ PUT test "trigram": { "type": "custom", "tokenizer": "standard", - "filter": ["standard", "shingle"] + "filter": ["shingle"] }, "reverse": { "type": "custom", "tokenizer": "standard", - "filter": ["standard", "reverse"] + "filter": ["reverse"] } }, "filter": { diff --git a/docs/reference/settings/security-hash-settings.asciidoc b/docs/reference/settings/security-hash-settings.asciidoc new file mode 100644 index 00000000000..061ca38d545 --- /dev/null +++ b/docs/reference/settings/security-hash-settings.asciidoc @@ -0,0 +1,84 @@ +[float] +[[hashing-settings]] +==== User cache and password hash algorithms + +Certain realms store user credentials in memory. To limit exposure +to credential theft and mitigate credential compromise, the cache only stores +a hashed version of the user credentials in memory. By default, the user cache +is hashed with a salted `sha-256` hash algorithm. You can use a different +hashing algorithm by setting the `cache.hash_algo` realm settings to any of the +following values: + +[[cache-hash-algo]] +.Cache hash algorithms +|======================= +| Algorithm | | | Description +| `ssha256` | | | Uses a salted `sha-256` algorithm (default). +| `md5` | | | Uses `MD5` algorithm. +| `sha1` | | | Uses `SHA1` algorithm. +| `bcrypt` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds. +| `bcrypt4` | | | Uses `bcrypt` algorithm with salt generated in 16 rounds. +| `bcrypt5` | | | Uses `bcrypt` algorithm with salt generated in 32 rounds. +| `bcrypt6` | | | Uses `bcrypt` algorithm with salt generated in 64 rounds. +| `bcrypt7` | | | Uses `bcrypt` algorithm with salt generated in 128 rounds. +| `bcrypt8` | | | Uses `bcrypt` algorithm with salt generated in 256 rounds. +| `bcrypt9` | | | Uses `bcrypt` algorithm with salt generated in 512 rounds. +| `pbkdf2` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 10000 iterations. +| `pbkdf2_1000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 1000 iterations. +| `pbkdf2_10000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 10000 iterations. +| `pbkdf2_50000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 50000 iterations. +| `pbkdf2_100000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 100000 iterations. +| `pbkdf2_500000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 500000 iterations. 
+| `pbkdf2_1000000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
+                         pseudorandom function using 1000000 iterations.
+| `noop`,`clear_text` | | | Doesn't hash the credentials and keeps them in clear text in
+                            memory. CAUTION: keeping clear text is considered insecure
+                            and can be compromised at the OS level (for example through
+                            memory dumps and using `ptrace`).
+|=======================
+
+Likewise, realms that store passwords hash them using cryptographically strong
+and password-specific salt values. You can configure the algorithm for password
+hashing by setting the `xpack.security.authc.password_hashing.algorithm` setting
+to one of the following:
+
+[[password-hashing-algorithms]]
+.Password hashing algorithms
+|=======================
+| Algorithm | | | Description
+
+| `bcrypt` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds. (default)
+| `bcrypt4` | | | Uses `bcrypt` algorithm with salt generated in 16 rounds.
+| `bcrypt5` | | | Uses `bcrypt` algorithm with salt generated in 32 rounds.
+| `bcrypt6` | | | Uses `bcrypt` algorithm with salt generated in 64 rounds.
+| `bcrypt7` | | | Uses `bcrypt` algorithm with salt generated in 128 rounds.
+| `bcrypt8` | | | Uses `bcrypt` algorithm with salt generated in 256 rounds.
+| `bcrypt9` | | | Uses `bcrypt` algorithm with salt generated in 512 rounds.
+| `bcrypt10` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds.
+| `bcrypt11` | | | Uses `bcrypt` algorithm with salt generated in 2048 rounds.
+| `bcrypt12` | | | Uses `bcrypt` algorithm with salt generated in 4096 rounds.
+| `bcrypt13` | | | Uses `bcrypt` algorithm with salt generated in 8192 rounds.
+| `bcrypt14` | | | Uses `bcrypt` algorithm with salt generated in 16384 rounds.
+| `pbkdf2` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
+                 pseudorandom function using 10000 iterations.
+| `pbkdf2_1000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
+                      pseudorandom function using 1000 iterations.
+| `pbkdf2_10000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
+                       pseudorandom function using 10000 iterations.
+| `pbkdf2_50000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
+                       pseudorandom function using 50000 iterations.
+| `pbkdf2_100000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
+                        pseudorandom function using 100000 iterations.
+| `pbkdf2_500000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
+                        pseudorandom function using 500000 iterations.
+| `pbkdf2_1000000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
+                         pseudorandom function using 1000000 iterations.
+|=======================
+
+
diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc
index 4f29b0549b3..1fc441a0622 100644
--- a/docs/reference/settings/security-settings.asciidoc
+++ b/docs/reference/settings/security-settings.asciidoc
@@ -46,12 +46,21 @@ settings for the ad1 realm: `xpack.security.authc.realms.ad1.*`. The API
 already omits all `ssl` settings, `bind_dn`, and `bind_password` due to the
 sensitive nature of the information.
 
+`xpack.security.fips_mode.enabled`::
+Enables FIPS mode of operation. Set this to `true` if you run this {es} instance in a FIPS 140-2 enabled JVM. For more information, see <>. Defaults to `false`.
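+
+For example, to prepare a node for FIPS mode, something along the following
+lines could be added to `elasticsearch.yml` (a minimal sketch; it assumes the
+underlying JVM has already been configured for FIPS 140-2, which is not shown
+here):
+
+[source,yaml]
+--------------------------------------------------
+# Illustrative only: the JVM itself must also run in FIPS 140-2 approved mode.
+xpack.security.fips_mode.enabled: true
+--------------------------------------------------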
+ [float] [[password-security-settings]] ==== Default password security settings `xpack.security.authc.accept_default_password`:: In `elasticsearch.yml`, set this to `false` to disable support for the default "changeme" password. +[[password-hashing-settings]] +==== Password hashing settings +`xpack.security.authc.password_hashing.algorithm`:: +Specifies the hashing algorithm that is used for secure user credential storage. +See <>. Defaults to `bcrypt`. + [float] [[anonymous-access-settings]] ==== Anonymous access settings @@ -164,9 +173,8 @@ the standard {es} <>. Defaults to `20m`. cache at any given time. Defaults to 100,000. `cache.hash_algo`:: (Expert Setting) The hashing algorithm that is used for the -in-memory cached user credentials. For possible values, see -{xpack-ref}/controlling-user-cache.html[Cache hash algorithms]. Defaults to -`ssha256`. +in-memory cached user credentials. For possible values, see <>. +Defaults to `ssha256`. [[ref-users-settings]] @@ -190,8 +198,7 @@ Defaults to 100,000. `cache.hash_algo`:: (Expert Setting) The hashing algorithm that is used for the in-memory cached -user credentials. See the {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] table for -all possible values. Defaults to `ssha256`. +user credentials. See <>. Defaults to `ssha256`. [[ref-ldap-settings]] [float] @@ -239,6 +246,13 @@ This setting is multivalued; you can specify multiple user contexts. Required to operate in user template mode. If `user_search.base_dn` is specified, this setting is not valid. For more information on the different modes, see {xpack-ref}/ldap-realm.html[LDAP realms]. + +`authorization_realms`:: +The names of the realms that should be consulted for delegate authorization. +If this setting is used, then the LDAP realm does not perform role mapping and +instead loads the user from the listed realms. The referenced realms are +consulted in the order that they are defined in this list. +See {stack-ov}/realm-chains.html#authorization_realms[Delegating authorization to another realm] + -- NOTE: If any settings starting with `user_search` are specified, the @@ -327,7 +341,7 @@ the filter. If not set, the user DN is passed into the filter. Defaults to Empt `unmapped_groups_as_roles`:: If set to `true`, the names of any unmapped LDAP groups are used as role names and assigned to the user. A group is considered to be _unmapped_ if it is not -not referenced in a +referenced in a {xpack-ref}/mapping-roles.html#mapping-roles-file[role-mapping file]. API-based role mappings are not considered. Defaults to `false`. @@ -444,8 +458,7 @@ Defaults to `100000`. `cache.hash_algo`:: (Expert Setting) Specifies the hashing algorithm that is used for the -in-memory cached user credentials. See {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] -table for all possible values. Defaults to `ssha256`. +in-memory cached user credentials. See <>. Defaults to `ssha256`. [[ref-ad-settings]] [float] @@ -473,7 +486,7 @@ this setting controls the amount of time to cache DNS lookups. Defaults to `1h`. `domain_name`:: -The domain name of Active Directory. If the the `url` and `user_search_dn` +The domain name of Active Directory. If the `url` and the `user_search_dn` settings are not specified, the cluster can derive those values from this setting. Required. @@ -684,7 +697,7 @@ Defaults to `100000`. 
`cache.hash_algo`:: (Expert Setting) Specifies the hashing algorithm that is used for -the in-memory cached user credentials (see {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] table for all possible values). Defaults to `ssha256`. +the in-memory cached user credentials. See <>. Defaults to `ssha256`. `follow_referrals`:: If set to `true` {security} follows referrals returned by the LDAP server. @@ -727,6 +740,12 @@ Specifies the {xpack-ref}/security-files.html[location] of the {xpack-ref}/mapping-roles.html[YAML role mapping configuration file]. Defaults to `ES_PATH_CONF/role_mapping.yml`. +`authorization_realms`:: +The names of the realms that should be consulted for delegate authorization. +If this setting is used, then the PKI realm does not perform role mapping and +instead loads the user from the listed realms. +See {stack-ov}/realm-chains.html#authorization_realms[Delegating authorization to another realm] + `cache.ttl`:: Specifies the time-to-live for cached user entries. A user and a hash of its credentials are cached for this period of time. Use the @@ -850,11 +869,26 @@ Defaults to `false`. Specifies whether to populate the {es} user's metadata with the values that are provided by the SAML attributes. Defaults to `true`. +`authorization_realms`:: +The names of the realms that should be consulted for delegate authorization. +If this setting is used, then the SAML realm does not perform role mapping and +instead loads the user from the listed realms. +See {stack-ov}/realm-chains.html#authorization_realms[Delegating authorization to another realm] + `allowed_clock_skew`:: The maximum amount of skew that can be tolerated between the IdP's clock and the {es} node's clock. Defaults to `3m` (3 minutes). +`req_authn_context_class_ref`:: +A comma separated list of Authentication Context Class Reference values to be +included in the Requested Authentication Context when requesting the IdP to +authenticate the current user. The Authentication Context of the corresponding +authentication response should contain at least one of the requested values. ++ +For more information, see +{stack-ov}/saml-guide-authentication.html#req-authn-context[Requesting specific authentication methods]. + [float] [[ref-saml-signing-settings]] ===== SAML realm signing settings @@ -1056,6 +1090,33 @@ Specifies the supported protocols for TLS/SSL. Specifies the cipher suites that should be supported. +[float] +[[ref-kerberos-settings]] +===== Kerberos realm settings + +For a Kerberos realm, the `type` must be set to `kerberos`. In addition to the +<>, you can specify +the following settings: + +`keytab.path`:: Specifies the path to the Kerberos keytab file that contains the +service principal used by this {es} node. This must be a location within the +{es} configuration directory and the file must have read permissions. Required. + +`remove_realm_name`:: Set to `true` to remove the realm part of principal names. +Principal names in Kerberos have the form `user/instance@REALM`. If this option +is `true`, the realm part (`@REALM`) will not be included in the username. +Defaults to `false`. + +`krb.debug`:: Set to `true` to enable debug logs for the Java login module that +provides support for Kerberos authentication. Defaults to `false`. + +`cache.ttl`:: The time-to-live for cached user entries. A user is cached for +this period of time. Specify the time period using the standard {es} +<>. Defaults to `20m`. 
+ +`cache.max_users`:: The maximum number of user entries that can live in the +cache at any given time. Defaults to 100,000. + [float] [[load-balancing]] ===== Load balancing and failover @@ -1094,7 +1155,12 @@ settings such as those for HTTP or Transport. `xpack.ssl.supported_protocols`:: Supported protocols with versions. Valid protocols: `SSLv2Hello`, `SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`, -`TLSv1`. +`TLSv1`. ++ +-- +NOTE: If `xpack.security.fips_mode.enabled` is `true`, you cannot use `SSLv2Hello` +or `SSLv3`. See <>. +-- `xpack.ssl.client_authentication`:: Controls the server's behavior in regard to requesting a certificate @@ -1193,6 +1259,9 @@ Password to the truststore. `xpack.ssl.truststore.secure_password` (<>):: Password to the truststore. +WARNING: If `xpack.security.fips_mode.enabled` is `true`, you cannot use Java +keystore files. See <>. + [float] ===== PKCS#12 files @@ -1231,6 +1300,9 @@ Password to the truststore. `xpack.ssl.truststore.secure_password` (<>):: Password to the truststore. +WARNING: If `xpack.security.fips_mode.enabled` is `true`, you cannot use PKCS#12 +keystore files. See <>. + [[pkcs12-truststore-note]] [NOTE] Storing trusted certificates in a PKCS#12 file, although supported, is @@ -1308,3 +1380,5 @@ List of IP addresses to allow for this profile. `transport.profiles.$PROFILE.xpack.security.filter.deny`:: List of IP addresses to deny for this profile. + +include::security-hash-settings.asciidoc[] \ No newline at end of file diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index a8b8dd82d61..f0e5cfc71c9 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -155,6 +155,11 @@ the kernel allows a process to have at least 262,144 memory-mapped areas and is enforced on Linux only. To pass the maximum map count check, you must configure `vm.max_map_count` via `sysctl` to be at least `262144`. +Alternatively, the maximum map count check is only needed if you are using +`mmapfs` as the <> for your indices. If you +<> the use of `mmapfs` then this bootstrap check will +not be enforced. + === Client JVM check There are two different JVMs provided by OpenJDK-derived JVMs: the diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc index c0ebfb60fa7..26a207824af 100644 --- a/docs/reference/setup/install.asciidoc +++ b/docs/reference/setup/install.asciidoc @@ -41,6 +41,8 @@ Elasticsearch website or from our RPM repository. `msi`:: +beta[] ++ The `msi` package is suitable for installation on Windows 64-bit systems with at least .NET 4.5 framework installed, and is the easiest choice for getting started with Elasticsearch on Windows. MSIs may be downloaded from the Elasticsearch website. diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index f5e248598ca..f2e9077e20e 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -47,21 +47,21 @@ aside panel with additional information for each input: image::images/msi_installer/msi_installer_help.png[] Within the first screen, select the directory for the installation. 
In addition, select directories for where
-data, logs and configuration will reside or <>:
+data, logs and configuration will be placed or <>:
 
 [[msi-installer-locations]]
 image::images/msi_installer/msi_installer_locations.png[]
 
 Then select whether to install as a service or start Elasticsearch manually as needed. When
-installing as a service, you can also decide which account to run the service under as well
-as whether the service should be started after installation and when Windows is started or
-restarted:
+installing as a service, you can also configure the Windows account to run the service with,
+whether the service should be started after installation and the Windows startup behaviour:
 
 [[msi-installer-service]]
 image::images/msi_installer/msi_installer_service.png[]
 
-IMPORTANT: When selecting an account to run the service with, be sure that the chosen account
-has sufficient privileges to access the installation and other deployment directories chosen.
+IMPORTANT: When selecting a Windows account to run the service with, be sure that the chosen account
+has sufficient privileges to access the installation and other deployment directories chosen. Also
+ensure the account is able to run Windows services.
 
 Common configuration settings are exposed within the Configuration section, allowing the cluster
 name, node name and roles to be set, in addition to memory and network settings:
@@ -69,28 +69,26 @@ name, node name and roles to be set, in addition to memory and network settings:
 [[msi-installer-configuration]]
 image::images/msi_installer/msi_installer_configuration.png[]
 
-A list of common plugins that can be downloaded and installed as
-part of the installation, with the option to configure a HTTPS proxy through which to download:
+A list of common plugins that can be downloaded and installed as part of the installation is shown, with the option to configure an HTTPS proxy through which to download these plugins.
+
+TIP: Ensure the installation machine has access to the internet and that any corporate firewalls in place are configured to allow downloads from `artifacts.elastic.co`:
 
 [[msi-installer-selected-plugins]]
 image::images/msi_installer/msi_installer_selected_plugins.png[]
 
-Upon choosing to install {xpack} plugin, an additional step allows a choice of the type of {xpack}
-license to install, in addition to {security} configuration and built-in user configuration:
+As of version 6.3.0, X-Pack is now https://www.elastic.co/products/x-pack/open[bundled by default]. The final step allows a choice of the type of X-Pack license to install, in addition to security configuration and built-in user configuration:
 
 [[msi-installer-xpack]]
 image::images/msi_installer/msi_installer_xpack.png[]
 
-NOTE: {xpack} includes a choice of a Trial or Basic license for 30 days. After that, you can obtain one of the
-https://www.elastic.co/subscriptions[available subscriptions] or {ref}/security-settings.html[disable Security].
-The Basic license is free and includes the https://www.elastic.co/products/x-pack/monitoring[Monitoring] extension.
+NOTE: X-Pack includes a choice of a Trial or Basic license. A Trial license is valid for 30 days, after which you can obtain one of the available subscriptions. The Basic license is free and perpetual. Consult the https://www.elastic.co/subscriptions[available subscriptions] for further details on which features are available under which license.
-After clicking the install button, the installer will begin installation:
+After clicking the install button, the installation will begin:
 
 [[msi-installer-installing]]
 image::images/msi_installer/msi_installer_installing.png[]
 
-and will indicate when it has been successfully installed:
+...and will indicate when it has been successfully installed:
 
 [[msi-installer-success]]
 image::images/msi_installer/msi_installer_success.png[]
@@ -107,7 +105,7 @@ then running:
 msiexec.exe /i elasticsearch-{version}.msi /qn
 --------------------------------------------
 
-By default, msiexec does not wait for the installation process to complete, since it runs in the
+By default, `msiexec.exe` does not wait for the installation process to complete, since it runs in the
 Windows subsystem. To wait on the process to finish and ensure that `%ERRORLEVEL%` is set
 accordingly, it is recommended to use `start /wait` to create a process and wait for it to exit
 
@@ -132,13 +130,13 @@ Supported Windows Installer command line arguments can be viewed using
 msiexec.exe /help
 --------------------------------------------
 
-or by consulting the https://msdn.microsoft.com/en-us/library/windows/desktop/aa367988(v=vs.85).aspx[Windows Installer SDK Command-Line Options].
+...or by consulting the https://msdn.microsoft.com/en-us/library/windows/desktop/aa367988(v=vs.85).aspx[Windows Installer SDK Command-Line Options].
 
 [[msi-command-line-options]]
 ==== Command line options
 
 All settings exposed within the GUI are also available as command line arguments (referred to
-as _properties_ within Windows Installer documentation) that can be passed to msiexec:
+as _properties_ within Windows Installer documentation) that can be passed to `msiexec.exe`:
 
 [horizontal]
 `INSTALLDIR`::
@@ -282,47 +280,46 @@ as _properties_ within Windows Installer documentation) that can be passed to ms
 
 `XPACKLICENSE`::
 
-  When installing {xpack} plugin, the type of license to install,
-  either `Basic` or `Trial`. Defaults to `Basic`
+  The type of X-Pack license to install, either `Basic` or `Trial`. Defaults to `Basic`
 
 `XPACKSECURITYENABLED`::
 
-  When installing {xpack} plugin with a `Trial` license, whether {security} should be enabled.
+  When installing with a `Trial` license, whether X-Pack Security should be enabled.
   Defaults to `true`
 
 `BOOTSTRAPPASSWORD`::
 
-  When installing {xpack} plugin with a `Trial` license and {security} enabled, the password to
+  When installing with a `Trial` license and X-Pack Security enabled, the password
   used to bootstrap the cluster and persisted as the `bootstrap.password` setting in the keystore.
   Defaults to a randomized value.
 
 `SKIPSETTINGPASSWORDS`::
 
-  When installing {xpack} plugin with a `Trial` license and {security} enabled, whether the
+  When installing with a `Trial` license and X-Pack Security enabled, whether the
   installation should skip setting up the built-in users `elastic`, `kibana` and
   `logstash_system`.
   Defaults to `false`
 
 `ELASTICUSERPASSWORD`::
 
-  When installing {xpack} plugin with a `Trial` license and {security} enabled, the password
+  When installing with a `Trial` license and X-Pack Security enabled, the password
   to use for the built-in user `elastic`. Defaults to `""`
 
 `KIBANAUSERPASSWORD`::
 
-  When installing {xpack} plugin with a `Trial` license and {security} enabled, the password
+  When installing with a `Trial` license and X-Pack Security enabled, the password
   to use for the built-in user `kibana`.
Defaults to `""` `LOGSTASHSYSTEMUSERPASSWORD`:: - When installing {xpack} plugin with a `Trial` license and {security} enabled, the password + When installing with a `Trial` license and X-Pack Security enabled, the password to use for the built-in user `logstash_system`. Defaults to `""` To pass a value, simply append the property name and value using the format `=""` to -the installation command. For example, to use a different installation directory to the default one and to install https://www.elastic.co/products/x-pack[{xpack}]: +the installation command. For example, to use a different installation directory to the default one and to install https://www.elastic.co/products/x-pack[X-Pack]: ["source","sh",subs="attributes,callouts"] -------------------------------------------- -start /wait msiexec.exe /i elasticsearch-{version}.msi /qn INSTALLDIR="C:\Custom Install Directory" PLUGINS="x-pack" +start /wait msiexec.exe /i elasticsearch-{version}.msi /qn INSTALLDIR="C:\Custom Install Directory\{version}" PLUGINS="x-pack" -------------------------------------------- Consult the https://msdn.microsoft.com/en-us/library/windows/desktop/aa367988(v=vs.85).aspx[Windows Installer SDK Command-Line Options] @@ -330,9 +327,10 @@ for additional rules related to values containing quotation marks. ifdef::include-xpack[] [[msi-installer-enable-indices]] -==== Enable automatic creation of {xpack} indices +==== Enable automatic creation of X-Pack indices -{xpack} will try to automatically create a number of indices within {es}. + +X-Pack will try to automatically create a number of indices within Elasticsearch. include::xpack-indices.asciidoc[] endif::include-xpack[] @@ -344,7 +342,7 @@ include::msi-windows-start.asciidoc[] ==== Configuring Elasticsearch on the command line Elasticsearch loads its configuration from the `%ES_PATH_CONF%\elasticsearch.yml` -file by default. The format of this config file is explained in +file by default. The format of this config file is explained in <>. Any settings that can be specified in the config file can also be specified on @@ -393,10 +391,11 @@ with PowerShell: [source,powershell] -------------------------------------------- -Get-Service Elasticsearch | Stop-Service | Start-Service +Get-Service Elasticsearch | Stop-Service +Get-Service Elasticsearch | Start-Service -------------------------------------------- -Changes can be made to jvm.options and elasticsearch.yml configuration files to configure the +Changes can be made to `jvm.options` and `elasticsearch.yml` configuration files to configure the service after installation. Most changes (like JVM settings) will require a restart of the service in order to take effect. @@ -404,16 +403,16 @@ service in order to take effect. ==== Upgrade using the graphical user interface (GUI) The `.msi` package supports upgrading an installed version of Elasticsearch to a newer -version of Elasticsearch. The upgrade process through the GUI handles upgrading all +version. The upgrade process through the GUI handles upgrading all installed plugins as well as retaining both your data and configuration. -Downloading and clicking on a newer version of the `.msi` package will launch the GUI wizard. -The first step will list the read only properties from the previous installation: +Downloading and double-clicking on a newer version of the `.msi` package will launch the GUI wizard. 
+The first step will list the read-only properties from the previous installation: [[msi-installer-upgrade-notice]] image::images/msi_installer/msi_installer_upgrade_notice.png[] -The following configuration step allows certain configuration options to be changed: +The next step allows certain configuration options to be changed: [[msi-installer-upgrade-configuration]] image::images/msi_installer/msi_installer_upgrade_configuration.png[] @@ -434,11 +433,11 @@ The `.msi` can also upgrade Elasticsearch using the command line. A command line upgrade requires passing the **same** command line properties as used at first install time; the Windows Installer does not remember these properties. -For example, if you originally installed with the command line options `PLUGINS="x-pack"` and +For example, if you originally installed with the command line options `PLUGINS="ingest-geoip"` and `LOCKMEMORY="true"`, then you must pass these same values when performing an upgrade from the command line. -The **exception** to this is `INSTALLDIR` (if originally specified), which must be a different directory to the +The **exception** to this is the `INSTALLDIR` parameter (if originally specified), which must be a different directory to the current installation. If setting `INSTALLDIR`, the final directory in the path **must** be the version of Elasticsearch e.g. @@ -466,9 +465,8 @@ start /wait msiexec.exe /i elasticsearch-{version}.msi /qn /l upgrade.log The `.msi` package handles uninstallation of all directories and files added as part of installation. -WARNING: Uninstallation will remove **all** directories and their contents created as part of -installation, **including data within the data directory**. If you wish to retain your data upon -uninstallation, it is recommended that you make a copy of the data directory before uninstallation. +WARNING: Uninstallation will remove **all** contents created as part of +installation, **except for data, config or logs directories**. It is recommended that you make a copy of your data directory before upgrading or consider using the snapshot API. MSI installer packages do not provide a GUI for uninstallation. An installed program can be uninstalled by pressing the Windows key and typing `add or remove programs` to open the system settings. diff --git a/docs/reference/setup/secure-settings.asciidoc b/docs/reference/setup/secure-settings.asciidoc index 2177440457a..6abf5dea14d 100644 --- a/docs/reference/setup/secure-settings.asciidoc +++ b/docs/reference/setup/secure-settings.asciidoc @@ -91,9 +91,6 @@ using the `bin/elasticsearch-keystore add` command, call: [source,js] ---- POST _nodes/reload_secure_settings -{ - "secure_settings_password": "" -} ---- // CONSOLE This API will decrypt and re-read the entire keystore, on every cluster node, diff --git a/docs/reference/sql/concepts.asciidoc b/docs/reference/sql/concepts.asciidoc index 1dc23e391fa..dab33618762 100644 --- a/docs/reference/sql/concepts.asciidoc +++ b/docs/reference/sql/concepts.asciidoc @@ -9,7 +9,7 @@ NOTE: This documentation while trying to be complete, does assume the reader has As a general rule, {es-sql} as the name indicates provides a SQL interface to {es}. As such, it follows the SQL terminology and conventions first, whenever possible. However the backing engine itself is {es} for which {es-sql} was purposely created hence why features or concepts that are not available, or cannot be mapped correctly, in SQL appear in {es-sql}. 
-Last but not least, {es-sql} tries to obey the https://en.wikipedia.org/wiki/Principle_of_least_astonishment[principle of least suprise], though as all things in the world, everything is relative.
+Last but not least, {es-sql} tries to obey the https://en.wikipedia.org/wiki/Principle_of_least_astonishment[principle of least surprise], though as all things in the world, everything is relative.
 
 === Mapping concepts across SQL and {es}
 
@@ -25,8 +25,8 @@ So let's start from the bottom; these roughly are:
 |`column`
 |`field`
 
-|In both cases, at the lowest level, data is stored in in _named_ entries, of a variety of <>, containing _one_ value. SQL calls such an entry a _column_ while {es} a _field_.
-Notice that in {es} a field can contain _multiple_ values of the same type (esentially a list) while in SQL, a _column_ can contain _exactly_ one value of said type.
+|In both cases, at the lowest level, data is stored in _named_ entries, of a variety of <>, containing _one_ value. SQL calls such an entry a _column_ while {es} a _field_.
+Notice that in {es} a field can contain _multiple_ values of the same type (essentially a list) while in SQL, a _column_ can contain _exactly_ one value of said type.
 {es-sql} will do its best to preserve the SQL semantic and, depending on the query, reject those that return fields with more than one value.
 
 |`row`
@@ -43,7 +43,7 @@ Notice that in {es} a field can contain _multiple_ values of the same type (esen
 |`catalog` or `database`
 |`cluster` instance
 
-|In SQL, `catalog` or `database` are used interchangebly and represent a set of schemas that is, a number of tables.
+|In SQL, `catalog` or `database` are used interchangeably and represent a set of schemas, that is, a number of tables.
 In {es} the set of indices available are grouped in a `cluster`.
 The semantics also differ a bit; a `database` is essentially yet another namespace (which can have some implications on the way data is stored) while an {es} `cluster` is a runtime instance, or rather a set of at least one {es} instance (typically running distributed).
 In practice this means that while in SQL one can potentially have multiple catalogs inside an instance, in {es} one is restricted to only _one_.
@@ -62,4 +62,4 @@ Multiple clusters, each with its own namespace, connected to each other in a fed
 
 |===
 
-As one can see while the mapping between the concepts are not exactly one to one and the semantics somewhat different, there are more things in common than differences. In fact, thanks to SQL declarative nature, many concepts can move across {es} transparently and the terminology of the two likely to be used interchangebly through-out the rest of the material.
+As one can see, while the mapping between the concepts is not exactly one to one and the semantics are somewhat different, there are more things in common than differences. In fact, thanks to SQL's declarative nature, many concepts can move across {es} transparently and the terminology of the two is likely to be used interchangeably throughout the rest of the material.
\ No newline at end of file diff --git a/docs/reference/sql/endpoints/cli.asciidoc b/docs/reference/sql/endpoints/cli.asciidoc index 0908c2344bb..eef2fbfbf59 100644 --- a/docs/reference/sql/endpoints/cli.asciidoc +++ b/docs/reference/sql/endpoints/cli.asciidoc @@ -22,6 +22,15 @@ the first parameter: $ ./bin/elasticsearch-sql-cli https://some.server:9200 -------------------------------------------------- +If security is enabled on your cluster, you can pass the username +and password in the form `username:password@host_name:port` +to the SQL CLI: + +[source,bash] +-------------------------------------------------- +$ ./bin/elasticsearch-sql-cli https://sql_user:strongpassword@some.server:9200 +-------------------------------------------------- + Once the CLI is running you can use any <> that Elasticsearch supports: diff --git a/docs/reference/sql/functions/aggs.asciidoc b/docs/reference/sql/functions/aggs.asciidoc new file mode 100644 index 00000000000..c2d485dbe6a --- /dev/null +++ b/docs/reference/sql/functions/aggs.asciidoc @@ -0,0 +1,168 @@ +[role="xpack"] +[testenv="basic"] +[[sql-functions-aggs]] +=== Aggregate Functions + +Functions for computing a _single_ result from a set of input values. +{es-sql} supports aggregate functions only alongside <> (implicit or explicit). + +==== General Purpose + +[[sql-functions-aggs-avg]] +===== `AVG` + +*Input*: Numeric, *Output*: `double` + +https://en.wikipedia.org/wiki/Arithmetic_mean[Average] (arithmetic mean) of input values. + + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[aggAvg] +---- + +[[sql-functions-aggs-count]] +===== `COUNT` + +*Input*: Any, *Output*: `bigint` + +Total number (count) of input values. + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[aggCountStar] +---- + +[[sql-functions-aggs-count-distinct]] +===== `COUNT(DISTINCT)` + +*Input*: Any, *Output*: `bigint` + +Total number of _distinct_ values in input values. + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[aggCountDistinct] +---- + +[[sql-functions-aggs-max]] +===== `MAX` + +*Input*: Numeric, *Output*: Same as input + +Maximum value across input values. + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[aggMax] +---- + +[[sql-functions-aggs-min]] +===== `MIN` + +*Input*: Numeric, *Output*: Same as input + +Minimum value across input values. + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[aggMin] +---- + +[[sql-functions-aggs-sum]] +===== `SUM` + +*Input*: Numeric, *Output*: `bigint` for integer input, `double` for floating points + +Sum of input values. + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[aggSum] +---- + +==== Statistics + +[[sql-functions-aggs-kurtosis]] +===== `KURTOSIS` + +*Input*: Numeric, *Output*: `double` + +https://en.wikipedia.org/wiki/Kurtosis[Quantify] the shape of the distribution of input values. + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[aggKurtosis] +---- + +[[sql-functions-aggs-percentile]] +===== `PERCENTILE` + +*Input*: Numeric, *Output*: `double` + +The nth https://en.wikipedia.org/wiki/Percentile[percentile] of input values. 
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[aggPercentile]
+----
+
+[[sql-functions-aggs-percentile-rank]]
+===== `PERCENTILE_RANK`
+
+*Input*: Numeric, *Output*: `double`
+
+The https://en.wikipedia.org/wiki/Percentile_rank[percentile rank] of input values.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[aggPercentileRank]
+----
+
+[[sql-functions-aggs-skewness]]
+===== `SKEWNESS`
+
+*Input*: Numeric, *Output*: `double`
+
+https://en.wikipedia.org/wiki/Skewness[Quantify] the asymmetric distribution of input values.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[aggSkewness]
+----
+
+[[sql-functions-aggs-stddev-pop]]
+===== `STDDEV_POP`
+
+*Input*: Numeric, *Output*: `double`
+
+https://en.wikipedia.org/wiki/Standard_deviations[Population standard deviation] of input values.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[aggStddevPop]
+----
+
+[[sql-functions-aggs-sum-squares]]
+===== `SUM_OF_SQUARES`
+
+*Input*: Numeric, *Output*: `double`
+
+https://en.wikipedia.org/wiki/Total_sum_of_squares[Sum of squares] of input values.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[aggSumOfSquares]
+----
+
+[[sql-functions-aggs-var-pop]]
+===== `VAR_POP`
+
+*Input*: Numeric, *Output*: `double`
+
+https://en.wikipedia.org/wiki/Variance[Population] variance of input values.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[aggVarPop]
+----
\ No newline at end of file
diff --git a/docs/reference/sql/functions/date-time.asciidoc b/docs/reference/sql/functions/date-time.asciidoc
new file mode 100644
index 00000000000..f0d90cbb07e
--- /dev/null
+++ b/docs/reference/sql/functions/date-time.asciidoc
@@ -0,0 +1,94 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-functions-datetime]]
+=== Date and Time Functions
+
+* Extract the year from a date (`YEAR`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[year]
+--------------------------------------------------
+
+* Extract the month of the year from a date (`MONTH_OF_YEAR` or `MONTH`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[monthOfYear]
+--------------------------------------------------
+
+* Extract the week of the year from a date (`WEEK_OF_YEAR` or `WEEK`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[weekOfYear]
+--------------------------------------------------
+
+* Extract the day of the year from a date (`DAY_OF_YEAR` or `DOY`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[dayOfYear]
+--------------------------------------------------
+
+* Extract the day of the month from a date (`DAY_OF_MONTH`, `DOM`, or `DAY`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[dayOfMonth]
+--------------------------------------------------
+
+* Extract the day of the week from a date (`DAY_OF_WEEK` or
`DOW`).
+Monday is `1`, Tuesday is `2`, etc.
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[dayOfWeek]
+--------------------------------------------------
+
+* Extract the hour of the day from a date (`HOUR_OF_DAY` or `HOUR`).
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[hourOfDay]
+--------------------------------------------------
+
+* Extract the minute of the day from a date (`MINUTE_OF_DAY`).
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[minuteOfDay]
+--------------------------------------------------
+
+* Extract the minute of the hour from a date (`MINUTE_OF_HOUR`, `MINUTE`).
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[minuteOfHour]
+--------------------------------------------------
+
+* Extract the second of the minute from a date (`SECOND_OF_MINUTE`, `SECOND`).
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[secondOfMinute]
+--------------------------------------------------
+
+* Extract a given unit from a date (`EXTRACT`)
+
+As an alternative, one can use `EXTRACT` to extract fields from datetimes.
+You can run any <>
+with `EXTRACT( FROM )`. So
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[extractDayOfYear]
+--------------------------------------------------
+
+is equivalent to
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[dayOfYear]
+--------------------------------------------------
\ No newline at end of file
diff --git a/docs/reference/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc
index 93d201a1828..82e8154de93 100644
--- a/docs/reference/sql/functions/index.asciidoc
+++ b/docs/reference/sql/functions/index.asciidoc
@@ -3,416 +3,20 @@
 [[sql-functions]]
 == Functions and Operators
 
-{es-sql} provides a number of built-in operators and functions.
- -=== Comparison Operators - -{es-sql} supports the following comparison operators: - -* Equality (`=`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/filter.sql-spec[whereFieldEquality] --------------------------------------------------- - -* Inequality (`<>` or `!=` or `<=>`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/filter.sql-spec[whereFieldNonEquality] --------------------------------------------------- - -* Comparison (`<`, `<=`, `>`, `>=`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/filter.sql-spec[whereFieldLessThan] --------------------------------------------------- - -* `BETWEEN` - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/filter.sql-spec[whereBetween] --------------------------------------------------- - -* `IS NULL`/`IS NOT NULL` - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/filter.sql-spec[whereIsNotNullAndIsNull] --------------------------------------------------- - - -=== Logical Operators - -{es-sql} supports the following logical operators: - -* `AND` - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/filter.sql-spec[whereFieldAndComparison] --------------------------------------------------- - -* `OR` - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/filter.sql-spec[whereFieldOrComparison] --------------------------------------------------- - -* `NOT` - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/filter.sql-spec[whereFieldEqualityNot] --------------------------------------------------- - - -=== Math Operators - -{es-sql} supports the following math operators: - -* Add (`+`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/arithmetic.sql-spec[plus] --------------------------------------------------- - -* Subtract (infix `-`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/arithmetic.sql-spec[minus] --------------------------------------------------- - -* Negate (unary `-`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/arithmetic.sql-spec[unaryMinus] --------------------------------------------------- - -* Multiply (`*`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/arithmetic.sql-spec[multiply] --------------------------------------------------- - -* Divide (`/`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/arithmetic.sql-spec[divide] --------------------------------------------------- - -* https://en.wikipedia.org/wiki/Modulo_operation[Modulo] or Reminder(`%`) - -["source","sql",subs="attributes,callouts,macros"] 
--------------------------------------------------- -include-tagged::{sql-specs}/arithmetic.sql-spec[mod] --------------------------------------------------- - - -=== Math Functions - -All math and trigonometric functions require their input (where applicable) -to be numeric. - -==== Generic - -* `ABS` - -https://en.wikipedia.org/wiki/Absolute_value[Absolute value], returns \[same type as input] - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[abs] --------------------------------------------------- - -* `CBRT` - -https://en.wikipedia.org/wiki/Cube_root[Cube root], returns `double` - -// TODO make the example in the tests presentable - -* `CEIL` - -https://en.wikipedia.org/wiki/Floor_and_ceiling_functions[Ceiling], returns `double` - -* `CEILING` - -Same as `CEIL` - -// TODO make the example in the tests presentable - -* `E` - -https://en.wikipedia.org/wiki/E_%28mathematical_constant%29[Euler's number], returns `2.7182818284590452354` - - -* https://en.wikipedia.org/wiki/Rounding#Round_half_up[Round] (`ROUND`) - -// TODO make the example in the tests presentable - -NOTE: This rounds "half up" meaning that `ROUND(-1.5)` results in `-1`. - - -* https://en.wikipedia.org/wiki/Floor_and_ceiling_functions[Floor] (`FLOOR`) - -// TODO make the example in the tests presentable - -* https://en.wikipedia.org/wiki/Natural_logarithm[Natural logarithm] (`LOG`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[log] --------------------------------------------------- - -* https://en.wikipedia.org/wiki/Logarithm[Logarithm] base 10 (`LOG10`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[log10] --------------------------------------------------- - -* https://en.wikipedia.org/wiki/Square_root[Square root] (`SQRT`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[sqrt] --------------------------------------------------- - -* https://en.wikipedia.org/wiki/Exponential_function[e^x^] (`EXP`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[exp] --------------------------------------------------- - -* https://docs.oracle.com/javase/8/docs/api/java/lang/Math.html#expm1-double-[e^x^ - 1] (`EXPM1`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[expm1] --------------------------------------------------- - -==== Trigonometric - -* Convert from https://en.wikipedia.org/wiki/Radian[radians] -to https://en.wikipedia.org/wiki/Degree_(angle)[degrees] (`DEGREES`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[degrees] --------------------------------------------------- - -* Convert from https://en.wikipedia.org/wiki/Degree_(angle)[degrees] -to https://en.wikipedia.org/wiki/Radian[radians] (`RADIANS`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[degrees] --------------------------------------------------- - -* 
https://en.wikipedia.org/wiki/Trigonometric_functions#sine[Sine] (`SIN`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[sin] --------------------------------------------------- - -* https://en.wikipedia.org/wiki/Trigonometric_functions#cosine[Cosine] (`COS`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[cos] --------------------------------------------------- - -* https://en.wikipedia.org/wiki/Trigonometric_functions#tangent[Tangent] (`TAN`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[tan] --------------------------------------------------- - -* https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[Arc sine] (`ASIN`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[asin] --------------------------------------------------- - -* https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[Arc cosine] (`ACOS`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[acos] --------------------------------------------------- - -* https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[Arc tangent] (`ATAN`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[atan] --------------------------------------------------- - -* https://en.wikipedia.org/wiki/Hyperbolic_function[Hyperbolic sine] (`SINH`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[sinh] --------------------------------------------------- - -* https://en.wikipedia.org/wiki/Hyperbolic_function[Hyperbolic cosine] (`COSH`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/math.sql-spec[cosh] --------------------------------------------------- - -[[sql-functions-datetime]] -=== Date and Time Functions - -* Extract the year from a date (`YEAR`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/datetime.csv-spec[year] --------------------------------------------------- - -* Extract the month of the year from a date (`MONTH_OF_YEAR` or `MONTH`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/datetime.csv-spec[monthOfYear] --------------------------------------------------- - -* Extract the week of the year from a date (`WEEK_OF_YEAR` or `WEEK`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/datetime.csv-spec[weekOfYear] --------------------------------------------------- - -* Extract the day of the year from a date (`DAY_OF_YEAR` or `DOY`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/datetime.csv-spec[dayOfYear] --------------------------------------------------- - -* Extract the day of the month from a date 
(`DAY_OF_MONTH`, `DOM`, or `DAY`) - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/datetime.csv-spec[dayOfMonth] --------------------------------------------------- - -* Extract the day of the week from a date (`DAY_OF_WEEK` or `DOW`). -Monday is `1`, Tuesday is `2`, etc. - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/datetime.csv-spec[dayOfWeek] --------------------------------------------------- - -* Extract the hour of the day from a date (`HOUR_OF_DAY` or `HOUR`). -Monday is `1`, Tuesday is `2`, etc. - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/datetime.csv-spec[hourOfDay] --------------------------------------------------- - -* Extract the minute of the day from a date (`MINUTE_OF_DAY`). - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/datetime.csv-spec[minuteOfDay] --------------------------------------------------- - -* Extract the minute of the hour from a date (`MINUTE_OF_HOUR`, `MINUTE`). - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/datetime.csv-spec[minuteOfHour] --------------------------------------------------- - -* Extract the second of the minute from a date (`SECOND_OF_MINUTE`, `SECOND`). - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/datetime.csv-spec[secondOfMinute] --------------------------------------------------- - -* Extract - -As an alternative, one can support `EXTRACT` to extract fields from datetimes. -You can run any <> -with `EXTRACT( FROM )`. 
So
-
-["source","sql",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-specs}/datetime.csv-spec[extractDayOfYear]
---------------------------------------------------
-
-is the equivalent to
-
-["source","sql",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-specs}/datetime.csv-spec[dayOfYear]
---------------------------------------------------
-
-
-[[sql-functions-aggregate]]
-=== Aggregate Functions
-
-==== Basic
-
-* https://en.wikipedia.org/wiki/Arithmetic_mean[Average] (`AVG`)
-
-["source","sql",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-specs}/agg.sql-spec[avg]
---------------------------------------------------
-
-* Count the number of matching fields (`COUNT`)
-
-["source","sql",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-specs}/agg.sql-spec[countStar]
---------------------------------------------------
-
-* Count the number of distinct values in matching documents (`COUNT(DISTINCT`)
-
-["source","sql",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-specs}/agg.sql-spec[countDistinct]
---------------------------------------------------
-
-* Find the maximum value in matching documents (`MAX`)
-
-["source","sql",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-specs}/agg.sql-spec[max]
---------------------------------------------------
-
-* Find the minimum value in matching documents (`MIN`)
-
-["source","sql",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-specs}/agg.sql-spec[min]
---------------------------------------------------
-
-* https://en.wikipedia.org/wiki/Kahan_summation_algorithm[Sum]
-all values of matching documents (`SUM`).
-
-["source","sql",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-specs}/agg.csv-spec[sum]
---------------------------------------------------
+{es-sql} provides a comprehensive set of built-in operators and functions:
+
+* <<sql-operators>>
+* <<sql-functions-aggregate>>
+* <<sql-functions-datetime>>
+* <<sql-functions-search>>
+* <<sql-functions-math>>
+* <<sql-functions-string>>
+* <<sql-functions-type-conversion>>
+
+include::operators.asciidoc[]
+include::aggs.asciidoc[]
+include::date-time.asciidoc[]
+include::search.asciidoc[]
+include::math.asciidoc[]
+include::string.asciidoc[]
+include::type-conversion.asciidoc[]
diff --git a/docs/reference/sql/functions/math.asciidoc b/docs/reference/sql/functions/math.asciidoc
new file mode 100644
index 00000000000..604603f2973
--- /dev/null
+++ b/docs/reference/sql/functions/math.asciidoc
@@ -0,0 +1,159 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-functions-math]]
+=== Math Functions
+
+All math and trigonometric functions require their input (where applicable)
+to be numeric.
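+
+For a quick illustration of the numeric-input requirement, here is a sketch
+with made-up literals (not taken from the test specs):
+
+[source, sql]
+----
+SELECT ABS(-3.14);     -- numeric argument: valid
+SELECT ABS('elastic'); -- non-numeric argument: the query fails
+----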
+
+==== Generic
+
+* `ABS`
+
+https://en.wikipedia.org/wiki/Absolute_value[Absolute value], returns \[same type as input]
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[abs]
+--------------------------------------------------
+
+* `CBRT`
+
+https://en.wikipedia.org/wiki/Cube_root[Cube root], returns `double`
+
+// TODO make the example in the tests presentable
+
+* `CEIL`
+
+https://en.wikipedia.org/wiki/Floor_and_ceiling_functions[Ceiling], returns `double`
+
+* `CEILING`
+
+Same as `CEIL`
+
+// TODO make the example in the tests presentable
+
+* `E`
+
+https://en.wikipedia.org/wiki/E_%28mathematical_constant%29[Euler's number], returns `2.7182818284590452354`
+
+
+* https://en.wikipedia.org/wiki/Rounding#Round_half_up[Round] (`ROUND`)
+
+// TODO make the example in the tests presentable
+
+NOTE: This rounds "half up" meaning that `ROUND(-1.5)` results in `-1`.
+
+
+* https://en.wikipedia.org/wiki/Floor_and_ceiling_functions[Floor] (`FLOOR`)
+
+// TODO make the example in the tests presentable
+
+* https://en.wikipedia.org/wiki/Natural_logarithm[Natural logarithm] (`LOG`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[log]
+--------------------------------------------------
+
+* https://en.wikipedia.org/wiki/Logarithm[Logarithm] base 10 (`LOG10`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[log10]
+--------------------------------------------------
+
+* https://en.wikipedia.org/wiki/Square_root[Square root] (`SQRT`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[sqrt]
+--------------------------------------------------
+
+* https://en.wikipedia.org/wiki/Exponential_function[e^x^] (`EXP`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[exp]
+--------------------------------------------------
+
+* https://docs.oracle.com/javase/8/docs/api/java/lang/Math.html#expm1-double-[e^x^ - 1] (`EXPM1`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[expm1]
+--------------------------------------------------
+
+==== Trigonometric
+
+* Convert from https://en.wikipedia.org/wiki/Radian[radians]
+to https://en.wikipedia.org/wiki/Degree_(angle)[degrees] (`DEGREES`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[degrees]
+--------------------------------------------------
+
+* Convert from https://en.wikipedia.org/wiki/Degree_(angle)[degrees]
+to https://en.wikipedia.org/wiki/Radian[radians] (`RADIANS`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[radians]
+--------------------------------------------------
+
+* https://en.wikipedia.org/wiki/Trigonometric_functions#sine[Sine] (`SIN`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[sin]
+--------------------------------------------------
+
+* https://en.wikipedia.org/wiki/Trigonometric_functions#cosine[Cosine] (`COS`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[cos]
+--------------------------------------------------
+
+* https://en.wikipedia.org/wiki/Trigonometric_functions#tangent[Tangent] (`TAN`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[tan]
+--------------------------------------------------
+
+* https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[Arc sine] (`ASIN`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[asin]
+--------------------------------------------------
+
+* https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[Arc cosine] (`ACOS`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[acos]
+--------------------------------------------------
+
+* https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[Arc tangent] (`ATAN`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[atan]
+--------------------------------------------------
+
+* https://en.wikipedia.org/wiki/Hyperbolic_function[Hyperbolic sine] (`SINH`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[sinh]
+--------------------------------------------------
+
+* https://en.wikipedia.org/wiki/Hyperbolic_function[Hyperbolic cosine] (`COSH`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/math.sql-spec[cosh]
+--------------------------------------------------
diff --git a/docs/reference/sql/functions/operators.asciidoc b/docs/reference/sql/functions/operators.asciidoc
new file mode 100644
index 00000000000..9c90d12320e
--- /dev/null
+++ b/docs/reference/sql/functions/operators.asciidoc
@@ -0,0 +1,115 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-operators]]
+=== Comparison Operators
+
+Boolean operators for comparing one or two expressions.
+
+* Equality (`=`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/filter.sql-spec[whereFieldEquality]
+--------------------------------------------------
+
+* Inequality (`<>` or `!=` or `<=>`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/filter.sql-spec[whereFieldNonEquality]
+--------------------------------------------------
+
+* Comparison (`<`, `<=`, `>`, `>=`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/filter.sql-spec[whereFieldLessThan]
+--------------------------------------------------
+
+* `BETWEEN`
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/filter.sql-spec[whereBetween]
+--------------------------------------------------
+
+* `IS NULL`/`IS NOT NULL`
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/filter.sql-spec[whereIsNotNullAndIsNull]
+--------------------------------------------------
+
+[[sql-operators-logical]]
+=== Logical Operators
+
+Boolean operators for evaluating one or two expressions.
+
+* `AND`
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/filter.sql-spec[whereFieldAndComparison]
+--------------------------------------------------
+
+* `OR`
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/filter.sql-spec[whereFieldOrComparison]
+--------------------------------------------------
+
+* `NOT`
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/filter.sql-spec[whereFieldEqualityNot]
+--------------------------------------------------
+
+[[sql-operators-math]]
+=== Math Operators
+
+Perform mathematical operations affecting one or two values.
+The result is a value of numeric type.
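+
+Before the individual operators below, a small hypothetical sketch (table and
+column names are made up, not from the test specs) showing the difference
+between the infix and unary forms of `-`:
+
+[source, sql]
+----
+SELECT salary - bonus FROM emp;  -- infix minus: subtraction
+SELECT -salary FROM emp;         -- unary minus: negation
+----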
+
+* Add (`+`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/arithmetic.sql-spec[plus]
+--------------------------------------------------
+
+* Subtract (infix `-`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/arithmetic.sql-spec[minus]
+--------------------------------------------------
+
+* Negate (unary `-`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/arithmetic.sql-spec[unaryMinus]
+--------------------------------------------------
+
+* Multiply (`*`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/arithmetic.sql-spec[multiply]
+--------------------------------------------------
+
+* Divide (`/`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/arithmetic.sql-spec[divide]
+--------------------------------------------------
+
+* https://en.wikipedia.org/wiki/Modulo_operation[Modulo] or Remainder (`%`)
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/arithmetic.sql-spec[mod]
+--------------------------------------------------
diff --git a/docs/reference/sql/functions/search.asciidoc b/docs/reference/sql/functions/search.asciidoc
new file mode 100644
index 00000000000..564f57dcbdd
--- /dev/null
+++ b/docs/reference/sql/functions/search.asciidoc
@@ -0,0 +1,35 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-functions-search]]
+=== Full-Text Search Functions
+
+Search functions should be used when performing full-text search, namely
+when the `MATCH` or `QUERY` predicates are used.
+Outside a so-called search context, these functions return default values
+such as `0` or `NULL`.
+
+[[sql-functions-search-score]]
+==== `SCORE`
+
+*Input*: None, *Output*: `double`
+
+Returns the {defguide}/relevance-intro.html[relevance] of a given input to the executed query.
+The higher the score, the more relevant the data.
+
+NOTE: When doing multiple text queries in the `WHERE` clause, their scores will be
+combined using the same rules as {es}'s
+<<query-dsl-bool-query,bool query>>.
+
+Typically `SCORE` is used for ordering the results of a query based on their relevance:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[orderByScore]
+----
+
+However, it is perfectly fine to return the score without sorting by it:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[scoreWithMatch]
+----
\ No newline at end of file
diff --git a/docs/reference/sql/functions/string.asciidoc b/docs/reference/sql/functions/string.asciidoc
new file mode 100644
index 00000000000..ccc11938028
--- /dev/null
+++ b/docs/reference/sql/functions/string.asciidoc
@@ -0,0 +1,240 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-functions-string]]
+=== String Functions
+
+Functions for performing string manipulation.
+
+[[sql-functions-string-ascii]]
+==== `ASCII`
+
+*Input*: `string`, *Output*: `integer`
+
+Returns the ASCII code value of the leftmost character of the string input as an integer.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringAscii]
+----
+
+[[sql-functions-string-bit-length]]
+==== `BIT_LENGTH`
+
+*Input*: `string`, *Output*: `integer`
+
+Returns the length in bits of the input.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringBitLength]
+----
+
+[[sql-functions-string-char]]
+==== `CHAR`
+
+*Input*: `numeric`, *Output*: `string`
+
+Returns the character that has the ASCII code value specified by the numeric input. The value should be between 0 and 255; otherwise, the return value is data source–dependent.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringChar]
+----
+
+[[sql-functions-string-char-length]]
+==== `CHAR_LENGTH`
+
+*Input*: `string`, *Output*: `integer`
+
+Returns the length in characters of the input, if the string expression is of a character data type; otherwise, returns the length in bytes of the string expression (the smallest integer not less than the number of bits divided by 8).
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringCharLength]
+----
+
+[[sql-functions-string-concat]]
+==== `CONCAT`
+
+*Input*: `string1`, `string2`, *Output*: `string`
+
+Returns a character string that is the result of concatenating string1 to string2. If one of the strings is `NULL`,
+the other string will be returned.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringConcat]
+----
+
+[[sql-functions-string-insert]]
+==== `INSERT`
+
+*Input*: `string1`, `start`, `length`, `string2`, *Output*: `string`
+
+Returns a string where length characters have been deleted from string1, beginning at start, and where string2 has been inserted into string1, beginning at start.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringInsert]
+----
+
+[[sql-functions-string-lcase]]
+==== `LCASE`
+
+*Input*: `string`, *Output*: `string`
+
+Returns a string equal to the input, with all uppercase characters converted to lowercase.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringLCase]
+----
+
+[[sql-functions-string-left]]
+==== `LEFT`
+
+*Input*: `string`, `count`, *Output*: `string`
+
+Returns the leftmost count characters of string.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringLeft]
+----
+
+[[sql-functions-string-length]]
+==== `LENGTH`
+
+*Input*: `string`, *Output*: `integer`
+
+Returns the number of characters in string, excluding trailing blanks.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringLength]
+----
+
+[[sql-functions-string-locate]]
+==== `LOCATE`
+
+*Input*: `string1`, `string2`[, `start`], *Output*: `integer`
+
+Returns the starting position of the first occurrence of string1 within string2. The search for the first occurrence of string1 begins with the first character position in string2 unless the optional argument, start, is specified. If start is specified, the search begins with the character position indicated by the value of start. The first character position in string2 is indicated by the value 1. If string1 is not found within string2, the value 0 is returned.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringLocateWoStart]
+----
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringLocateWithStart]
+----
+
+[[sql-functions-string-ltrim]]
+==== `LTRIM`
+
+*Input*: `string`, *Output*: `string`
+
+Returns the characters of the input, with leading blanks removed.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringLTrim]
+----
+
+[[sql-functions-string-position]]
+==== `POSITION`
+
+*Input*: `string1`, `string2`, *Output*: `integer`
+
+Returns the position of string1 in string2. The result is an exact numeric.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringPosition]
+----
+
+[[sql-functions-string-repeat]]
+==== `REPEAT`
+
+*Input*: `string`, `count`, *Output*: `string`
+
+Returns a character string composed of the input string repeated count times.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringRepeat]
+----
+
+[[sql-functions-string-replace]]
+==== `REPLACE`
+
+*Input*: `string1`, `string2`, `string3`, *Output*: `string`
+
+Searches string1 for occurrences of string2, and replaces them with string3.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringReplace]
+----
+
+[[sql-functions-string-right]]
+==== `RIGHT`
+
+*Input*: `string`, `count`, *Output*: `string`
+
+Returns the rightmost count characters of string.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringRight]
+----
+
+[[sql-functions-string-rtrim]]
+==== `RTRIM`
+
+*Input*: `string`, *Output*: `string`
+
+Returns the characters of string with trailing blanks removed.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringRTrim]
+----
+
+[[sql-functions-string-space]]
+==== `SPACE`
+
+*Input*: `integer`, *Output*: `string`
+
+Returns a character string consisting of count spaces.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringSpace]
+----
+
+[[sql-functions-string-substring]]
+==== `SUBSTRING`
+
+*Input*: `string`, `start`, `length`, *Output*: `string`
+
+Returns a character string that is derived from the string, beginning at the character position specified by `start` for `length` characters.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringSubString]
+----
+
+[[sql-functions-string-ucase]]
+==== `UCASE`
+
+*Input*: `string`, *Output*: `string`
+
+Returns a string equal to that of the input, with all lowercase characters converted to uppercase.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[stringUCase]
+----
diff --git a/docs/reference/sql/functions/type-conversion.asciidoc b/docs/reference/sql/functions/type-conversion.asciidoc
new file mode 100644
index 00000000000..549b05d69d8
--- /dev/null
+++ b/docs/reference/sql/functions/type-conversion.asciidoc
@@ -0,0 +1,39 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-functions-type-conversion]]
+=== Type Conversion Functions
+
+Functions for converting an expression of one data type to another.
+
+[[sql-functions-type-conversion-cast]]
+==== `CAST`
+
+.Synopsis
+[source, sql]
+----
+CAST ( expression<1> AS data_type<2> )
+----
+
+<1> Expression to cast
+<2> Target data type to cast to
+
+.Description
+
+Casts the result of the given expression to the target type.
+If the cast is not possible (for example because the target type is too narrow or because
+the value itself cannot be converted), the query fails.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[conversionStringToIntCast]
+----
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[conversionIntToStringCast]
+----
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[conversionStringToDateCast]
+----
\ No newline at end of file
diff --git a/docs/reference/sql/language/data-types.asciidoc b/docs/reference/sql/language/data-types.asciidoc
index 7f98add9724..9cf9c4eed62 100644
--- a/docs/reference/sql/language/data-types.asciidoc
+++ b/docs/reference/sql/language/data-types.asciidoc
@@ -17,7 +17,7 @@ Most of {es} <> are available in {es-sql}, as indicat
 | <> | `tinyint` | 3
 | <> | `smallint` | 5
 | <> | `integer` | 10
-| <> | `long` | 19
+| <> | `bigint` | 19
 | <> | `double` | 15
 | <> | `real` | 7
 | <> | `float` | 16
diff --git a/docs/reference/sql/language/index-patterns.asciidoc b/docs/reference/sql/language/index-patterns.asciidoc
new file mode 100644
index 00000000000..58574e03cfb
--- /dev/null
+++ b/docs/reference/sql/language/index-patterns.asciidoc
@@ -0,0 +1,70 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-index-patterns]]
+== Index patterns
+
+{es-sql} supports two types of patterns for matching multiple indices or tables:
+
+* {es} multi-index
+
+The {es} notation for enumerating, including or excluding <<multi-index,multi indices>>
+is supported _as long as_ it is quoted or escaped as a table identifier.
+
+For example:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[showTablesEsMultiIndex]
+----
+
+Notice the pattern is surrounded by double quotes `"`. It enumerates `*`, meaning all indices; however,
+it excludes (due to `-`) all indices that start with `l`.
+This notation is very convenient and powerful as it allows both inclusion and exclusion, depending on
+the target naming convention.
+
+* SQL `LIKE` notation
+
+The common `LIKE` statement (including escaping if needed) matches a wildcard pattern,
+based on `_` (matching exactly one character) or `%` (matching zero or more characters).
+
+Using the `SHOW TABLES` command again:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeWildcard]
+----
+
+The pattern matches all tables that start with `emp`.
+
+This command supports _escaping_ as well, for example:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeEscape]
+----
+
+Notice how `emp%` now does not match any tables because `%`, which matches zero or more characters,
+has been escaped by `!` and thus becomes a regular character. And since there is no table named `emp%`,
+an empty result is returned.
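+
+To make the escaping mechanics concrete, here is a hypothetical sketch (the
+pattern and the `!` escape character are illustrative, not taken from the
+test specs):
+
+[source, sql]
+----
+SHOW TABLES LIKE 'emp!%' ESCAPE '!';
+----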
+
+In a nutshell, the differences between the two types of patterns are:
+
+[cols="^h,^,^",options="header"]
+|===
+| Feature | Multi index | SQL `LIKE`
+
+| Type of quoting | `"` | `'`
+| Inclusion | Yes | Yes
+| Exclusion | Yes | No
+| Enumeration | Yes | No
+| One char pattern | No | `_`
+| Multi char pattern | `*` | `%`
+| Escaping | No | `ESCAPE`
+
+|===
+
+Which one to use is up to you; however, try to stick to the same one across your queries for consistency.
+
+NOTE: As the type of quoting between the two patterns is fairly similar (`"` vs `'`), {es-sql} _always_
+requires the keyword `LIKE` for SQL `LIKE` patterns.
+
diff --git a/docs/reference/sql/language/index.asciidoc b/docs/reference/sql/language/index.asciidoc
index 6558e9ad92b..f63afd6ebd8 100644
--- a/docs/reference/sql/language/index.asciidoc
+++ b/docs/reference/sql/language/index.asciidoc
@@ -7,6 +7,8 @@ This chapter describes the SQL semantics supported in X-Pack namely:
 <>:: Data types
 <>:: Commands
+<<sql-index-patterns>>:: Index patterns
 
 include::data-types.asciidoc[]
 include::syntax/index.asciidoc[]
+include::index-patterns.asciidoc[]
diff --git a/docs/reference/sql/language/syntax/describe-table.asciidoc b/docs/reference/sql/language/syntax/describe-table.asciidoc
index 396be25bb51..66c1829c14f 100644
--- a/docs/reference/sql/language/syntax/describe-table.asciidoc
+++ b/docs/reference/sql/language/syntax/describe-table.asciidoc
@@ -6,14 +6,14 @@
 .Synopsis
 [source, sql]
 ----
-DESCRIBE table
+DESCRIBE [table identifier<1>|[LIKE pattern<2>]]
 ----
 
 or
 
 [source, sql]
 ----
-DESC table
+DESC [table identifier<1>|[LIKE pattern<2>]]
 ----
 
diff --git a/docs/reference/sql/language/syntax/show-columns.asciidoc b/docs/reference/sql/language/syntax/show-columns.asciidoc
index 539c35c5795..92743000362 100644
--- a/docs/reference/sql/language/syntax/show-columns.asciidoc
+++ b/docs/reference/sql/language/syntax/show-columns.asciidoc
@@ -6,9 +6,15 @@
 .Synopsis
 [source, sql]
 ----
-SHOW COLUMNS [ FROM | IN ] ? table
+SHOW COLUMNS [ FROM | IN ]? [ table identifier<1> | [ LIKE pattern<2> ] ]
 ----
 
+<1> single table identifier or double quoted es multi index
+<2> SQL LIKE pattern
+
+See <<sql-index-patterns, index patterns>> for more information about
+patterns.
+
 .Description
 
 List the columns in table and their data type (and other attributes).
@@ -17,3 +23,4 @@ List the columns in table and their data type (and other attributes).
 ----
 include-tagged::{sql-specs}/docs.csv-spec[showColumns]
 ----
+
diff --git a/docs/reference/sql/language/syntax/show-functions.asciidoc b/docs/reference/sql/language/syntax/show-functions.asciidoc
index 1e4220ef529..d77aa008586 100644
--- a/docs/reference/sql/language/syntax/show-functions.asciidoc
+++ b/docs/reference/sql/language/syntax/show-functions.asciidoc
@@ -6,7 +6,7 @@
 .Synopsis
 [source, sql]
 ----
-SHOW FUNCTIONS [ LIKE? pattern<1>? ]?
+SHOW FUNCTIONS [ LIKE pattern<1>? ]?
 ----
 
 <1> SQL match pattern
diff --git a/docs/reference/sql/language/syntax/show-tables.asciidoc b/docs/reference/sql/language/syntax/show-tables.asciidoc
index b401e9f7d90..5748ae31806 100644
--- a/docs/reference/sql/language/syntax/show-tables.asciidoc
+++ b/docs/reference/sql/language/syntax/show-tables.asciidoc
@@ -6,10 +6,15 @@
 .Synopsis
 [source, sql]
 ----
-SHOW TABLES [ LIKE? pattern<1>? ]?
+SHOW TABLES [ table identifier<1> | [ LIKE pattern<2> ] ]?
 ----
 
-<1> SQL match pattern
+<1> single table identifier or double quoted es multi index
+<2> SQL LIKE pattern
+
+See <<sql-index-patterns, index patterns>> for more information about
+patterns.
+
 .Description
 
@@ -20,7 +25,15 @@ List the tables available to the current user and their type.
 include-tagged::{sql-specs}/docs.csv-spec[showTables]
 ----
 
-The `LIKE` clause can be used to restrict the list of names to the given pattern.
+Match multiple indices by using {es} <<multi-index,multi index>>
+notation:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[showTablesEsMultiIndex]
+----
+
+One can also use the `LIKE` clause to restrict the list of names to the given pattern.
 The pattern can be an exact match:
 
 ["source","sql",subs="attributes,callouts,macros"]
diff --git a/docs/reference/sql/overview.asciidoc b/docs/reference/sql/overview.asciidoc
index a72f5ca61fe..3da070a447a 100644
--- a/docs/reference/sql/overview.asciidoc
+++ b/docs/reference/sql/overview.asciidoc
@@ -28,7 +28,7 @@ No need for additional hardware, processes, runtimes or libraries to query {es};
 
 Lightweight and efficient::
 
-{es-sql} does not abstract {es} and its search capabilities - on the contrary, it embrases and exposes to SQL to allow proper full-text search, in real-time, in the same declarative, succint fashion.
+{es-sql} does not abstract {es} and its search capabilities - on the contrary, it embraces and exposes SQL to allow proper full-text search, in real-time, in the same declarative, succinct fashion.
diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc
index dfc7371dd37..321122d81f5 100644
--- a/docs/reference/testing/testing-framework.asciidoc
+++ b/docs/reference/testing/testing-framework.asciidoc
@@ -230,7 +230,7 @@ As many Elasticsearch tests are checking for a similar output, like the amount o
 `assertMatchCount()`:: Asserts a matching count from a percolation response
 `assertFirstHit()`:: Asserts the first hit hits the specified matcher
 `assertSecondHit()`:: Asserts the second hit hits the specified matcher
-`assertThirdHit()`:: Asserts the third hits hits the specified matcher
+`assertThirdHit()`:: Asserts the third hit hits the specified matcher
 `assertSearchHit()`:: Assert a certain element in a search response hits the specified matcher
 `assertNoFailures()`:: Asserts that no shard failures have occurred in the response
 `assertFailures()`:: Asserts that shard failures have happened during a search request
diff --git a/docs/reference/upgrade/set-paths-tip.asciidoc b/docs/reference/upgrade/set-paths-tip.asciidoc
index 38a07f7ac2b..2dd120767c2 100644
--- a/docs/reference/upgrade/set-paths-tip.asciidoc
+++ b/docs/reference/upgrade/set-paths-tip.asciidoc
@@ -15,4 +15,4 @@ The <> and <> packages place these directories in the appropriate place
 for each operating system. In production, we recommend installing using the deb or rpm
 package.
-================================================
\ No newline at end of file
+================================================
diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc
index aac8c192372..8157e51e5e0 100644
--- a/docs/resiliency/index.asciidoc
+++ b/docs/resiliency/index.asciidoc
@@ -459,7 +459,7 @@ Upgrading indices create with Lucene 3.x (Elasticsearch v0.20 and before) to Luc
 [float]
 === Improve error handling when deleting files (STATUS: DONE, v1.4.0.Beta1)
 
-Lucene uses reference counting to prevent files that are still in use from being deleted. Lucene testing discovered a bug ({JIRA}5919[LUCENE-5919]) when decrementing the ref count on a batch of files. If deleting some of the files resulted in an exception (e.g.
due to interference from a virus scanner), the files that had had their ref counts decremented successfully could later have their ref counts deleted again, incorrectly, resulting in files being physically deleted before their time. This is fixed in Lucene 4.10. +Lucene uses reference counting to prevent files that are still in use from being deleted. Lucene testing discovered a bug ({JIRA}5919[LUCENE-5919]) when decrementing the ref count on a batch of files. If deleting some of the files resulted in an exception (e.g. due to interference from a virus scanner), the files that had their ref counts decremented successfully could later have their ref counts deleted again, incorrectly, resulting in files being physically deleted before their time. This is fixed in Lucene 4.10. [float] === Using Lucene Checksums to verify shards during snapshot/restore (STATUS:DONE, v1.3.3) diff --git a/docs/src/test/cluster/config/scripts/calculate_score.painless b/docs/src/test/cluster/config/scripts/calculate_score.painless deleted file mode 100644 index 0fad3fc59f9..00000000000 --- a/docs/src/test/cluster/config/scripts/calculate_score.painless +++ /dev/null @@ -1 +0,0 @@ -Math.log(_score * 2) + params.my_modifier diff --git a/docs/src/test/cluster/config/scripts/my_combine_script.painless b/docs/src/test/cluster/config/scripts/my_combine_script.painless deleted file mode 100644 index 106ef08d91f..00000000000 --- a/docs/src/test/cluster/config/scripts/my_combine_script.painless +++ /dev/null @@ -1,5 +0,0 @@ -double profit = 0; -for (t in params._agg.transactions) { - profit += t -} -return profit diff --git a/docs/src/test/cluster/config/scripts/my_init_script.painless b/docs/src/test/cluster/config/scripts/my_init_script.painless deleted file mode 100644 index fb6aa11723c..00000000000 --- a/docs/src/test/cluster/config/scripts/my_init_script.painless +++ /dev/null @@ -1 +0,0 @@ -params._agg.transactions = [] diff --git a/docs/src/test/cluster/config/scripts/my_map_script.painless b/docs/src/test/cluster/config/scripts/my_map_script.painless deleted file mode 100644 index f4700482d55..00000000000 --- a/docs/src/test/cluster/config/scripts/my_map_script.painless +++ /dev/null @@ -1 +0,0 @@ -params._agg.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value) diff --git a/docs/src/test/cluster/config/scripts/my_reduce_script.painless b/docs/src/test/cluster/config/scripts/my_reduce_script.painless deleted file mode 100644 index ca4f67ca2db..00000000000 --- a/docs/src/test/cluster/config/scripts/my_reduce_script.painless +++ /dev/null @@ -1,5 +0,0 @@ -double profit = 0; -for (a in params._aggs) { - profit += a -} -return profit diff --git a/docs/src/test/cluster/config/scripts/my_script.painless b/docs/src/test/cluster/config/scripts/my_script.painless deleted file mode 100644 index 55d0e99ed0f..00000000000 --- a/docs/src/test/cluster/config/scripts/my_script.painless +++ /dev/null @@ -1,2 +0,0 @@ -// Simple script to load a field. Not really a good example, but a simple one. 
-doc[params.field].value diff --git a/gradle.properties b/gradle.properties index 08b03629ad5..6b04e99c204 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,2 +1,3 @@ org.gradle.daemon=false org.gradle.jvmargs=-Xmx2g +options.forkOptions.memoryMaximumSize=2g diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 0d4a9516871..28861d273a5 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 94161917d18..76d8f343e75 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-4.9-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-4.10-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=39e2d5803bbd5eaf6c8efe07067b0e5a00235e8c71318642b2ed262920b27721 +distributionSha256Sum=fc049dcbcb245d5892bebae143bd515a78f6a5a93cec99d489b312dc0ce4aad9 diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index 00d6d96ef0d..b1f3b338255 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.optional-base' apply plugin: 'nebula.maven-base-publish' @@ -34,5 +31,5 @@ test.enabled = false jarHell.enabled = false forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } diff --git a/libs/core/build.gradle b/libs/core/build.gradle index 2017c2a418a..9c90837bd80 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -48,12 +46,13 @@ if (!isEclipse && !isIdea) { targetCompatibility = 9 } - /* Enable this when forbiddenapis was updated to 2.6. 
- * See: https://github.com/elastic/elasticsearch/issues/29292 forbiddenApisJava9 { - targetCompatibility = 9 + if (project.runtimeJavaVersion < JavaVersion.VERSION_1_9) { + targetCompatibility = JavaVersion.VERSION_1_9 + javaHome = project.java9Home + } + replaceSignatureFiles 'jdk-signatures' } - */ jar { metaInf { @@ -91,7 +90,7 @@ dependencies { forbiddenApisMain { // :libs:core does not depend on server // TODO: Need to decide how we want to handle for forbidden signatures with the changes to server - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index e171daeb79b..3de0ae5117e 100644 --- a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -255,6 +255,10 @@ public class JarHell { } private static void checkClass(Map clazzes, String clazz, Path jarpath) { + if (clazz.equals("module-info") || clazz.endsWith(".module-info")) { + // Ignore jigsaw module descriptions + return; + } Path previous = clazzes.put(clazz, jarpath); if (previous != null) { if (previous.equals(jarpath)) { diff --git a/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java index e58268ef192..95c56f94ee4 100644 --- a/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java +++ b/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java @@ -76,6 +76,28 @@ public class JarHellTests extends ESTestCase { } } + public void testModuleInfo() throws Exception { + Path dir = createTempDir(); + JarHell.checkJarHell( + asSet( + makeJar(dir, "foo.jar", null, "module-info.class"), + makeJar(dir, "bar.jar", null, "module-info.class") + ), + logger::debug + ); + } + + public void testModuleInfoPackage() throws Exception { + Path dir = createTempDir(); + JarHell.checkJarHell( + asSet( + makeJar(dir, "foo.jar", null, "foo/bar/module-info.class"), + makeJar(dir, "bar.jar", null, "foo/bar/module-info.class") + ), + logger::debug + ); + } + public void testDirsOnClasspath() throws Exception { Path dir1 = createTempDir(); Path dir2 = createTempDir(); diff --git a/libs/dissect/build.gradle b/libs/dissect/build.gradle index c09a2a4ebd1..853c78646c2 100644 --- a/libs/dissect/build.gradle +++ b/libs/dissect/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -33,7 +31,7 @@ dependencies { } forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index 61437be6aff..37b494624ed 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -34,7 +32,7 @@ dependencies { } forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/libs/nio/build.gradle b/libs/nio/build.gradle index 43c9a133a3f..f6a6ff65245 100644 --- a/libs/nio/build.gradle +++ b/libs/nio/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -62,5 +59,5 @@ if (isEclipse) { forbiddenApisMain { // nio does not depend on core, so only jdk signatures should be checked // es-all is not checked as we connect and accept sockets - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } diff --git a/libs/secure-sm/build.gradle b/libs/secure-sm/build.gradle index 93fdfd01c8f..3baf3513b12 100644 --- a/libs/secure-sm/build.gradle +++ b/libs/secure-sm/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -47,7 +44,7 @@ dependencies { } forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/libs/x-content/build.gradle b/libs/x-content/build.gradle index c8b37108ff9..0ec4e0d6ad3 100644 --- a/libs/x-content/build.gradle +++ b/libs/x-content/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -57,7 +55,7 @@ dependencies { forbiddenApisMain { // x-content does not depend on server // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java index aa19f62fedc..714e7759c54 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.matrix.stats; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; @@ -61,8 +62,8 @@ final class MatrixStatsAggregator extends MetricsAggregator { } @Override - public boolean needsScores() { - return (valuesSources == null) ? false : valuesSources.needsScores(); + public ScoreMode scoreMode() { + return (valuesSources != null && valuesSources.needsScores()) ? 
ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle index 391b74934c9..e5193ab3c84 100644 --- a/modules/analysis-common/build.gradle +++ b/modules/analysis-common/build.gradle @@ -20,4 +20,13 @@ esplugin { description 'Adds "built in" analyzers to Elasticsearch.' classname 'org.elasticsearch.analysis.common.CommonAnalysisPlugin' + extendedPlugins = ['lang-painless'] +} + +dependencies { + compileOnly project(':modules:lang-painless') +} + +integTestCluster { + module project(':modules:lang-painless') } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPainlessExtension.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPainlessExtension.java new file mode 100644 index 00000000000..85abec4ce91 --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPainlessExtension.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.analysis.common; + +import org.elasticsearch.painless.spi.PainlessExtension; +import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistLoader; +import org.elasticsearch.script.ScriptContext; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class AnalysisPainlessExtension implements PainlessExtension { + + private static final Whitelist WHITELIST = + WhitelistLoader.loadFromResourceFiles(AnalysisPainlessExtension.class, "painless_whitelist.txt"); + + @Override + public Map, List> getContextWhitelists() { + return Collections.singletonMap(AnalysisPredicateScript.CONTEXT, Collections.singletonList(WHITELIST)); + } +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPredicateScript.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPredicateScript.java new file mode 100644 index 00000000000..7de588a958c --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPredicateScript.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.analysis.common; + +import org.elasticsearch.script.ScriptContext; + +/** + * A predicate based on the current token in a TokenStream + */ +public abstract class AnalysisPredicateScript { + + /** + * Encapsulation of the state of the current token + */ + public static class Token { + public CharSequence term; + public int pos; + public int posInc; + public int posLen; + public int startOffset; + public int endOffset; + public String type; + public boolean isKeyword; + + public CharSequence getTerm() { + return term; + } + + public int getPositionIncrement() { + return posInc; + } + + public int getPosition() { + return pos; + } + + public int getPositionLength() { + return posLen; + } + + public int getStartOffset() { + return startOffset; + } + + public int getEndOffset() { + return endOffset; + } + + public String getType() { + return type; + } + + public boolean isKeyword() { + return isKeyword; + } + } + + /** + * Returns {@code true} if the current term matches the predicate + */ + public abstract boolean execute(Token token); + + public interface Factory { + AnalysisPredicateScript newInstance(); + } + + public static final String[] PARAMETERS = new String[]{ "token" }; + public static final ScriptContext CONTEXT = new ScriptContext<>("analysis", Factory.class); + +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ChineseAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ChineseAnalyzerProvider.java index 01b529188c6..2259560bcbc 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ChineseAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ChineseAnalyzerProvider.java @@ -19,6 +19,7 @@ package org.elasticsearch.analysis.common; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -35,7 +36,7 @@ public class ChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider scriptService = new SetOnce<>(); + + @Override + public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, + NamedXContentRegistry xContentRegistry, Environment environment, + NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + this.scriptService.set(scriptService); + return Collections.emptyList(); + } + + @Override + public List> getContexts() { + return Collections.singletonList(AnalysisPredicateScript.CONTEXT); + } + @Override public Map>> getAnalyzers() { Map>> analyzers = new TreeMap<>(); @@ -202,6 +231,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { filters.put("classic", ClassicFilterFactory::new); filters.put("czech_stem", CzechStemTokenFilterFactory::new); filters.put("common_grams", requiresAnalysisSettings(CommonGramsTokenFilterFactory::new)); + 
filters.put("condition", + requiresAnalysisSettings((i, e, n, s) -> new ScriptedConditionTokenFilterFactory(i, n, s, scriptService.get()))); filters.put("decimal_digit", DecimalDigitFilterFactory::new); filters.put("delimited_payload_filter", LegacyDelimitedPayloadTokenFilterFactory::new); filters.put("delimited_payload", DelimitedPayloadTokenFilterFactory::new); @@ -293,7 +324,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { () -> new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, CharArraySet.EMPTY_SET))); analyzers.add(new PreBuiltAnalyzerProviderFactory("snowball", CachingStrategy.LUCENE, - () -> new SnowballAnalyzer("English", StopAnalyzer.ENGLISH_STOP_WORDS_SET))); + () -> new SnowballAnalyzer("English", EnglishAnalyzer.ENGLISH_STOP_WORDS_SET))); // Language analyzers: analyzers.add(new PreBuiltAnalyzerProviderFactory("arabic", CachingStrategy.LUCENE, ArabicAnalyzer::new)); @@ -304,7 +335,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { analyzers.add(new PreBuiltAnalyzerProviderFactory("bulgarian", CachingStrategy.LUCENE, BulgarianAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("catalan", CachingStrategy.LUCENE, CatalanAnalyzer::new)); // chinese analyzer: only for old indices, best effort - analyzers.add(new PreBuiltAnalyzerProviderFactory("chinese", CachingStrategy.ONE, StandardAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("chinese", CachingStrategy.ONE, + () -> new StandardAnalyzer(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET))); analyzers.add(new PreBuiltAnalyzerProviderFactory("cjk", CachingStrategy.LUCENE, CJKAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("czech", CachingStrategy.LUCENE, CzechAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("danish", CachingStrategy.LUCENE, DanishAnalyzer::new)); @@ -376,14 +408,14 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER))); filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, input -> - new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE))); + new EdgeNGramTokenFilter(input, 1))); filters.add(PreConfiguredTokenFilter.singletonWithVersion("edgeNGram", false, (reader, version) -> { if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) { DEPRECATION_LOGGER.deprecatedAndMaybeLog("edgeNGram_deprecation", "The [edgeNGram] token filter name is deprecated and will be removed in a future version. 
" + "Please change the filter name to [edge_ngram] instead."); } - return new EdgeNGramTokenFilter(reader, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE); + return new EdgeNGramTokenFilter(reader, 1); })); filters.add(PreConfiguredTokenFilter.singleton("elision", true, input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES))); @@ -400,14 +432,14 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { new LimitTokenCountFilter(input, LimitTokenCountFilterFactory.DEFAULT_MAX_TOKEN_COUNT, LimitTokenCountFilterFactory.DEFAULT_CONSUME_ALL_TOKENS))); - filters.add(PreConfiguredTokenFilter.singleton("ngram", false, NGramTokenFilter::new)); + filters.add(PreConfiguredTokenFilter.singleton("ngram", false, reader -> new NGramTokenFilter(reader, 1, 2, false))); filters.add(PreConfiguredTokenFilter.singletonWithVersion("nGram", false, (reader, version) -> { if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) { DEPRECATION_LOGGER.deprecatedAndMaybeLog("nGram_deprecation", "The [nGram] token filter name is deprecated and will be removed in a future version. " + "Please change the filter name to [ngram] instead."); } - return new NGramTokenFilter(reader); + return new NGramTokenFilter(reader, 1, 2, false); })); filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new)); @@ -430,7 +462,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { filters.add(PreConfiguredTokenFilter.singleton("sorani_normalization", true, SoraniNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("stemmer", false, PorterStemFilter::new)); // The stop filter is in lucene-core but the English stop words set is in lucene-analyzers-common - filters.add(PreConfiguredTokenFilter.singleton("stop", false, input -> new StopFilter(input, StopAnalyzer.ENGLISH_STOP_WORDS_SET))); + filters.add(PreConfiguredTokenFilter.singleton("stop", false, + input -> new StopFilter(input, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET))); filters.add(PreConfiguredTokenFilter.singleton("trim", true, TrimFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("truncate", false, input -> new TruncateTokenFilter(input, 10))); filters.add(PreConfiguredTokenFilter.singleton("type_as_payload", false, TypeAsPayloadTokenFilter::new)); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenFilterFactory.java index af6d30a0354..6bcd2b737fe 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenFilterFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; -import org.apache.lucene.analysis.ngram.NGramTokenFilter; import org.apache.lucene.analysis.reverse.ReverseStringFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -41,8 +40,8 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory { EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) 
{ super(indexSettings, name, settings); - this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE); - this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); + this.minGram = settings.getAsInt("min_gram", 1); + this.maxGram = settings.getAsInt("max_gram", 2); this.side = parseSide(settings.get("side", "front")); } @@ -63,7 +62,8 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory { result = new ReverseStringFilter(result); } - result = new EdgeNGramTokenFilter(result, minGram, maxGram); + // TODO: Expose preserveOriginal + result = new EdgeNGramTokenFilter(result, minGram, maxGram, false); // side=BACK is not supported anymore but applying ReverseStringFilter up-front and after the token filter has the same effect if (side == SIDE_BACK) { diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java index 22b06061316..8d99ec1d1a1 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java @@ -39,8 +39,8 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { NGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); int maxAllowedNgramDiff = indexSettings.getMaxNgramDiff(); - this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE); - this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); + this.minGram = settings.getAsInt("min_gram", 1); + this.maxGram = settings.getAsInt("max_gram", 2); int ngramDiff = maxGram - minGram; if (ngramDiff > maxAllowedNgramDiff) { if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { @@ -57,6 +57,7 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { @Override public TokenStream create(TokenStream tokenStream) { - return new NGramTokenFilter(tokenStream, minGram, maxGram); + // TODO: Expose preserveOriginal + return new NGramTokenFilter(tokenStream, minGram, maxGram, false); } } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterFactory.java new file mode 100644 index 00000000000..cf7fd5b047a --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterFactory.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.analysis.common;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.ConditionalTokenFilter;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
+import org.elasticsearch.index.analysis.ReferringFilterFactory;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.ScriptType;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+
+/**
+ * A factory for a conditional token filter that only applies child filters if the underlying token
+ * matches an {@link AnalysisPredicateScript}
+ */
+public class ScriptedConditionTokenFilterFactory extends AbstractTokenFilterFactory implements ReferringFilterFactory {
+
+    private final AnalysisPredicateScript.Factory factory;
+    private final List<TokenFilterFactory> filters = new ArrayList<>();
+    private final List<String> filterNames;
+
+    ScriptedConditionTokenFilterFactory(IndexSettings indexSettings, String name,
+                                        Settings settings, ScriptService scriptService) {
+        super(indexSettings, name, settings);
+
+        Settings scriptSettings = settings.getAsSettings("script");
+        Script script = Script.parse(scriptSettings);
+        if (script.getType() != ScriptType.INLINE) {
+            throw new IllegalArgumentException("Cannot use stored scripts in tokenfilter [" + name + "]");
+        }
+        this.factory = scriptService.compile(script, AnalysisPredicateScript.CONTEXT);
+
+        this.filterNames = settings.getAsList("filter");
+        if (this.filterNames.isEmpty()) {
+            throw new IllegalArgumentException("Empty list of filters provided to tokenfilter [" + name + "]");
+        }
+    }
+
+    @Override
+    public TokenStream create(TokenStream tokenStream) {
+        Function<TokenStream, TokenStream> filter = in -> {
+            for (TokenFilterFactory tff : filters) {
+                in = tff.create(in);
+            }
+            return in;
+        };
+        AnalysisPredicateScript script = factory.newInstance();
+        final AnalysisPredicateScript.Token token = new AnalysisPredicateScript.Token();
+        return new ConditionalTokenFilter(tokenStream, filter) {
+
+            CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+            PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
+            PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class);
+            OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+            TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+            KeywordAttribute keywordAtt = addAttribute(KeywordAttribute.class);
+
+            @Override
+            protected boolean shouldFilter() {
+                token.term = termAtt;
+                token.posInc = posIncAtt.getPositionIncrement();
+                token.pos += token.posInc;
+                token.posLen = posLenAtt.getPositionLength();
+                token.startOffset = offsetAtt.startOffset();
+                token.endOffset = offsetAtt.endOffset();
+                token.type = typeAtt.type();
+                token.isKeyword = keywordAtt.isKeyword();
+                return script.execute(token);
+            }
+        };
+    }
+
+    @Override
+    public void setReferences(Map<String, TokenFilterFactory> factories) {
+        for (String filter : filterNames) {
+            TokenFilterFactory tff = factories.get(filter);
+            if (tff == null) {
+                throw new IllegalArgumentException("ScriptedConditionTokenFilter [" + name() +
+                    "] refers to undefined token filter [" + filter + "]");
+            }
+            filters.add(tff);
+        }
+    }
+
+}
diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzer.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzer.java
index bc4b9a763fd..74e6bbcc65c 100644
--- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzer.java
+++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzer.java
@@ -27,11 +27,10 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
 import org.apache.lucene.analysis.snowball.SnowballFilter;
-import org.apache.lucene.analysis.standard.StandardFilter;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
 import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
 
-/** Filters {@link StandardTokenizer} with {@link StandardFilter}, {@link
+/** Filters {@link StandardTokenizer} with {@link
  * LowerCaseFilter}, {@link StopFilter} and {@link SnowballFilter}.
  *
  * Available stemmers are listed in org.tartarus.snowball.ext. The name of a
@@ -57,8 +56,7 @@ public final class SnowballAnalyzer extends Analyzer {
     stopSet = CharArraySet.unmodifiableSet(CharArraySet.copy(stopWords));
   }
 
-  /** Constructs a {@link StandardTokenizer} filtered by a {@link
-      StandardFilter}, a {@link LowerCaseFilter}, a {@link StopFilter},
+  /** Constructs a {@link StandardTokenizer} filtered by a {@link LowerCaseFilter}, a {@link StopFilter},
       and a {@link SnowballFilter} */
   @Override
   public TokenStreamComponents createComponents(String fieldName) {
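As a usage sketch, an index could wire the new "condition" filter around "uppercase" through the analysis settings keys exercised by the unit test later in this change; the filter and analyzer names here are illustrative only, not part of the patch:

// Hypothetical settings sketch: uppercase only the tokens the inline
// Painless predicate matches, and pass every other token through unchanged.
import org.elasticsearch.common.settings.Settings;

public final class ConditionFilterSettingsExample {

    public static Settings example() {
        return Settings.builder()
            .put("index.analysis.filter.longer_only.type", "condition")
            .put("index.analysis.filter.longer_only.script.source", "token.getTerm().length() > 5")
            .putList("index.analysis.filter.longer_only.filter", "uppercase")
            .put("index.analysis.analyzer.example.type", "custom")
            .put("index.analysis.analyzer.example.tokenizer", "standard")
            .putList("index.analysis.analyzer.example.filter", "longer_only")
            .build();
    }
}

Analyzing "Vorsprung Durch Technik" with such an analyzer would be expected to yield "VORSPRUNG", "Durch", "TECHNIK", which is exactly what the test below asserts.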

diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzerProvider.java
index 0f213df9ad7..6eec01570a8 100644
--- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzerProvider.java
+++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzerProvider.java
@@ -19,8 +19,8 @@ package org.elasticsearch.analysis.common;
 
 import org.apache.lucene.analysis.CharArraySet;
-import org.apache.lucene.analysis.core.StopAnalyzer;
 import org.apache.lucene.analysis.de.GermanAnalyzer;
+import org.apache.lucene.analysis.en.EnglishAnalyzer;
 import org.apache.lucene.analysis.fr.FrenchAnalyzer;
 import org.apache.lucene.analysis.nl.DutchAnalyzer;
 import org.elasticsearch.common.settings.Settings;
@@ -42,7 +42,7 @@ import static java.util.Collections.unmodifiableMap;
  * Configuration of language is done with the "language" attribute or the analyzer.
  * Also supports additional stopwords via "stopwords" attribute
  *
- * The SnowballAnalyzer comes with a StandardFilter, LowerCaseFilter, StopFilter
+ * The SnowballAnalyzer comes with a LowerCaseFilter, StopFilter
  * and the SnowballFilter.
  *
  *
@@ -52,7 +52,7 @@ public class SnowballAnalyzerProvider extends AbstractIndexAnalyzerProvider<SnowballAnalyzer> {
     static {
         Map<String, CharArraySet> defaultLanguageStopwords = new HashMap<>();
-        defaultLanguageStopwords.put("English", StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+        defaultLanguageStopwords.put("English", EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
         defaultLanguageStopwords.put("Dutch", DutchAnalyzer.getDefaultStopSet());
         defaultLanguageStopwords.put("German", GermanAnalyzer.getDefaultStopSet());
         defaultLanguageStopwords.put("German2", GermanAnalyzer.getDefaultStopSet());
diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StandardHtmlStripAnalyzer.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StandardHtmlStripAnalyzer.java
index f0b2b7188e5..e2ee540fe3e 100644
--- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StandardHtmlStripAnalyzer.java
+++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StandardHtmlStripAnalyzer.java
@@ -25,8 +25,7 @@ import org.apache.lucene.analysis.StopFilter;
 import org.apache.lucene.analysis.StopwordAnalyzerBase;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.standard.StandardFilter;
+import org.apache.lucene.analysis.en.EnglishAnalyzer;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
 
 public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
@@ -36,7 +35,7 @@ public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
      */
     @Deprecated
     public StandardHtmlStripAnalyzer() {
-        super(StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+        super(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
     }
 
     StandardHtmlStripAnalyzer(CharArraySet stopwords) {
@@ -46,8 +45,7 @@ public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
     @Override
     protected TokenStreamComponents createComponents(final String fieldName) {
         final Tokenizer src = new StandardTokenizer();
-        TokenStream tok = new StandardFilter(src);
-        tok = new LowerCaseFilter(tok);
+        TokenStream tok = new LowerCaseFilter(src);
         if (!stopwords.isEmpty()) {
             tok = new StopFilter(tok, stopwords);
         }
diff --git a/modules/analysis-common/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension b/modules/analysis-common/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension
new file mode 100644
index 00000000000..44e98a3dd9c
--- /dev/null
+++ b/modules/analysis-common/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension
@@ -0,0 +1 @@
+org.elasticsearch.analysis.common.AnalysisPainlessExtension
\ No newline at end of file
diff --git a/modules/analysis-common/src/main/resources/org/elasticsearch/analysis/common/painless_whitelist.txt b/modules/analysis-common/src/main/resources/org/elasticsearch/analysis/common/painless_whitelist.txt
new file mode 100644
index 00000000000..83b70be5877
--- /dev/null
+++ b/modules/analysis-common/src/main/resources/org/elasticsearch/analysis/common/painless_whitelist.txt
@@ -0,0 +1,28 @@
+#
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. 
Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +class org.elasticsearch.analysis.common.AnalysisPredicateScript$Token { + CharSequence getTerm() + int getPosition() + int getPositionIncrement() + int getPositionLength() + int getStartOffset() + int getEndOffset() + String getType() + boolean isKeyword() +} \ No newline at end of file diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java index 1d2b8a36810..b5dc23fbdb8 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -64,7 +64,7 @@ public class CommonAnalysisPluginTests extends ESTestCase { public void testNGramNoDeprecationWarningPre6_4() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_3_0)) + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_0)) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); @@ -104,7 +104,7 @@ public class CommonAnalysisPluginTests extends ESTestCase { public void testEdgeNGramNoDeprecationWarningPre6_4() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_3_0)) + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_0)) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java index 0d5389a6d65..e2848779788 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java @@ -60,7 +60,7 @@ public class HtmlStripCharFilterFactoryTests extends ESTestCase { public void testNoDeprecationWarningPre6_3() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_2_4)) + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_2_4)) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); diff --git 
a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java index d2d226d6250..29122d72921 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.analysis.common; */ import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.core.StopAnalyzer; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.util.BytesRef; import org.elasticsearch.test.ESTokenStreamTestCase; @@ -44,7 +44,7 @@ public class PatternAnalyzerTests extends ESTokenStreamTestCase { // split on non-letter pattern, lowercase, english stopwords PatternAnalyzer b = new PatternAnalyzer(Pattern.compile("\\W+"), true, - StopAnalyzer.ENGLISH_STOP_WORDS_SET); + EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); assertAnalyzesTo(b, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] { "quick", "brown", "fox", "abcd1234", "56", "78", "dc" }); } @@ -61,7 +61,7 @@ public class PatternAnalyzerTests extends ESTokenStreamTestCase { // Split on whitespace patterns, lowercase, english stopwords PatternAnalyzer b = new PatternAnalyzer(Pattern.compile("\\s+"), true, - StopAnalyzer.ENGLISH_STOP_WORDS_SET); + EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); assertAnalyzesTo(b, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] { "quick", "brown", "fox,the", "abcd1234", "(56.78)", "dc." }); } @@ -78,7 +78,7 @@ public class PatternAnalyzerTests extends ESTokenStreamTestCase { // split on comma, lowercase, english stopwords PatternAnalyzer b = new PatternAnalyzer(Pattern.compile(","), true, - StopAnalyzer.ENGLISH_STOP_WORDS_SET); + EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); assertAnalyzesTo(b, "Here,Are,some,Comma,separated,words,", new String[] { "here", "some", "comma", "separated", "words" }); } @@ -109,7 +109,7 @@ public class PatternAnalyzerTests extends ESTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - Analyzer a = new PatternAnalyzer(Pattern.compile(","), true, StopAnalyzer.ENGLISH_STOP_WORDS_SET); + Analyzer a = new PatternAnalyzer(Pattern.compile(","), true, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java new file mode 100644 index 00000000000..39134ef1f53 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.analysis.common;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.indices.analysis.AnalysisModule;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptContext;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.test.ESTokenStreamTestCase;
+import org.elasticsearch.test.IndexSettingsModule;
+
+import java.util.Collections;
+
+public class ScriptedConditionTokenFilterTests extends ESTokenStreamTestCase {
+
+    public void testSimpleCondition() throws Exception {
+        Settings settings = Settings.builder()
+            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+            .build();
+        Settings indexSettings = Settings.builder()
+            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+            .put("index.analysis.filter.cond.type", "condition")
+            .put("index.analysis.filter.cond.script.source", "token.getTerm().length() > 5")
+            .putList("index.analysis.filter.cond.filter", "uppercase")
+            .put("index.analysis.analyzer.myAnalyzer.type", "custom")
+            .put("index.analysis.analyzer.myAnalyzer.tokenizer", "standard")
+            .putList("index.analysis.analyzer.myAnalyzer.filter", "cond")
+            .build();
+        IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
+
+        AnalysisPredicateScript.Factory factory = () -> new AnalysisPredicateScript() {
+            @Override
+            public boolean execute(Token token) {
+                return token.getTerm().length() > 5;
+            }
+        };
+
+        @SuppressWarnings("unchecked")
+        ScriptService scriptService = new ScriptService(indexSettings, Collections.emptyMap(), Collections.emptyMap()){
+            @Override
+            public <FactoryType> FactoryType compile(Script script, ScriptContext<FactoryType> context) {
+                assertEquals(context, AnalysisPredicateScript.CONTEXT);
+                assertEquals(new Script("token.getTerm().length() > 5"), script);
+                return (FactoryType) factory;
+            }
+        };
+
+        CommonAnalysisPlugin plugin = new CommonAnalysisPlugin();
+        plugin.createComponents(null, null, null, null, scriptService, null, null, null, null);
+        AnalysisModule module
+            = new AnalysisModule(TestEnvironment.newEnvironment(settings), Collections.singletonList(plugin));
+
+        IndexAnalyzers analyzers = module.getAnalysisRegistry().build(idxSettings);
+
+        try (NamedAnalyzer analyzer = analyzers.get("myAnalyzer")) {
+            assertNotNull(analyzer);
+            assertAnalyzesTo(analyzer, "Vorsprung Durch Technik", new String[]{
+                "VORSPRUNG", "Durch", "TECHNIK"
+            });
+        }
+
+    }
+
+}
diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SnowballAnalyzerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SnowballAnalyzerTests.java
index 0b9998eda31..360d17ef0f4 100644
--- 
a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SnowballAnalyzerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SnowballAnalyzerTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.analysis.common; */ import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.elasticsearch.test.ESTokenStreamTestCase; public class SnowballAnalyzerTests extends ESTokenStreamTestCase { @@ -33,7 +33,7 @@ public class SnowballAnalyzerTests extends ESTokenStreamTestCase { public void testStopwords() throws Exception { Analyzer a = new SnowballAnalyzer("English", - StandardAnalyzer.STOP_WORDS_SET); + EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); assertAnalyzesTo(a, "the quick brown fox jumped", new String[]{"quick", "brown", "fox", "jump"}); } diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml new file mode 100644 index 00000000000..4305e5db0af --- /dev/null +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml @@ -0,0 +1,36 @@ +## Test analysis scripts + +"condition": + - do: + indices.analyze: + body: + text: "Vorsprung Durch Technik" + tokenizer: "whitespace" + filter: + - type: condition + filter: [ "lowercase" ] + script: + source: "token.term.length() > 5" + + - length: { tokens: 3 } + - match: { tokens.0.token: "vorsprung" } + - match: { tokens.1.token: "Durch" } + - match: { tokens.2.token: "technik" } + +--- +"condition-vars": + - do: + indices.analyze: + body: + text: "Vorsprung Durch Technik" + tokenizer: "whitespace" + filter: + - type: condition + filter: [ "lowercase" ] + script: + source: "token.position > 1 && token.positionIncrement > 0 && token.startOffset > 0 && token.endOffset > 0 && (token.positionLength == 1 || token.type == \"a\" || token.keyword)" + + - length: { tokens: 3 } + - match: { tokens.0.token: "Vorsprung" } + - match: { tokens.1.token: "durch" } + - match: { tokens.2.token: "technik" } diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 4f35bbee28d..1681258e7c7 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -26,6 +26,7 @@ esplugin { dependencies { compileOnly project(':modules:lang-painless') compile project(':libs:grok') + compile project(':libs:dissect') } compileJava.options.compilerArgs << "-Xlint:-unchecked,-rawtypes" diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java index 23c98ca1e0c..792e5e4ebed 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java @@ -57,16 +57,17 @@ abstract class AbstractStringProcessor extends AbstractProcessor { } @Override - public final void execute(IngestDocument document) { + public final IngestDocument execute(IngestDocument document) { String val = document.getFieldValue(field, String.class, ignoreMissing); if (val == null && ignoreMissing) { - return; + return document; } else if (val == null) { throw new IllegalArgumentException("field [" + field + "] 
is null, cannot process it."); } document.setFieldValue(targetField, process(val)); + return document; } protected abstract T process(String value); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java index 0543ae8591f..058d1bf22d8 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java @@ -56,8 +56,9 @@ public final class AppendProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument ingestDocument) throws Exception { + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { ingestDocument.appendFieldValue(field, value); + return ingestDocument; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java index 2e881b82b59..aca48efe6c1 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java @@ -173,12 +173,12 @@ public final class ConvertProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument document) { + public IngestDocument execute(IngestDocument document) { Object oldValue = document.getFieldValue(field, Object.class, ignoreMissing); Object newValue; if (oldValue == null && ignoreMissing) { - return; + return document; } else if (oldValue == null) { throw new IllegalArgumentException("Field [" + field + "] is null, cannot be converted to type [" + convertType + "]"); } @@ -194,6 +194,7 @@ public final class ConvertProcessor extends AbstractProcessor { newValue = convertType.convert(oldValue); } document.setFieldValue(targetField, newValue); + return document; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java index 0d6253c88f9..4a88f15b641 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java @@ -63,7 +63,7 @@ public final class DateIndexNameProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument ingestDocument) throws Exception { + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { // Date can be specified as a string or long: Object obj = ingestDocument.getFieldValue(field, Object.class); String date = null; @@ -101,6 +101,7 @@ public final class DateIndexNameProcessor extends AbstractProcessor { .append('>'); String dynamicIndexName = builder.toString(); ingestDocument.setFieldValue(IngestDocument.MetaData.INDEX.getFieldName(), dynamicIndexName); + return ingestDocument; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java index 4a9654f8cd0..dd6e6006eeb 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java +++ 
b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java @@ -74,7 +74,7 @@ public final class DateProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument ingestDocument) { + public IngestDocument execute(IngestDocument ingestDocument) { Object obj = ingestDocument.getFieldValue(field, Object.class); String value = null; if (obj != null) { @@ -98,6 +98,7 @@ public final class DateProcessor extends AbstractProcessor { } ingestDocument.setFieldValue(targetField, ISODateTimeFormat.dateTime().print(dateTime)); + return ingestDocument; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DissectProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DissectProcessor.java new file mode 100644 index 00000000000..fa51d047e73 --- /dev/null +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DissectProcessor.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.ingest.common; + +import org.elasticsearch.dissect.DissectParser; +import org.elasticsearch.ingest.AbstractProcessor; +import org.elasticsearch.ingest.ConfigurationUtils; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; + +import java.util.Map; + +public final class DissectProcessor extends AbstractProcessor { + + public static final String TYPE = "dissect"; + //package private members for testing + final String field; + final boolean ignoreMissing; + final String pattern; + final String appendSeparator; + final DissectParser dissectParser; + + DissectProcessor(String tag, String field, String pattern, String appendSeparator, boolean ignoreMissing) { + super(tag); + this.field = field; + this.ignoreMissing = ignoreMissing; + this.pattern = pattern; + this.appendSeparator = appendSeparator; + this.dissectParser = new DissectParser(pattern, appendSeparator); + } + + @Override + public IngestDocument execute(IngestDocument ingestDocument) { + String input = ingestDocument.getFieldValue(field, String.class, ignoreMissing); + if (input == null && ignoreMissing) { + return ingestDocument; + } else if (input == null) { + throw new IllegalArgumentException("field [" + field + "] is null, cannot process it."); + } + dissectParser.parse(input).forEach(ingestDocument::setFieldValue); + return ingestDocument; + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory implements Processor.Factory { + + @Override + public DissectProcessor create(Map registry, String processorTag, Map config) { + String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); + String pattern = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "pattern"); + String appendSeparator = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "append_separator", ""); + boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); + return new DissectProcessor(processorTag, field, pattern, appendSeparator, ignoreMissing); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java index bfc32311733..0698f6ed0a6 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java @@ -41,7 +41,7 @@ public final class DotExpanderProcessor extends AbstractProcessor { @Override @SuppressWarnings("unchecked") - public void execute(IngestDocument ingestDocument) throws Exception { + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { String path; Map map; if (this.path != null) { @@ -75,6 +75,7 @@ public final class DotExpanderProcessor extends AbstractProcessor { Object value = map.remove(field); ingestDocument.setFieldValue(path, value); } + return ingestDocument; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DropProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DropProcessor.java new file mode 100644 index 00000000000..a0eabe38979 --- /dev/null +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DropProcessor.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more 
contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +import java.util.Map; +import org.elasticsearch.ingest.AbstractProcessor; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; + +/** + * Drop processor only returns {@code null} for the execution result to indicate that any document + * executed by it should not be indexed. + */ +public final class DropProcessor extends AbstractProcessor { + + public static final String TYPE = "drop"; + + private DropProcessor(final String tag) { + super(tag); + } + + @Override + public IngestDocument execute(final IngestDocument ingestDocument) throws Exception { + return null; + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory implements Processor.Factory { + + @Override + public Processor create(final Map processorFactories, final String tag, + final Map config) { + return new DropProcessor(tag); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/FailProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/FailProcessor.java index b1f946c10a2..0b62fbf72c8 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/FailProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/FailProcessor.java @@ -48,7 +48,7 @@ public final class FailProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument document) { + public IngestDocument execute(IngestDocument document) { throw new FailProcessorException(document.renderTemplate(message)); } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java index f5bf9cc9591..ad93298c646 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java @@ -28,6 +28,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; +import org.elasticsearch.script.ScriptService; import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException; import static org.elasticsearch.ingest.ConfigurationUtils.readBooleanProperty; @@ -62,24 +63,29 @@ public final class ForEachProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument ingestDocument) throws Exception { + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { List values = ingestDocument.getFieldValue(field, List.class, ignoreMissing); if (values == null) { if (ignoreMissing) { - return; + return ingestDocument; } throw new 
IllegalArgumentException("field [" + field + "] is null, cannot loop over its elements."); } List newValues = new ArrayList<>(values.size()); + IngestDocument document = ingestDocument; for (Object value : values) { Object previousValue = ingestDocument.getIngestMetadata().put("_value", value); try { - processor.execute(ingestDocument); + document = processor.execute(document); + if (document == null) { + return null; + } } finally { newValues.add(ingestDocument.getIngestMetadata().put("_value", previousValue)); } } - ingestDocument.setFieldValue(field, newValues); + document.setFieldValue(field, newValues); + return document; } @Override @@ -96,6 +102,13 @@ public final class ForEachProcessor extends AbstractProcessor { } public static final class Factory implements Processor.Factory { + + private final ScriptService scriptService; + + Factory(ScriptService scriptService) { + this.scriptService = scriptService; + } + @Override public ForEachProcessor create(Map factories, String tag, Map config) throws Exception { @@ -107,7 +120,8 @@ public final class ForEachProcessor extends AbstractProcessor { throw newConfigurationException(TYPE, tag, "processor", "Must specify exactly one processor type"); } Map.Entry> entry = entries.iterator().next(); - Processor processor = ConfigurationUtils.readProcessor(factories, entry.getKey(), entry.getValue()); + Processor processor = + ConfigurationUtils.readProcessor(factories, scriptService, entry.getKey(), entry.getValue()); return new ForEachProcessor(tag, field, processor, ignoreMissing); } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java index 88cba512b86..19883053d2a 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java @@ -54,11 +54,11 @@ public final class GrokProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument ingestDocument) throws Exception { + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { String fieldValue = ingestDocument.getFieldValue(matchField, String.class, ignoreMissing); if (fieldValue == null && ignoreMissing) { - return; + return ingestDocument; } else if (fieldValue == null) { throw new IllegalArgumentException("field [" + matchField + "] is null, cannot process it."); } @@ -81,6 +81,7 @@ public final class GrokProcessor extends AbstractProcessor { ingestDocument.setFieldValue(PATTERN_MATCH_KEY, "0"); } } + return ingestDocument; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java index bc475a2a005..d9dba2cc100 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java @@ -72,7 +72,7 @@ public class IngestCommonPlugin extends Plugin implements ActionPlugin, IngestPl processors.put(ConvertProcessor.TYPE, new ConvertProcessor.Factory()); processors.put(GsubProcessor.TYPE, new GsubProcessor.Factory()); processors.put(FailProcessor.TYPE, new FailProcessor.Factory(parameters.scriptService)); - processors.put(ForEachProcessor.TYPE, new ForEachProcessor.Factory()); + 
processors.put(ForEachProcessor.TYPE, new ForEachProcessor.Factory(parameters.scriptService)); processors.put(DateIndexNameProcessor.TYPE, new DateIndexNameProcessor.Factory(parameters.scriptService)); processors.put(SortProcessor.TYPE, new SortProcessor.Factory()); processors.put(GrokProcessor.TYPE, new GrokProcessor.Factory(GROK_PATTERNS, createGrokThreadWatchdog(parameters))); @@ -82,6 +82,9 @@ public class IngestCommonPlugin extends Plugin implements ActionPlugin, IngestPl processors.put(KeyValueProcessor.TYPE, new KeyValueProcessor.Factory()); processors.put(URLDecodeProcessor.TYPE, new URLDecodeProcessor.Factory()); processors.put(BytesProcessor.TYPE, new BytesProcessor.Factory()); + processors.put(PipelineProcessor.TYPE, new PipelineProcessor.Factory(parameters.ingestService)); + processors.put(DissectProcessor.TYPE, new DissectProcessor.Factory()); + processors.put(DropProcessor.TYPE, new DropProcessor.Factory()); return Collections.unmodifiableMap(processors); } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JoinProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JoinProcessor.java index 57216a71e02..f29a6888861 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JoinProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JoinProcessor.java @@ -60,7 +60,7 @@ public final class JoinProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument document) { + public IngestDocument execute(IngestDocument document) { List list = document.getFieldValue(field, List.class); if (list == null) { throw new IllegalArgumentException("field [" + field + "] is null, cannot join."); @@ -69,6 +69,7 @@ public final class JoinProcessor extends AbstractProcessor { .map(Object::toString) .collect(Collectors.joining(separator)); document.setFieldValue(targetField, joined); + return document; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java index c0a9d37abda..90a648347cd 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java @@ -107,12 +107,13 @@ public final class JsonProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument document) throws Exception { + public IngestDocument execute(IngestDocument document) throws Exception { if (addToRoot) { apply(document.getSourceAndMetadata(), field); } else { document.setFieldValue(targetField, apply(document.getFieldValue(field, Object.class))); } + return document; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java index 9cce3cedf3d..69c7e9ff751 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java @@ -188,8 +188,9 @@ public final class KeyValueProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument document) { + public IngestDocument execute(IngestDocument document) { execution.accept(document); + return document; } @Override diff --git 
a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/PipelineProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/PipelineProcessor.java new file mode 100644 index 00000000000..1958a3e5232 --- /dev/null +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/PipelineProcessor.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +import java.util.Map; +import org.elasticsearch.ingest.AbstractProcessor; +import org.elasticsearch.ingest.ConfigurationUtils; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.Pipeline; +import org.elasticsearch.ingest.Processor; + +public class PipelineProcessor extends AbstractProcessor { + + public static final String TYPE = "pipeline"; + + private final String pipelineName; + + private final IngestService ingestService; + + private PipelineProcessor(String tag, String pipelineName, IngestService ingestService) { + super(tag); + this.pipelineName = pipelineName; + this.ingestService = ingestService; + } + + @Override + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { + Pipeline pipeline = ingestService.getPipeline(pipelineName); + if (pipeline == null) { + throw new IllegalStateException("Pipeline processor configured for non-existent pipeline [" + pipelineName + ']'); + } + return ingestDocument.executePipeline(pipeline); + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory implements Processor.Factory { + + private final IngestService ingestService; + + public Factory(IngestService ingestService) { + this.ingestService = ingestService; + } + + @Override + public PipelineProcessor create(Map registry, String processorTag, + Map config) throws Exception { + String pipeline = + ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "pipeline"); + return new PipelineProcessor(processorTag, pipeline, ingestService); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java index 8a0b1529892..00209f55600 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java @@ -46,4 +46,5 @@ public final class Processors { public static String urlDecode(String value) { return URLDecodeProcessor.apply(value); } + } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java 
b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java index 2b9eaa9a13d..6002abb9e67 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java @@ -52,7 +52,7 @@ public final class RemoveProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument document) { + public IngestDocument execute(IngestDocument document) { if (ignoreMissing) { fields.forEach(field -> { String path = document.renderTemplate(field); @@ -63,6 +63,7 @@ public final class RemoveProcessor extends AbstractProcessor { } else { fields.forEach(document::removeField); } + return document; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java index a35a164ddd3..2abd920048f 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java @@ -59,11 +59,11 @@ public final class RenameProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument document) { + public IngestDocument execute(IngestDocument document) { String path = document.renderTemplate(field); if (document.hasField(path, true) == false) { if (ignoreMissing) { - return; + return document; } else { throw new IllegalArgumentException("field [" + path + "] doesn't exist"); } @@ -86,6 +86,7 @@ public final class RenameProcessor extends AbstractProcessor { document.setFieldValue(path, value); throw e; } + return document; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java index 169b2ab646a..12ef53cdcfc 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java @@ -69,9 +69,10 @@ public final class ScriptProcessor extends AbstractProcessor { * @param document The Ingest document passed into the script context under the "ctx" object. 
*/ @Override - public void execute(IngestDocument document) { + public IngestDocument execute(IngestDocument document) { IngestScript.Factory factory = scriptService.compile(script, IngestScript.CONTEXT); factory.newInstance(script.getParams()).execute(document.getSourceAndMetadata()); + return document; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java index 7aefa288618..0af51e5b895 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java @@ -65,10 +65,11 @@ public final class SetProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument document) { + public IngestDocument execute(IngestDocument document) { if (overrideEnabled || document.hasField(field) == false || document.getFieldValue(field, Object.class) == null) { document.setFieldValue(field, value); } + return document; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SortProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SortProcessor.java index 7ff266efe6b..a29cc346524 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SortProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SortProcessor.java @@ -94,7 +94,7 @@ public final class SortProcessor extends AbstractProcessor { @Override @SuppressWarnings("unchecked") - public void execute(IngestDocument document) { + public IngestDocument execute(IngestDocument document) { List> list = document.getFieldValue(field, List.class); if (list == null) { @@ -110,6 +110,7 @@ public final class SortProcessor extends AbstractProcessor { } document.setFieldValue(targetField, copy); + return document; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SplitProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SplitProcessor.java index cdd90f937fd..96a765b5ba7 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SplitProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SplitProcessor.java @@ -68,11 +68,11 @@ public final class SplitProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument document) { + public IngestDocument execute(IngestDocument document) { String oldVal = document.getFieldValue(field, String.class, ignoreMissing); if (oldVal == null && ignoreMissing) { - return; + return document; } else if (oldVal == null) { throw new IllegalArgumentException("field [" + field + "] is null, cannot split."); } @@ -81,6 +81,7 @@ public final class SplitProcessor extends AbstractProcessor { List splitList = new ArrayList<>(strings.length); Collections.addAll(splitList, strings); document.setFieldValue(targetField, splitList); + return document; } @Override diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorFactoryTests.java new file mode 100644 index 00000000000..ba1b2bd1eb5 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorFactoryTests.java @@ -0,0 +1,92 @@ +/* + * Licensed to 
Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.dissect.DissectException; +import org.elasticsearch.ingest.RandomDocumentPicks; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.is; + +public class DissectProcessorFactoryTests extends ESTestCase { + + public void testCreate() { + DissectProcessor.Factory factory = new DissectProcessor.Factory(); + String fieldName = RandomDocumentPicks.randomFieldName(random()); + String processorTag = randomAlphaOfLength(10); + String pattern = "%{a},%{b},%{c}"; + String appendSeparator = ":"; + + Map<String, Object> config = new HashMap<>(); + config.put("field", fieldName); + config.put("pattern", pattern); + config.put("append_separator", appendSeparator); + config.put("ignore_missing", true); + + DissectProcessor processor = factory.create(null, processorTag, config); + assertThat(processor.getTag(), equalTo(processorTag)); + assertThat(processor.field, equalTo(fieldName)); + assertThat(processor.pattern, equalTo(pattern)); + assertThat(processor.appendSeparator, equalTo(appendSeparator)); + assertThat(processor.dissectParser, is(notNullValue())); + assertThat(processor.ignoreMissing, is(true)); + } + + public void testCreateMissingField() { + DissectProcessor.Factory factory = new DissectProcessor.Factory(); + Map<String, Object> config = new HashMap<>(); + config.put("pattern", "%{a},%{b},%{c}"); + Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", config)); + assertThat(e.getMessage(), Matchers.equalTo("[field] required property is missing")); + } + + public void testCreateMissingPattern() { + DissectProcessor.Factory factory = new DissectProcessor.Factory(); + Map<String, Object> config = new HashMap<>(); + config.put("field", randomAlphaOfLength(10)); + Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", config)); + assertThat(e.getMessage(), Matchers.equalTo("[pattern] required property is missing")); + } + + public void testCreateMissingOptionals() { + DissectProcessor.Factory factory = new DissectProcessor.Factory(); + Map<String, Object> config = new HashMap<>(); + config.put("pattern", "%{a},%{b},%{c}"); + config.put("field", randomAlphaOfLength(10)); + DissectProcessor processor = factory.create(null, "_tag", config); + assertThat(processor.appendSeparator, equalTo("")); + assertThat(processor.ignoreMissing, is(false)); + } + + public void testCreateBadPattern() { + DissectProcessor.Factory factory = new DissectProcessor.Factory(); + Map<String, Object> config = new
HashMap<>(); + config.put("pattern", "no keys defined"); + config.put("field", randomAlphaOfLength(10)); + expectThrows(DissectException.class, () -> factory.create(null, "_tag", config)); + } +} diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorTests.java new file mode 100644 index 00000000000..bb5d26d01a8 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorTests.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.dissect.DissectException; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.RandomDocumentPicks; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.CoreMatchers; + +import java.util.Collections; +import java.util.HashMap; + +import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; +import static org.hamcrest.Matchers.equalTo; + +/** + * Basic tests for the {@link DissectProcessor}. See the {@link org.elasticsearch.dissect.DissectParser} test suite for a comprehensive + * set of dissect tests. 
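+ * For example, {@link #testMatch()} dissects the value {@code "foo,bar,baz"} with the pattern {@code %{a},%{b},%{c}},
+ * producing the fields a=foo, b=bar and c=baz.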
+ */ +public class DissectProcessorTests extends ESTestCase { + + public void testMatch() { + IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, + Collections.singletonMap("message", "foo,bar,baz")); + DissectProcessor dissectProcessor = new DissectProcessor("", "message", "%{a},%{b},%{c}", "", true); + dissectProcessor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue("a", String.class), equalTo("foo")); + assertThat(ingestDocument.getFieldValue("b", String.class), equalTo("bar")); + assertThat(ingestDocument.getFieldValue("c", String.class), equalTo("baz")); + } + + public void testMatchOverwrite() { + IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, + MapBuilder.<String, Object>newMapBuilder() + .put("message", "foo,bar,baz") + .put("a", "willgetstompped") + .map()); + assertThat(ingestDocument.getFieldValue("a", String.class), equalTo("willgetstompped")); + DissectProcessor dissectProcessor = new DissectProcessor("", "message", "%{a},%{b},%{c}", "", true); + dissectProcessor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue("a", String.class), equalTo("foo")); + assertThat(ingestDocument.getFieldValue("b", String.class), equalTo("bar")); + assertThat(ingestDocument.getFieldValue("c", String.class), equalTo("baz")); + } + + public void testAdvancedMatch() { + IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, + Collections.singletonMap("message", "foo bar,,,,,,,baz nope:notagain 😊 🐇 🙃")); + DissectProcessor dissectProcessor = + new DissectProcessor("", "message", "%{a->} %{*b->},%{&b} %{}:%{?skipme} %{+smile/2} 🐇 %{+smile/1}", "::::", true); + dissectProcessor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue("a", String.class), equalTo("foo")); + assertThat(ingestDocument.getFieldValue("bar", String.class), equalTo("baz")); + expectThrows(IllegalArgumentException.class, () -> ingestDocument.getFieldValue("nope", String.class)); + expectThrows(IllegalArgumentException.class, () -> ingestDocument.getFieldValue("notagain", String.class)); + assertThat(ingestDocument.getFieldValue("smile", String.class), equalTo("🙃::::😊")); + } + + public void testMiss() { + IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, + Collections.singletonMap("message", "foo:bar,baz")); + DissectProcessor dissectProcessor = new DissectProcessor("", "message", "%{a},%{b},%{c}", "", true); + DissectException e = expectThrows(DissectException.class, () -> dissectProcessor.execute(ingestDocument)); + assertThat(e.getMessage(), CoreMatchers.containsString("Unable to find match for dissect pattern")); + } + + public void testNonStringValueWithIgnoreMissing() { + String fieldName = RandomDocumentPicks.randomFieldName(random()); + Processor processor = new DissectProcessor("", fieldName, "%{a},%{b},%{c}", "", true); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + ingestDocument.setFieldValue(fieldName, randomInt()); + Exception e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), equalTo("field [" + fieldName + "] of type [java.lang.Integer] cannot be cast to [java.lang.String]")); + } + + public void testNullValueWithIgnoreMissing() throws Exception { + String fieldName = RandomDocumentPicks.randomFieldName(random()); + Processor processor = new DissectProcessor("", fieldName,
"%{a},%{b},%{c}", "", true); + IngestDocument originalIngestDocument = RandomDocumentPicks + .randomIngestDocument(random(), Collections.singletonMap(fieldName, null)); + IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); + processor.execute(ingestDocument); + assertIngestDocument(originalIngestDocument, ingestDocument); + } + + public void testNullValueWithOutIgnoreMissing() { + String fieldName = RandomDocumentPicks.randomFieldName(random()); + Processor processor = new DissectProcessor("", fieldName, "%{a},%{b},%{c}", "", false); + IngestDocument originalIngestDocument = RandomDocumentPicks + .randomIngestDocument(random(), Collections.singletonMap(fieldName, null)); + IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); + expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + } +} diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java index f382ad8dcfb..7ab19c4147e 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.TestProcessor; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -30,14 +31,17 @@ import java.util.HashMap; import java.util.Map; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; public class ForEachProcessorFactoryTests extends ESTestCase { + private final ScriptService scriptService = mock(ScriptService.class); + public void testCreate() throws Exception { Processor processor = new TestProcessor(ingestDocument -> { }); Map registry = new HashMap<>(); registry.put("_name", (r, t, c) -> processor); - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -53,7 +57,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase { Processor processor = new TestProcessor(ingestDocument -> { }); Map registry = new HashMap<>(); registry.put("_name", (r, t, c) -> processor); - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -71,7 +75,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase { Map registry = new HashMap<>(); registry.put("_first", (r, t, c) -> processor); registry.put("_second", (r, t, c) -> processor); - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -84,7 +88,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase { } public void testCreateWithNonExistingProcessorType() throws Exception { - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new 
ForEachProcessor.Factory(scriptService); Map<String, Object> config = new HashMap<>(); config.put("field", "_field"); config.put("processor", Collections.singletonMap("_name", Collections.emptyMap())); @@ -97,7 +101,7 @@ Processor processor = new TestProcessor(ingestDocument -> { }); Map<String, Processor.Factory> registry = new HashMap<>(); registry.put("_name", (r, t, c) -> processor); - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map<String, Object> config = new HashMap<>(); config.put("processor", Collections.singletonList(Collections.singletonMap("_name", Collections.emptyMap()))); Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(registry, null, config)); @@ -105,7 +109,7 @@ } public void testCreateWithMissingProcessor() { - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map<String, Object> config = new HashMap<>(); config.put("field", "_field"); Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(Collections.emptyMap(), null, config)); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java index ffc5bcd4ac9..282994d8eb3 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java @@ -154,9 +154,10 @@ public class ForEachProcessorTests extends ESTestCase { public void testRandom() throws Exception { Processor innerProcessor = new Processor() { @Override - public void execute(IngestDocument ingestDocument) throws Exception { + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { String existingValue = ingestDocument.getFieldValue("_ingest._value", String.class); ingestDocument.setFieldValue("_ingest._value", existingValue + "."); + return ingestDocument; } @Override diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/PipelineProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/PipelineProcessorTests.java new file mode 100644 index 00000000000..6e18bac40d4 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/PipelineProcessorTests.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.ingest.common; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ingest.CompoundProcessor; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.Pipeline; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.RandomDocumentPicks; +import org.elasticsearch.test.ESTestCase; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class PipelineProcessorTests extends ESTestCase { + + public void testExecutesPipeline() throws Exception { + String pipelineId = "pipeline"; + IngestService ingestService = mock(IngestService.class); + CompletableFuture<IngestDocument> invoked = new CompletableFuture<>(); + IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + Pipeline pipeline = new Pipeline( + pipelineId, null, null, + new CompoundProcessor(new Processor() { + @Override + public IngestDocument execute(final IngestDocument ingestDocument) throws Exception { + invoked.complete(ingestDocument); + return ingestDocument; + } + + @Override + public String getType() { + return null; + } + + @Override + public String getTag() { + return null; + } + }) + ); + when(ingestService.getPipeline(pipelineId)).thenReturn(pipeline); + PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService); + Map<String, Object> config = new HashMap<>(); + config.put("pipeline", pipelineId); + factory.create(Collections.emptyMap(), null, config).execute(testIngestDocument); + assertEquals(testIngestDocument, invoked.get()); + } + + public void testThrowsOnMissingPipeline() throws Exception { + IngestService ingestService = mock(IngestService.class); + IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService); + Map<String, Object> config = new HashMap<>(); + config.put("pipeline", "missingPipelineId"); + IllegalStateException e = expectThrows( + IllegalStateException.class, + () -> factory.create(Collections.emptyMap(), null, config).execute(testIngestDocument) + ); + assertEquals( + "Pipeline processor configured for non-existent pipeline [missingPipelineId]", e.getMessage() + ); + } + + public void testThrowsOnRecursivePipelineInvocations() throws Exception { + String innerPipelineId = "inner"; + String outerPipelineId = "outer"; + IngestService ingestService = mock(IngestService.class); + IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + Map<String, Object> outerConfig = new HashMap<>(); + outerConfig.put("pipeline", innerPipelineId); + PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService); + Pipeline outer = new Pipeline( + outerPipelineId, null, null, + new CompoundProcessor(factory.create(Collections.emptyMap(), null, outerConfig)) + ); + Map<String, Object> innerConfig = new HashMap<>(); + innerConfig.put("pipeline", outerPipelineId); + Pipeline inner = new Pipeline( + innerPipelineId, null, null, + new CompoundProcessor(factory.create(Collections.emptyMap(), null, innerConfig)) + ); + when(ingestService.getPipeline(outerPipelineId)).thenReturn(outer); + when(ingestService.getPipeline(innerPipelineId)).thenReturn(inner); + outerConfig.put("pipeline", innerPipelineId); + ElasticsearchException e =
expectThrows( + ElasticsearchException.class, + () -> factory.create(Collections.emptyMap(), null, outerConfig).execute(testIngestDocument) + ); + assertEquals( + "Recursive invocation of pipeline [inner] detected.", e.getRootCause().getMessage() + ); + } + + public void testAllowsRepeatedPipelineInvocations() throws Exception { + String innerPipelineId = "inner"; + IngestService ingestService = mock(IngestService.class); + IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + Map<String, Object> outerConfig = new HashMap<>(); + outerConfig.put("pipeline", innerPipelineId); + PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService); + Pipeline inner = new Pipeline( + innerPipelineId, null, null, new CompoundProcessor() + ); + when(ingestService.getPipeline(innerPipelineId)).thenReturn(inner); + Processor outerProc = factory.create(Collections.emptyMap(), null, outerConfig); + outerProc.execute(testIngestDocument); + outerProc.execute(testIngestDocument); + } +} diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml index 86557946ac0..eb23b7840ee 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml @@ -10,9 +10,11 @@ - contains: { nodes.$master.modules: { name: ingest-common } } - contains: { nodes.$master.ingest.processors: { type: append } } + - contains: { nodes.$master.ingest.processors: { type: bytes } } - contains: { nodes.$master.ingest.processors: { type: convert } } - contains: { nodes.$master.ingest.processors: { type: date } } - contains: { nodes.$master.ingest.processors: { type: date_index_name } } + - contains: { nodes.$master.ingest.processors: { type: dissect } } - contains: { nodes.$master.ingest.processors: { type: dot_expander } } - contains: { nodes.$master.ingest.processors: { type: fail } } - contains: { nodes.$master.ingest.processors: { type: foreach } } @@ -30,4 +32,3 @@ - contains: { nodes.$master.ingest.processors: { type: split } } - contains: { nodes.$master.ingest.processors: { type: trim } } - contains: { nodes.$master.ingest.processors: { type: uppercase } } - - contains: { nodes.$master.ingest.processors: { type: bytes } } diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_dissect_processor.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_dissect_processor.yml new file mode 100644 index 00000000000..1a7c2e593d4 --- /dev/null +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_dissect_processor.yml @@ -0,0 +1,90 @@ +--- +teardown: +- do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test dissect processor match": +- do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "dissect" : { + "field" : "message", + "pattern" : "%{a} %{b} %{c}" + } + } + ] + } +- match: { acknowledged: true } + +- do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: {message: "foo bar baz"} + +- do: + get: + index: test + type: test + id: 1 +- match: { _source.message: "foo bar baz" } +- match: { _source.a: "foo" } +- match: { _source.b: "bar" } +- match: { _source.c: "baz" } +--- +"Test dissect processor mismatch": +- do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { +
"description": "_description", + "processors": [ + { + "dissect" : { + "field" : "message", + "pattern" : "%{a},%{b},%{c}" + } + } + ] + } +- match: { acknowledged: true } + +- do: + catch: '/Unable to find match for dissect pattern: \%\{a\},\%\{b\},\%\{c\} against source: foo bar baz/' + index: + index: test + type: test + id: 2 + pipeline: "my_pipeline" + body: {message: "foo bar baz"} + +--- +"Test fail to create dissect processor": +- do: + catch: '/Unable to parse pattern/' + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "dissect" : { + "field" : "message", + "pattern" : "bad pattern" + } + } + ] + } + diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_conditional_processor.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_conditional_processor.yml new file mode 100644 index 00000000000..532519c4ca0 --- /dev/null +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_conditional_processor.yml @@ -0,0 +1,81 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test conditional processor fulfilled condition": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "bytes" : { + "if" : "ctx.conditional_field == 'bar'", + "field" : "bytes_source_field", + "target_field" : "bytes_target_field" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: {bytes_source_field: "1kb", conditional_field: "bar"} + + - do: + get: + index: test + type: test + id: 1 + - match: { _source.bytes_source_field: "1kb" } + - match: { _source.conditional_field: "bar" } + - match: { _source.bytes_target_field: 1024 } + +--- +"Test conditional processor unfulfilled condition": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "bytes" : { + "if" : "ctx.conditional_field == 'foo'", + "field" : "bytes_source_field", + "target_field" : "bytes_target_field" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: {bytes_source_field: "1kb", conditional_field: "bar"} + + - do: + get: + index: test + type: test + id: 1 + - match: { _source.bytes_source_field: "1kb" } + - match: { _source.conditional_field: "bar" } + - is_false: _source.bytes_target_field + diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml new file mode 100644 index 00000000000..355ba2d4210 --- /dev/null +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml @@ -0,0 +1,113 @@ +--- +teardown: +- do: + ingest.delete_pipeline: + id: "inner" + ignore: 404 + +- do: + ingest.delete_pipeline: + id: "outer" + ignore: 404 + +--- +"Test Pipeline Processor with Simple Inner Pipeline": +- do: + ingest.put_pipeline: + id: "inner" + body: > + { + "description" : "inner pipeline", + "processors" : [ + { + "set" : { + "field": "foo", + "value": "bar" + } + }, + { + "set" : { + "field": "baz", + "value": "blub" + } + } + ] + } +- match: { acknowledged: true } + +- do: + ingest.put_pipeline: + id: "outer" + body: > + { + "description" : "outer pipeline", + "processors" : [ + { + "pipeline" : { + 
"pipeline": "inner" + } + } + ] + } +- match: { acknowledged: true } + +- do: + index: + index: test + type: test + id: 1 + pipeline: "outer" + body: {} + +- do: + get: + index: test + type: test + id: 1 +- match: { _source.foo: "bar" } +- match: { _source.baz: "blub" } + +--- +"Test Pipeline Processor with Circular Pipelines": +- do: + ingest.put_pipeline: + id: "outer" + body: > + { + "description" : "outer pipeline", + "processors" : [ + { + "pipeline" : { + "pipeline": "inner" + } + } + ] + } +- match: { acknowledged: true } + +- do: + ingest.put_pipeline: + id: "inner" + body: > + { + "description" : "inner pipeline", + "processors" : [ + { + "pipeline" : { + "pipeline": "outer" + } + } + ] + } +- match: { acknowledged: true } + +- do: + catch: /illegal_state_exception/ + index: + index: test + type: test + id: 1 + pipeline: "outer" + body: {} +- match: { error.root_cause.0.type: "exception" } +- match: { error.root_cause.0.reason: "java.lang.IllegalArgumentException: java.lang.IllegalStateException: Recursive invocation of pipeline [inner] detected." } diff --git a/modules/lang-expression/licenses/lucene-expressions-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 0ebdddcc5f1..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fded6bb485b8b01bb2a9280162fd14d4d3ce4510 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-66c671ea80.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..047bca7b614 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +58b9db095c569b4c4da491810f14e1429878b594 \ No newline at end of file diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java index 23dc0fd276c..55f8deb0592 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java @@ -26,7 +26,7 @@ import org.apache.lucene.expressions.js.VariableContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.queries.function.valuesource.DoubleConstValueSource; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.Nullable; @@ -336,7 +336,7 @@ public class ExpressionScriptEngine extends AbstractComponent implements ScriptE } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java index f4095b3f68a..932e5979c0f 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java +++ 
b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java @@ -39,7 +39,7 @@ import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.metrics.stats.Stats; +import org.elasticsearch.search.aggregations.metrics.Stats; import org.elasticsearch.search.aggregations.pipeline.SimpleValue; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java index caa9fa4831a..eea9e31d4a7 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.script.mustache; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; @@ -120,21 +119,17 @@ public class MultiSearchTemplateRequest extends ActionRequest implements Composi @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - maxConcurrentSearchRequests = in.readVInt(); - } + maxConcurrentSearchRequests = in.readVInt(); requests = in.readStreamableList(SearchTemplateRequest::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeVInt(maxConcurrentSearchRequests); - } + out.writeVInt(maxConcurrentSearchRequests); out.writeStreamableList(requests); } - + @Override public boolean equals(Object o) { if (this == o) return true; @@ -148,9 +143,9 @@ public class MultiSearchTemplateRequest extends ActionRequest implements Composi @Override public int hashCode() { return Objects.hash(maxConcurrentSearchRequests, requests, indicesOptions); - } - - public static byte[] writeMultiLineFormat(MultiSearchTemplateRequest multiSearchTemplateRequest, + } + + public static byte[] writeMultiLineFormat(MultiSearchTemplateRequest multiSearchTemplateRequest, XContent xContent) throws IOException { ByteArrayOutputStream output = new ByteArrayOutputStream(); for (SearchTemplateRequest templateRequest : multiSearchTemplateRequest.requests()) { @@ -168,5 +163,5 @@ public class MultiSearchTemplateRequest extends ActionRequest implements Composi } return output.toByteArray(); } - + } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateWithoutContentIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateWithoutContentIT.java index cbc6adf6be2..023d3b24676 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateWithoutContentIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateWithoutContentIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.script.mustache; +import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; import 
org.elasticsearch.test.rest.ESRestTestCase; @@ -30,14 +31,14 @@ public class SearchTemplateWithoutContentIT extends ESRestTestCase { public void testSearchTemplateMissingBody() throws IOException { ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest( - randomBoolean() ? "POST" : "GET", "/_search/template")); + new Request(randomBoolean() ? "POST" : "GET", "/_search/template"))); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); assertThat(responseException.getMessage(), containsString("request body or source parameter is required")); } public void testMultiSearchTemplateMissingBody() throws IOException { ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest( - randomBoolean() ? "POST" : "GET", "/_msearch/template")); + new Request(randomBoolean() ? "POST" : "GET", "/_msearch/template"))); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); assertThat(responseException.getMessage(), containsString("request body or source parameter is required")); } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index c38325edd14..31a9e595d0b 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -61,9 +61,19 @@ public final class Whitelist { /** The {@link List} of all the whitelisted Painless classes. */ public final List<WhitelistClass> whitelistClasses; - /** Standard constructor. All values must be not {@code null}. */ - public Whitelist(ClassLoader classLoader, List<WhitelistClass> whitelistClasses) { + /** The {@link List} of all the whitelisted static Painless methods. */ + public final List<WhitelistMethod> whitelistImportedMethods; + + /** The {@link List} of all the whitelisted Painless bindings. */ + public final List<WhitelistBinding> whitelistBindings; + + /** Standard constructor. All values must be not {@code null}. */ + public Whitelist(ClassLoader classLoader, List<WhitelistClass> whitelistClasses, + List<WhitelistMethod> whitelistImportedMethods, List<WhitelistBinding> whitelistBindings) { + this.classLoader = Objects.requireNonNull(classLoader); this.whitelistClasses = Collections.unmodifiableList(Objects.requireNonNull(whitelistClasses)); + this.whitelistImportedMethods = Collections.unmodifiableList(Objects.requireNonNull(whitelistImportedMethods)); + this.whitelistBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistBindings)); } } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java new file mode 100644 index 00000000000..364dbbb09ca --- /dev/null +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.spi; + +import java.util.List; +import java.util.Objects; + +/** + * A binding represents a method call that stores state. Each binding class must have exactly one + * public constructor and one public method excluding those inherited directly from {@link Object}. + * The canonical type name parameters provided must match those of the constructor and method combined. + * The constructor for a binding class will be called when the binding method is called for the first + * time at which point state may be stored for the arguments passed into the constructor. The method + * for a binding class will be called each time the binding method is called and may use the previously + * stored state. + */ +public class WhitelistBinding { + + /** Information about where this constructor was whitelisted from. */ + public final String origin; + + /** The Java class name this binding represents. */ + public final String targetJavaClassName; + + /** The method name for this binding. */ + public final String methodName; + + /** + * The canonical type name for the return type. + */ + public final String returnCanonicalTypeName; + + /** + * A {@link List} of {@link String}s that are the Painless type names for the parameters of the + * constructor which can be used to look up the Java constructor through reflection. + */ + public final List<String> canonicalTypeNameParameters; + + /** Standard constructor. All values must be not {@code null}. */ + public WhitelistBinding(String origin, String targetJavaClassName, + String methodName, String returnCanonicalTypeName, List<String> canonicalTypeNameParameters) { + + this.origin = Objects.requireNonNull(origin); + this.targetJavaClassName = Objects.requireNonNull(targetJavaClassName); + + this.methodName = Objects.requireNonNull(methodName); + this.returnCanonicalTypeName = Objects.requireNonNull(returnCanonicalTypeName); + this.canonicalTypeNameParameters = Objects.requireNonNull(canonicalTypeNameParameters); + } +} diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java index 0b216ae5c29..7b3eb75aa3e 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java @@ -62,9 +62,8 @@ public final class WhitelistClass { /** Standard constructor. All values must be not {@code null}.
*/ public WhitelistClass(String origin, String javaClassName, boolean noImport, - List<WhitelistConstructor> whitelistConstructors, - List<WhitelistMethod> whitelistMethods, - List<WhitelistField> whitelistFields) { + List<WhitelistConstructor> whitelistConstructors, List<WhitelistMethod> whitelistMethods, List<WhitelistField> whitelistFields) + { this.origin = Objects.requireNonNull(origin); this.javaClassName = Objects.requireNonNull(javaClassName); diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java index a4a0076626a..2f5dec769fc 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java @@ -133,6 +133,8 @@ public final class WhitelistLoader { */ public static Whitelist loadFromResourceFiles(Class<?> resource, String... filepaths) { List<WhitelistClass> whitelistClasses = new ArrayList<>(); + List<WhitelistMethod> whitelistStatics = new ArrayList<>(); + List<WhitelistBinding> whitelistBindings = new ArrayList<>(); // Execute a single pass through the whitelist text files. This will gather all the // constructors, methods, augmented methods, and fields for each whitelisted class. @@ -141,8 +143,9 @@ int number = -1; try (LineNumberReader reader = new LineNumberReader( - new InputStreamReader(resource.getResourceAsStream(filepath), StandardCharsets.UTF_8))) { + new InputStreamReader(resource.getResourceAsStream(filepath), StandardCharsets.UTF_8))) { + String parseType = null; String whitelistClassOrigin = null; String javaClassName = null; boolean noImport = false; @@ -165,7 +168,11 @@ // Ensure the final token of the line is '{'. if (line.endsWith("{") == false) { throw new IllegalArgumentException( - "invalid class definition: failed to parse class opening bracket [" + line + "]"); + "invalid class definition: failed to parse class opening bracket [" + line + "]"); + } + + if (parseType != null) { + throw new IllegalArgumentException("invalid definition: cannot embed class definition [" + line + "]"); } // Parse the Java class name. @@ -178,6 +185,7 @@ throw new IllegalArgumentException("invalid class definition: failed to parse class name [" + line + "]"); } + parseType = "class"; whitelistClassOrigin = "[" + filepath + "]:[" + number + "]"; javaClassName = tokens[0]; @@ -185,43 +193,124 @@ whitelistConstructors = new ArrayList<>(); whitelistMethods = new ArrayList<>(); whitelistFields = new ArrayList<>(); - - // Handle the end of a class, by creating a new WhitelistClass with all the previously gathered - // constructors, methods, augmented methods, and fields, and adding it to the list of whitelisted classes. - // Expects the following format: '}' '\n' - } else if (line.equals("}")) { - if (javaClassName == null) { - throw new IllegalArgumentException("invalid class definition: extraneous closing bracket"); + } else if (line.startsWith("static_import ")) { + // Ensure the final token of the line is '{'.
+ if (line.endsWith("{") == false) { + throw new IllegalArgumentException( + "invalid static import definition: failed to parse static import opening bracket [" + line + "]"); } - whitelistClasses.add(new WhitelistClass(whitelistClassOrigin, javaClassName, noImport, - whitelistConstructors, whitelistMethods, whitelistFields)); + if (parseType != null) { + throw new IllegalArgumentException("invalid definition: cannot embed static import definition [" + line + "]"); + } - // Set all the variables to null to ensure a new class definition is found before other parsable values. - whitelistClassOrigin = null; - javaClassName = null; - noImport = false; - whitelistConstructors = null; - whitelistMethods = null; - whitelistFields = null; + parseType = "static_import"; - // Handle all other valid cases. - } else { + // Handle the end of a definition and reset all previously gathered values. + // Expects the following format: '}' '\n' + } else if (line.equals("}")) { + if (parseType == null) { + throw new IllegalArgumentException("invalid definition: extraneous closing bracket"); + } + + // Create a new WhitelistClass with all the previously gathered constructors, methods, + // augmented methods, and fields, and add it to the list of whitelisted classes. + if ("class".equals(parseType)) { + whitelistClasses.add(new WhitelistClass(whitelistClassOrigin, javaClassName, noImport, + whitelistConstructors, whitelistMethods, whitelistFields)); + + whitelistClassOrigin = null; + javaClassName = null; + noImport = false; + whitelistConstructors = null; + whitelistMethods = null; + whitelistFields = null; + } + + // Reset the parseType. + parseType = null; + + // Handle static import definition types. + // Expects the following format: ID ID '(' ( ID ( ',' ID )* )? ')' ( 'from_class' | 'bound_to' ) ID '\n' + } else if ("static_import".equals(parseType)) { // Mark the origin of this parsable object. String origin = "[" + filepath + "]:[" + number + "]"; - // Ensure we have a defined class before adding any constructors, methods, augmented methods, or fields. - if (javaClassName == null) { - throw new IllegalArgumentException("invalid object definition: expected a class name [" + line + "]"); + // Parse the tokens prior to the method parameters. + int parameterStartIndex = line.indexOf('('); + + if (parameterStartIndex == -1) { + throw new IllegalArgumentException( + "illegal static import definition: start of method parameters not found [" + line + "]"); } + String[] tokens = line.substring(0, parameterStartIndex).trim().split("\\s+"); + + String methodName; + + // Based on the number of tokens, look up the Java method name. + if (tokens.length == 2) { + methodName = tokens[1]; + } else { + throw new IllegalArgumentException("invalid method definition: unexpected format [" + line + "]"); + } + + String returnCanonicalTypeName = tokens[0]; + + // Parse the method parameters. + int parameterEndIndex = line.indexOf(')'); + + if (parameterEndIndex == -1) { + throw new IllegalArgumentException( + "illegal static import definition: end of method parameters not found [" + line + "]"); + } + + String[] canonicalTypeNameParameters = + line.substring(parameterStartIndex + 1, parameterEndIndex).replaceAll("\\s+", "").split(","); + + // Handle the case for a method with no parameters. + if ("".equals(canonicalTypeNameParameters[0])) { + canonicalTypeNameParameters = new String[0]; + } + + // Parse the static import type and class. 
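+ // The text after the closing parenthesis is expected to name the import type and the owning class, e.g.
+ // 'float staticAddFloatsTest(float, float) from_class org.elasticsearch.painless.FeatureTest' or
+ // 'int testAddWithState(int, int, int, double) bound_to org.elasticsearch.painless.BindingTest' (for a
+ // binding, the parameter list covers the constructor arguments followed by the method arguments).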
+ tokens = line.substring(parameterEndIndex + 1).trim().split("\\s+"); + + String staticImportType; + String targetJavaClassName; + + // Based on the number of tokens, look up the type and class. + if (tokens.length == 2) { + staticImportType = tokens[0]; + targetJavaClassName = tokens[1]; + } else { + throw new IllegalArgumentException("invalid static import definition: unexpected format [" + line + "]"); + } + + // Add a static import method or binding depending on the static import type. + if ("from_class".equals(staticImportType)) { + whitelistStatics.add(new WhitelistMethod(origin, targetJavaClassName, + methodName, returnCanonicalTypeName, Arrays.asList(canonicalTypeNameParameters))); + } else if ("bound_to".equals(staticImportType)) { + whitelistBindings.add(new WhitelistBinding(origin, targetJavaClassName, + methodName, returnCanonicalTypeName, Arrays.asList(canonicalTypeNameParameters))); + } else { + throw new IllegalArgumentException("invalid static import definition: " + + "unexpected static import type [" + staticImportType + "] [" + line + "]"); + } + + // Handle class definition types. + } else if ("class".equals(parseType)) { + // Mark the origin of this parsable object. + String origin = "[" + filepath + "]:[" + number + "]"; + // Handle the case for a constructor definition. // Expects the following format: '(' ( ID ( ',' ID )* )? ')' '\n' if (line.startsWith("(")) { // Ensure the final token of the line is ')'. if (line.endsWith(")") == false) { throw new IllegalArgumentException( - "invalid constructor definition: expected a closing parenthesis [" + line + "]"); + "invalid constructor definition: expected a closing parenthesis [" + line + "]"); } // Parse the constructor parameters. @@ -234,34 +323,34 @@ public final class WhitelistLoader { whitelistConstructors.add(new WhitelistConstructor(origin, Arrays.asList(tokens))); - // Handle the case for a method or augmented method definition. - // Expects the following format: ID ID? ID '(' ( ID ( ',' ID )* )? ')' '\n' + // Handle the case for a method or augmented method definition. + // Expects the following format: ID ID? ID '(' ( ID ( ',' ID )* )? ')' '\n' } else if (line.contains("(")) { // Ensure the final token of the line is ')'. if (line.endsWith(")") == false) { throw new IllegalArgumentException( - "invalid method definition: expected a closing parenthesis [" + line + "]"); + "invalid method definition: expected a closing parenthesis [" + line + "]"); } // Parse the tokens prior to the method parameters. int parameterIndex = line.indexOf('('); - String[] tokens = line.trim().substring(0, parameterIndex).split("\\s+"); + String[] tokens = line.substring(0, parameterIndex).trim().split("\\s+"); - String javaMethodName; + String methodName; String javaAugmentedClassName; // Based on the number of tokens, look up the Java method name and if provided the Java augmented class. if (tokens.length == 2) { - javaMethodName = tokens[1]; + methodName = tokens[1]; javaAugmentedClassName = null; } else if (tokens.length == 3) { - javaMethodName = tokens[2]; + methodName = tokens[2]; javaAugmentedClassName = tokens[1]; } else { throw new IllegalArgumentException("invalid method definition: unexpected format [" + line + "]"); } - String painlessReturnTypeName = tokens[0]; + String returnCanonicalTypeName = tokens[0]; // Parse the method parameters. 
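// For example, for 'int indexOf(String, int)' the parameters parsed here are 'String, int'; a hypothetical
// augmented form such as 'String org.example.Augmenter shout(String)' is handled the same way, with the
// extra token naming the augmenting class.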
tokens = line.substring(parameterIndex + 1, line.length() - 1).replaceAll("\\s+", "").split(","); @@ -271,11 +360,11 @@ public final class WhitelistLoader { tokens = new String[0]; } - whitelistMethods.add(new WhitelistMethod(origin, javaAugmentedClassName, javaMethodName, - painlessReturnTypeName, Arrays.asList(tokens))); + whitelistMethods.add(new WhitelistMethod(origin, javaAugmentedClassName, methodName, + returnCanonicalTypeName, Arrays.asList(tokens))); - // Handle the case for a field definition. - // Expects the following format: ID ID '\n' + // Handle the case for a field definition. + // Expects the following format: ID ID '\n' } else { // Parse the field tokens. String[] tokens = line.split("\\s+"); @@ -287,20 +376,23 @@ public final class WhitelistLoader { whitelistFields.add(new WhitelistField(origin, tokens[1], tokens[0])); } + } else { + throw new IllegalArgumentException("invalid definition: unable to parse line [" + line + "]"); } } // Ensure all classes end with a '}' token before the end of the file. if (javaClassName != null) { - throw new IllegalArgumentException("invalid class definition: expected closing bracket"); + throw new IllegalArgumentException("invalid definition: expected closing bracket"); } } catch (Exception exception) { throw new RuntimeException("error in [" + filepath + "] at line [" + number + "]", exception); } } + ClassLoader loader = AccessController.doPrivileged((PrivilegedAction<ClassLoader>)resource::getClassLoader); - return new Whitelist(loader, whitelistClasses); + return new Whitelist(loader, whitelistClasses, whitelistStatics, whitelistBindings); } private WhitelistLoader() {} diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java index 5cd023a3591..f450ee0238d 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java @@ -67,7 +67,8 @@ public class WhitelistMethod { * is augmented as described in the class documentation. */ public WhitelistMethod(String origin, String augmentedCanonicalClassName, String methodName, - String returnCanonicalTypeName, List<String> canonicalTypeNameParameters) { + String returnCanonicalTypeName, List<String> canonicalTypeNameParameters) { + this.origin = Objects.requireNonNull(origin); this.augmentedCanonicalClassName = augmentedCanonicalClassName; this.methodName = methodName; diff --git a/modules/lang-painless/src/main/antlr/PainlessParser.g4 b/modules/lang-painless/src/main/antlr/PainlessParser.g4 index 5292b4d1950..27db9222f32 100644 --- a/modules/lang-painless/src/main/antlr/PainlessParser.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessParser.g4 @@ -22,7 +22,7 @@ parser grammar PainlessParser; options { tokenVocab=PainlessLexer; } source - : function* statement* dstatement?
EOF + : function* statement* EOF ; function @@ -35,7 +35,7 @@ parameters statement : rstatement - | dstatement SEMICOLON + | dstatement ( SEMICOLON | EOF ) ; // Note we use a predicate on the if/else case here to prevent the diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java new file mode 100644 index 00000000000..fc2a10891f6 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +public class BindingTest { + public int state; + + public BindingTest(int state0, int state1) { + this.state = state0 + state1; + } + + public int testAddWithState(int istateless, double dstateless) { + return istateless + state + (int)dstateless; + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTest.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTest.java index 1e94c19f6d9..28cbb4aee19 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTest.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTest.java @@ -24,6 +24,21 @@ import java.util.function.Function; /** Currently just a dummy class for testing a few features not yet exposed by whitelist! */ public class FeatureTest { + /** static method that returns true */ + public static boolean overloadedStatic() { + return true; + } + + /** static method that returns what you ask it */ + public static boolean overloadedStatic(boolean whatToReturn) { + return whatToReturn; + } + + /** static method only whitelisted as a static */ + public static float staticAddFloatsTest(float x, float y) { + return x + y; + } + private int x; private int y; public int z; @@ -58,21 +73,12 @@ public class FeatureTest { this.y = y; } - /** static method that returns true */ - public static boolean overloadedStatic() { - return true; - } - - /** static method that returns what you ask it */ - public static boolean overloadedStatic(boolean whatToReturn) { - return whatToReturn; - } - /** method taking two functions! 
*/ public Object twoFunctionsOfX(Function<Object,Object> f, Function<Object,Object> g) { return f.apply(g.apply(x)); } + + /** method to take in a list */ public void listInput(List<Object> list) { } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java index 83eb74d827f..d18cf2780cf 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java @@ -31,6 +31,7 @@ import java.util.Map; public class Globals { private final Map<String,SFunction> syntheticMethods = new HashMap<>(); private final Map<String,Constant> constantInitializers = new HashMap<>(); + private final Map<String,Class<?>> bindings = new HashMap<>(); private final BitSet statements; /** Create a new Globals from the set of statement boundaries */ @@ -54,7 +55,15 @@ throw new IllegalStateException("constant initializer: " + constant.name + " already exists"); } } - + + /** Adds a new binding to be written as a local variable */ + public String addBinding(Class<?> type) { + String name = "$binding$" + bindings.size(); + bindings.put(name, type); + + return name; + } + /** Returns the current synthetic methods */ public Map<String,SFunction> getSyntheticMethods() { return syntheticMethods; @@ -64,7 +73,12 @@ public Map<String,Constant> getConstantInitializers() { return constantInitializers; } - + + /** Returns the current bindings */ + public Map<String,Class<?>> getBindings() { + return bindings; + } + /** Returns the set of statement boundaries */ public BitSet getStatements() { return statements; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java index 094a62d188b..2c60136209c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java @@ -26,6 +26,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.store.RAMDirectory; @@ -550,7 +551,7 @@ public class PainlessExecuteAction extends Action getContexts() { + @Override + public List<ScriptContext<?>> getContexts() { return Collections.singletonList(PainlessExecuteAction.PainlessTestScript.CONTEXT); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java index 7fa10f6e9fb..feebacc6068 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java @@ -1,17 +1,13 @@ // ANTLR GENERATED CODE: DO NOT EDIT package org.elasticsearch.painless.antlr; - -import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.RuntimeMetaData; -import org.antlr.v4.runtime.Vocabulary; -import org.antlr.v4.runtime.VocabularyImpl; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import
org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.atn.*; import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.*; @SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) abstract class PainlessLexer extends Lexer { @@ -21,16 +17,16 @@ abstract class PainlessLexer extends Lexer { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, - NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, - FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, - THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, - ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, - EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, - COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, - DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, - AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, - DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, + WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, + NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, + FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, + THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, + ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, + EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, + COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, + DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, + AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, + DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, ID=82, DOTINTEGER=83, DOTID=84; public static final int AFTER_DOT = 1; public static String[] modeNames = { @@ -38,39 +34,39 @@ abstract class PainlessLexer extends Lexer { }; public static final String[] ruleNames = { - "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", "DOT", - "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", "FOR", - "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS", - "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", "SUB", "LSH", - "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", - "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", "REF", "ARROW", - "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", "ADIV", - "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX", - "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", + "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", "DOT", + "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", "FOR", + "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS", + "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", "SUB", "LSH", + "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", + "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", "REF", "ARROW", + "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", 
"ASUB", "AMUL", "ADIV", + "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX", + "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID" }; private static final String[] _LITERAL_NAMES = { - null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", - "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", - "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", - "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", - "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", - "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", - "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", - "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, + null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", + "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", + "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", + "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", + "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", + "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", + "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", + "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null, null, null, null, null, "'true'", "'false'", "'null'" }; private static final String[] _SYMBOLIC_NAMES = { - null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", - "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", - "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", - "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", - "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", - "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", - "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", - "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", - "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", + null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", + "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", + "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", + "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", + "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", + "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", + "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", + "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", + "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID" }; public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java index bef57d22e9e..5a823ecfda3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java @@ -1,25 +1,13 @@ // ANTLR GENERATED CODE: 
DO NOT EDIT package org.elasticsearch.painless.antlr; - -import org.antlr.v4.runtime.FailedPredicateException; -import org.antlr.v4.runtime.NoViableAltException; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.ParserRuleContext; -import org.antlr.v4.runtime.RecognitionException; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.RuntimeMetaData; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.Vocabulary; -import org.antlr.v4.runtime.VocabularyImpl; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ParserATNSimulator; -import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.atn.*; import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.tree.ParseTreeVisitor; -import org.antlr.v4.runtime.tree.TerminalNode; - +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.misc.*; +import org.antlr.v4.runtime.tree.*; import java.util.List; +import java.util.Iterator; +import java.util.ArrayList; @SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) class PainlessParser extends Parser { @@ -29,57 +17,57 @@ class PainlessParser extends Parser { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, - NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, - FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, - THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, - ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, - EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, - COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, - DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, - AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, - DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, + WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, + NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, + FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, + THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, + ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, + EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, + COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, + DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, + AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, + DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, ID=82, DOTINTEGER=83, DOTID=84; public static final int - RULE_source = 0, RULE_function = 1, RULE_parameters = 2, RULE_statement = 3, - RULE_rstatement = 4, RULE_dstatement = 5, RULE_trailer = 6, RULE_block = 7, - RULE_empty = 8, RULE_initializer = 9, RULE_afterthought = 10, RULE_declaration = 11, - RULE_decltype = 12, RULE_declvar = 13, RULE_trap = 14, RULE_expression = 15, - RULE_unary = 16, RULE_chain = 17, RULE_primary = 18, RULE_postfix = 19, - RULE_postdot = 20, RULE_callinvoke = 21, RULE_fieldaccess = 22, RULE_braceaccess = 23, - RULE_arrayinitializer = 24, RULE_listinitializer = 25, RULE_mapinitializer = 26, - RULE_maptoken = 27, RULE_arguments = 28, 
RULE_argument = 29, RULE_lambda = 30, + RULE_source = 0, RULE_function = 1, RULE_parameters = 2, RULE_statement = 3, + RULE_rstatement = 4, RULE_dstatement = 5, RULE_trailer = 6, RULE_block = 7, + RULE_empty = 8, RULE_initializer = 9, RULE_afterthought = 10, RULE_declaration = 11, + RULE_decltype = 12, RULE_declvar = 13, RULE_trap = 14, RULE_expression = 15, + RULE_unary = 16, RULE_chain = 17, RULE_primary = 18, RULE_postfix = 19, + RULE_postdot = 20, RULE_callinvoke = 21, RULE_fieldaccess = 22, RULE_braceaccess = 23, + RULE_arrayinitializer = 24, RULE_listinitializer = 25, RULE_mapinitializer = 26, + RULE_maptoken = 27, RULE_arguments = 28, RULE_argument = 29, RULE_lambda = 30, RULE_lamtype = 31, RULE_funcref = 32; public static final String[] ruleNames = { - "source", "function", "parameters", "statement", "rstatement", "dstatement", - "trailer", "block", "empty", "initializer", "afterthought", "declaration", - "decltype", "declvar", "trap", "expression", "unary", "chain", "primary", - "postfix", "postdot", "callinvoke", "fieldaccess", "braceaccess", "arrayinitializer", - "listinitializer", "mapinitializer", "maptoken", "arguments", "argument", + "source", "function", "parameters", "statement", "rstatement", "dstatement", + "trailer", "block", "empty", "initializer", "afterthought", "declaration", + "decltype", "declvar", "trap", "expression", "unary", "chain", "primary", + "postfix", "postdot", "callinvoke", "fieldaccess", "braceaccess", "arrayinitializer", + "listinitializer", "mapinitializer", "maptoken", "arguments", "argument", "lambda", "lamtype", "funcref" }; private static final String[] _LITERAL_NAMES = { - null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", - "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", - "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", - "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", - "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", - "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", - "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", - "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, + null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", + "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", + "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", + "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", + "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", + "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", + "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", + "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null, null, null, null, null, "'true'", "'false'", "'null'" }; private static final String[] _SYMBOLIC_NAMES = { - null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", - "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", - "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", - "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", - "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", - "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", - "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", - "AMUL", "ADIV", "AREM", "AAND", "AXOR", 
"AOR", "ALSH", "ARSH", "AUSH", - "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", + null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", + "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", + "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", + "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", + "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", + "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", + "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", + "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", + "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID" }; public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); @@ -145,9 +133,6 @@ class PainlessParser extends Parser { public StatementContext statement(int i) { return getRuleContext(StatementContext.class,i); } - public DstatementContext dstatement() { - return getRuleContext(DstatementContext.class,0); - } public SourceContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -177,7 +162,7 @@ class PainlessParser extends Parser { setState(66); function(); } - } + } } setState(71); _errHandler.sync(this); @@ -185,30 +170,19 @@ class PainlessParser extends Parser { } setState(75); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,1,_ctx); - while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { - if ( _alt==1 ) { - { - { - setState(72); - statement(); - } - } + _la = _input.LA(1); + while ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << IF) | (1L << WHILE) | (1L << DO) | (1L << FOR) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << TRY) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { + { + { + setState(72); + statement(); + } } setState(77); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,1,_ctx); + _la = _input.LA(1); } - setState(79); - _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DO) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { - { - setState(78); - dstatement(); - } - } - - setState(81); + setState(78); match(EOF); } } @@ -251,13 +225,13 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(83); + setState(80); decltype(); - setState(84); + setState(81); match(ID); - setState(85); + setState(82); parameters(); - setState(86); + setState(83); block(); } } @@ -307,38 
+281,38 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(88); + setState(85); match(LP); - setState(100); + setState(97); _la = _input.LA(1); if (_la==TYPE) { { - setState(89); + setState(86); decltype(); - setState(90); + setState(87); match(ID); - setState(97); + setState(94); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(91); + setState(88); match(COMMA); - setState(92); + setState(89); decltype(); - setState(93); + setState(90); match(ID); } } - setState(99); + setState(96); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(102); + setState(99); match(RP); } } @@ -361,6 +335,7 @@ class PainlessParser extends Parser { return getRuleContext(DstatementContext.class,0); } public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); } + public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); } public StatementContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -375,8 +350,9 @@ class PainlessParser extends Parser { public final StatementContext statement() throws RecognitionException { StatementContext _localctx = new StatementContext(_ctx, getState()); enterRule(_localctx, 6, RULE_statement); + int _la; try { - setState(108); + setState(105); switch (_input.LA(1)) { case IF: case WHILE: @@ -384,7 +360,7 @@ class PainlessParser extends Parser { case TRY: enterOuterAlt(_localctx, 1); { - setState(104); + setState(101); rstatement(); } break; @@ -415,10 +391,15 @@ class PainlessParser extends Parser { case ID: enterOuterAlt(_localctx, 2); { - setState(105); + setState(102); dstatement(); - setState(106); - match(SEMICOLON); + setState(103); + _la = _input.LA(1); + if ( !(_la==EOF || _la==SEMICOLON) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } } break; default: @@ -441,7 +422,7 @@ class PainlessParser extends Parser { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_rstatement; } - + public RstatementContext() { } public void copyFrom(RstatementContext ctx) { super.copyFrom(ctx); @@ -584,37 +565,37 @@ class PainlessParser extends Parser { int _la; try { int _alt; - setState(170); + setState(167); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { case 1: _localctx = new IfContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(110); + setState(107); match(IF); - setState(111); + setState(108); match(LP); - setState(112); + setState(109); expression(0); - setState(113); + setState(110); match(RP); - setState(114); + setState(111); trailer(); - setState(118); + setState(115); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { case 1: { - setState(115); + setState(112); match(ELSE); - setState(116); + setState(113); trailer(); } break; case 2: { - setState(117); + setState(114); if (!( _input.LA(1) != ELSE )) throw new FailedPredicateException(this, " _input.LA(1) != ELSE "); } break; @@ -625,15 +606,15 @@ class PainlessParser extends Parser { _localctx = new WhileContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(120); + setState(117); match(WHILE); - setState(121); + setState(118); match(LP); - setState(122); + setState(119); expression(0); - setState(123); + setState(120); match(RP); - setState(126); + setState(123); switch (_input.LA(1)) { case LBRACK: case LBRACE: @@ -666,13 
+647,13 @@ class PainlessParser extends Parser { case TYPE: case ID: { - setState(124); + setState(121); trailer(); } break; case SEMICOLON: { - setState(125); + setState(122); empty(); } break; @@ -685,44 +666,44 @@ class PainlessParser extends Parser { _localctx = new ForContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(128); + setState(125); match(FOR); - setState(129); + setState(126); match(LP); - setState(131); + setState(128); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(130); + setState(127); initializer(); } } - setState(133); + setState(130); match(SEMICOLON); - setState(135); + setState(132); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(134); + setState(131); expression(0); } } - setState(137); + setState(134); match(SEMICOLON); - setState(139); + setState(136); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(138); + setState(135); afterthought(); } } - setState(141); + setState(138); match(RP); - setState(144); + setState(141); switch (_input.LA(1)) { case LBRACK: case LBRACE: @@ -755,13 +736,13 @@ class PainlessParser extends Parser { case TYPE: case ID: { - setState(142); + setState(139); trailer(); } break; case SEMICOLON: { - setState(143); + setState(140); empty(); } break; @@ -774,21 +755,21 @@ class PainlessParser extends Parser { _localctx = new EachContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(146); + setState(143); match(FOR); - setState(147); + setState(144); match(LP); - setState(148); + setState(145); decltype(); - setState(149); + setState(146); match(ID); - setState(150); + setState(147); match(COLON); - setState(151); + setState(148); expression(0); - setState(152); + setState(149); match(RP); - setState(153); + setState(150); trailer(); } break; @@ -796,19 +777,19 @@ class PainlessParser extends Parser { _localctx = new IneachContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(155); + setState(152); match(FOR); - setState(156); + setState(153); match(LP); - setState(157); + setState(154); match(ID); - setState(158); + setState(155); match(IN); - setState(159); + setState(156); 
expression(0); - setState(160); + setState(157); match(RP); - setState(161); + setState(158); trailer(); } break; @@ -816,11 +797,11 @@ class PainlessParser extends Parser { _localctx = new TryContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(163); + setState(160); match(TRY); - setState(164); + setState(161); block(); - setState(166); + setState(163); _errHandler.sync(this); _alt = 1; do { @@ -828,7 +809,7 @@ class PainlessParser extends Parser { case 1: { { - setState(165); + setState(162); trap(); } } @@ -836,9 +817,9 @@ class PainlessParser extends Parser { default: throw new NoViableAltException(this); } - setState(168); + setState(165); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,12,_ctx); + _alt = getInterpreter().adaptivePredict(_input,11,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; @@ -860,7 +841,7 @@ class PainlessParser extends Parser { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_dstatement; } - + public DstatementContext() { } public void copyFrom(DstatementContext ctx) { super.copyFrom(ctx); @@ -953,24 +934,24 @@ class PainlessParser extends Parser { DstatementContext _localctx = new DstatementContext(_ctx, getState()); enterRule(_localctx, 10, RULE_dstatement); try { - setState(187); + setState(184); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { case 1: _localctx = new DoContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(172); + setState(169); match(DO); - setState(173); + setState(170); block(); - setState(174); + setState(171); match(WHILE); - setState(175); + setState(172); match(LP); - setState(176); + setState(173); expression(0); - setState(177); + setState(174); match(RP); } break; @@ -978,7 +959,7 @@ class PainlessParser extends Parser { _localctx = new DeclContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(179); + setState(176); declaration(); } break; @@ -986,7 +967,7 @@ class PainlessParser extends Parser { _localctx = new ContinueContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(180); + setState(177); match(CONTINUE); } break; @@ -994,7 +975,7 @@ class PainlessParser extends Parser { _localctx = new BreakContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(181); + setState(178); match(BREAK); } break; @@ -1002,9 +983,9 @@ class PainlessParser extends Parser { _localctx = new ReturnContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(182); + setState(179); match(RETURN); - setState(183); + setState(180); expression(0); } break; @@ -1012,9 +993,9 @@ class PainlessParser extends Parser { _localctx = new ThrowContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(184); + setState(181); match(THROW); - setState(185); + setState(182); expression(0); } break; @@ -1022,7 +1003,7 @@ class PainlessParser extends Parser { _localctx = new ExprContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(186); + setState(183); expression(0); } break; @@ -1061,12 +1042,12 @@ class PainlessParser extends Parser { TrailerContext _localctx = new TrailerContext(_ctx, getState()); enterRule(_localctx, 12, RULE_trailer); try { - setState(191); + setState(188); switch (_input.LA(1)) { case LBRACK: enterOuterAlt(_localctx, 1); { - setState(189); + setState(186); block(); } break; @@ -1101,7 +1082,7 @@ class PainlessParser extends Parser { case ID: enterOuterAlt(_localctx, 2); { - setState(190); + 
setState(187); statement(); } break; @@ -1151,34 +1132,34 @@ class PainlessParser extends Parser { int _alt; enterOuterAlt(_localctx, 1); { - setState(193); + setState(190); match(LBRACK); - setState(197); + setState(194); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,16,_ctx); + _alt = getInterpreter().adaptivePredict(_input,15,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(194); + setState(191); statement(); } - } + } } - setState(199); + setState(196); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,16,_ctx); + _alt = getInterpreter().adaptivePredict(_input,15,_ctx); } - setState(201); + setState(198); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DO) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(200); + setState(197); dstatement(); } } - setState(203); + setState(200); match(RBRACK); } } @@ -1212,7 +1193,7 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(205); + setState(202); match(SEMICOLON); } } @@ -1249,20 +1230,20 @@ class PainlessParser extends Parser { InitializerContext _localctx = new InitializerContext(_ctx, getState()); enterRule(_localctx, 18, RULE_initializer); try { - setState(209); + setState(206); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,18,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,17,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(207); + setState(204); declaration(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(208); + setState(205); expression(0); } break; @@ -1300,7 +1281,7 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(211); + setState(208); expression(0); } } @@ -1347,23 +1328,23 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(213); + setState(210); decltype(); - setState(214); + setState(211); declvar(); - setState(219); + setState(216); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(215); + setState(212); match(COMMA); - setState(216); + setState(213); declvar(); } } - setState(221); + setState(218); _errHandler.sync(this); _la = _input.LA(1); } @@ -1408,25 +1389,25 @@ class PainlessParser extends Parser { int _alt; enterOuterAlt(_localctx, 1); { - setState(222); + setState(219); match(TYPE); - setState(227); + setState(224); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,20,_ctx); + _alt = getInterpreter().adaptivePredict(_input,19,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(223); + setState(220); match(LBRACE); - setState(224); + setState(221); match(RBRACE); } - } + } } - setState(229); + setState(226); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,20,_ctx); + _alt = getInterpreter().adaptivePredict(_input,19,_ctx); } } } @@ -1465,15 +1446,15 @@ class PainlessParser extends 
Parser { try { enterOuterAlt(_localctx, 1); { - setState(230); + setState(227); match(ID); - setState(233); + setState(230); _la = _input.LA(1); if (_la==ASSIGN) { { - setState(231); + setState(228); match(ASSIGN); - setState(232); + setState(229); expression(0); } } @@ -1517,17 +1498,17 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(235); + setState(232); match(CATCH); - setState(236); + setState(233); match(LP); - setState(237); + setState(234); match(TYPE); - setState(238); + setState(235); match(ID); - setState(239); + setState(236); match(RP); - setState(240); + setState(237); block(); } } @@ -1547,7 +1528,7 @@ class PainlessParser extends Parser { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_expression; } - + public ExpressionContext() { } public void copyFrom(ExpressionContext ctx) { super.copyFrom(ctx); @@ -1723,35 +1704,35 @@ class PainlessParser extends Parser { _ctx = _localctx; _prevctx = _localctx; - setState(243); + setState(240); unary(); } _ctx.stop = _input.LT(-1); - setState(295); + setState(292); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,23,_ctx); + _alt = getInterpreter().adaptivePredict(_input,22,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(293); + setState(290); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,22,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,21,_ctx) ) { case 1: { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(245); + setState(242); if (!(precpred(_ctx, 15))) throw new FailedPredicateException(this, "precpred(_ctx, 15)"); - setState(246); + setState(243); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << MUL) | (1L << DIV) | (1L << REM))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(247); + setState(244); expression(16); } break; @@ -1759,16 +1740,16 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(248); + setState(245); if (!(precpred(_ctx, 14))) throw new FailedPredicateException(this, "precpred(_ctx, 14)"); - setState(249); + setState(246); _la = _input.LA(1); if ( !(_la==ADD || _la==SUB) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(250); + setState(247); expression(15); } break; @@ -1776,16 +1757,16 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(251); + setState(248); if (!(precpred(_ctx, 13))) throw new FailedPredicateException(this, "precpred(_ctx, 13)"); - setState(252); + setState(249); _la = _input.LA(1); if ( !(_la==FIND || _la==MATCH) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(253); + setState(250); expression(14); } break; @@ -1793,16 +1774,16 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(254); + setState(251); if (!(precpred(_ctx, 12))) throw new FailedPredicateException(this, 
"precpred(_ctx, 12)"); - setState(255); + setState(252); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LSH) | (1L << RSH) | (1L << USH))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(256); + setState(253); expression(13); } break; @@ -1810,16 +1791,16 @@ class PainlessParser extends Parser { { _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(257); + setState(254); if (!(precpred(_ctx, 11))) throw new FailedPredicateException(this, "precpred(_ctx, 11)"); - setState(258); + setState(255); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LT) | (1L << LTE) | (1L << GT) | (1L << GTE))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(259); + setState(256); expression(12); } break; @@ -1827,16 +1808,16 @@ class PainlessParser extends Parser { { _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(260); + setState(257); if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, "precpred(_ctx, 9)"); - setState(261); + setState(258); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << EQ) | (1L << EQR) | (1L << NE) | (1L << NER))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(262); + setState(259); expression(10); } break; @@ -1844,11 +1825,11 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(263); + setState(260); if (!(precpred(_ctx, 8))) throw new FailedPredicateException(this, "precpred(_ctx, 8)"); - setState(264); + setState(261); match(BWAND); - setState(265); + setState(262); expression(9); } break; @@ -1856,11 +1837,11 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(266); + setState(263); if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)"); - setState(267); + setState(264); match(XOR); - setState(268); + setState(265); expression(8); } break; @@ -1868,11 +1849,11 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(269); + setState(266); if (!(precpred(_ctx, 6))) throw new FailedPredicateException(this, "precpred(_ctx, 6)"); - setState(270); + setState(267); match(BWOR); - setState(271); + setState(268); expression(7); } break; @@ -1880,11 +1861,11 @@ class PainlessParser extends Parser { { _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(272); + setState(269); if (!(precpred(_ctx, 5))) throw new FailedPredicateException(this, "precpred(_ctx, 5)"); - setState(273); + setState(270); match(BOOLAND); - setState(274); + setState(271); expression(6); } break; @@ -1892,11 +1873,11 @@ class PainlessParser extends Parser { { _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(275); + setState(272); if (!(precpred(_ctx, 4))) throw new 
FailedPredicateException(this, "precpred(_ctx, 4)"); - setState(276); + setState(273); match(BOOLOR); - setState(277); + setState(274); expression(5); } break; @@ -1904,15 +1885,15 @@ class PainlessParser extends Parser { { _localctx = new ConditionalContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(278); + setState(275); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(279); + setState(276); match(COND); - setState(280); + setState(277); expression(0); - setState(281); + setState(278); match(COLON); - setState(282); + setState(279); expression(3); } break; @@ -1920,11 +1901,11 @@ class PainlessParser extends Parser { { _localctx = new ElvisContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(284); + setState(281); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(285); + setState(282); match(ELVIS); - setState(286); + setState(283); expression(2); } break; @@ -1932,16 +1913,16 @@ class PainlessParser extends Parser { { _localctx = new AssignmentContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(287); + setState(284); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(288); + setState(285); _la = _input.LA(1); if ( !(((((_la - 60)) & ~0x3f) == 0 && ((1L << (_la - 60)) & ((1L << (ASSIGN - 60)) | (1L << (AADD - 60)) | (1L << (ASUB - 60)) | (1L << (AMUL - 60)) | (1L << (ADIV - 60)) | (1L << (AREM - 60)) | (1L << (AAND - 60)) | (1L << (AXOR - 60)) | (1L << (AOR - 60)) | (1L << (ALSH - 60)) | (1L << (ARSH - 60)) | (1L << (AUSH - 60)))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(289); + setState(286); expression(1); } break; @@ -1949,20 +1930,20 @@ class PainlessParser extends Parser { { _localctx = new InstanceofContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(290); + setState(287); if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)"); - setState(291); + setState(288); match(INSTANCEOF); - setState(292); + setState(289); decltype(); } break; } - } + } } - setState(297); + setState(294); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,23,_ctx); + _alt = getInterpreter().adaptivePredict(_input,22,_ctx); } } } @@ -1982,7 +1963,7 @@ class PainlessParser extends Parser { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_unary; } - + public UnaryContext() { } public void copyFrom(UnaryContext ctx) { super.copyFrom(ctx); @@ -2062,21 +2043,21 @@ class PainlessParser extends Parser { enterRule(_localctx, 32, RULE_unary); int _la; try { - setState(311); + setState(308); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,23,_ctx) ) { case 1: _localctx = new PreContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(298); + setState(295); _la = _input.LA(1); if ( !(_la==INCR || _la==DECR) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(299); + setState(296); chain(); } break; @@ -2084,9 +2065,9 @@ class PainlessParser extends Parser { _localctx = new PostContext(_localctx); 
enterOuterAlt(_localctx, 2); { - setState(300); + setState(297); chain(); - setState(301); + setState(298); _la = _input.LA(1); if ( !(_la==INCR || _la==DECR) ) { _errHandler.recoverInline(this); @@ -2099,7 +2080,7 @@ class PainlessParser extends Parser { _localctx = new ReadContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(303); + setState(300); chain(); } break; @@ -2107,14 +2088,14 @@ class PainlessParser extends Parser { _localctx = new OperatorContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(304); + setState(301); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(305); + setState(302); unary(); } break; @@ -2122,13 +2103,13 @@ class PainlessParser extends Parser { _localctx = new CastContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(306); + setState(303); match(LP); - setState(307); + setState(304); decltype(); - setState(308); + setState(305); match(RP); - setState(309); + setState(306); unary(); } break; @@ -2150,7 +2131,7 @@ class PainlessParser extends Parser { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_chain; } - + public ChainContext() { } public void copyFrom(ChainContext ctx) { super.copyFrom(ctx); @@ -2210,30 +2191,30 @@ class PainlessParser extends Parser { enterRule(_localctx, 34, RULE_chain); try { int _alt; - setState(329); + setState(326); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,26,_ctx) ) { case 1: _localctx = new DynamicContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(313); + setState(310); primary(); - setState(317); + setState(314); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,25,_ctx); + _alt = getInterpreter().adaptivePredict(_input,24,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(314); + setState(311); postfix(); } - } + } } - setState(319); + setState(316); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,25,_ctx); + _alt = getInterpreter().adaptivePredict(_input,24,_ctx); } } break; @@ -2241,25 +2222,25 @@ class PainlessParser extends Parser { _localctx = new StaticContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(320); + setState(317); decltype(); - setState(321); + setState(318); postdot(); - setState(325); + setState(322); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,26,_ctx); + _alt = getInterpreter().adaptivePredict(_input,25,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(322); + setState(319); postfix(); } - } + } } - setState(327); + setState(324); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,26,_ctx); + _alt = getInterpreter().adaptivePredict(_input,25,_ctx); } } break; @@ -2267,7 +2248,7 @@ class PainlessParser extends Parser { _localctx = new NewarrayContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(328); + setState(325); arrayinitializer(); } break; @@ -2289,7 +2270,7 @@ class PainlessParser extends Parser { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_primary; } - + public PrimaryContext() { } public void copyFrom(PrimaryContext ctx) { super.copyFrom(ctx); @@ -2427,18 +2408,18 @@ class PainlessParser extends 
Parser { enterRule(_localctx, 36, RULE_primary); int _la; try { - setState(349); + setState(346); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) { case 1: _localctx = new PrecedenceContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(331); + setState(328); match(LP); - setState(332); + setState(329); expression(0); - setState(333); + setState(330); match(RP); } break; @@ -2446,7 +2427,7 @@ class PainlessParser extends Parser { _localctx = new NumericContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(335); + setState(332); _la = _input.LA(1); if ( !(((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)))) != 0)) ) { _errHandler.recoverInline(this); @@ -2459,7 +2440,7 @@ class PainlessParser extends Parser { _localctx = new TrueContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(336); + setState(333); match(TRUE); } break; @@ -2467,7 +2448,7 @@ class PainlessParser extends Parser { _localctx = new FalseContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(337); + setState(334); match(FALSE); } break; @@ -2475,7 +2456,7 @@ class PainlessParser extends Parser { _localctx = new NullContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(338); + setState(335); match(NULL); } break; @@ -2483,7 +2464,7 @@ class PainlessParser extends Parser { _localctx = new StringContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(339); + setState(336); match(STRING); } break; @@ -2491,7 +2472,7 @@ class PainlessParser extends Parser { _localctx = new RegexContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(340); + setState(337); match(REGEX); } break; @@ -2499,7 +2480,7 @@ class PainlessParser extends Parser { _localctx = new ListinitContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(341); + setState(338); listinitializer(); } break; @@ -2507,7 +2488,7 @@ class PainlessParser extends Parser { _localctx = new MapinitContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(342); + setState(339); mapinitializer(); } break; @@ -2515,7 +2496,7 @@ class PainlessParser extends Parser { _localctx = new VariableContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(343); + setState(340); match(ID); } break; @@ -2523,9 +2504,9 @@ class PainlessParser extends Parser { _localctx = new CalllocalContext(_localctx); enterOuterAlt(_localctx, 11); { - setState(344); + setState(341); match(ID); - setState(345); + setState(342); arguments(); } break; @@ -2533,11 +2514,11 @@ class PainlessParser extends Parser { _localctx = new NewobjectContext(_localctx); enterOuterAlt(_localctx, 12); { - setState(346); + setState(343); match(NEW); - setState(347); + setState(344); match(TYPE); - setState(348); + setState(345); arguments(); } break; @@ -2579,27 +2560,27 @@ class PainlessParser extends Parser { PostfixContext _localctx = new PostfixContext(_ctx, getState()); enterRule(_localctx, 38, RULE_postfix); try { - setState(354); + setState(351); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(351); + setState(348); callinvoke(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(352); + setState(349); fieldaccess(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(353); + setState(350); 
braceaccess(); } break; @@ -2638,20 +2619,20 @@ class PainlessParser extends Parser { PostdotContext _localctx = new PostdotContext(_ctx, getState()); enterRule(_localctx, 40, RULE_postdot); try { - setState(358); + setState(355); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(356); + setState(353); callinvoke(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(357); + setState(354); fieldaccess(); } break; @@ -2693,16 +2674,16 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(360); + setState(357); _la = _input.LA(1); if ( !(_la==DOT || _la==NSDOT) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(361); + setState(358); match(DOTID); - setState(362); + setState(359); arguments(); } } @@ -2740,14 +2721,14 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(364); + setState(361); _la = _input.LA(1); if ( !(_la==DOT || _la==NSDOT) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(365); + setState(362); _la = _input.LA(1); if ( !(_la==DOTINTEGER || _la==DOTID) ) { _errHandler.recoverInline(this); @@ -2790,11 +2771,11 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(367); + setState(364); match(LBRACE); - setState(368); + setState(365); expression(0); - setState(369); + setState(366); match(RBRACE); } } @@ -2814,7 +2795,7 @@ class PainlessParser extends Parser { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_arrayinitializer; } - + public ArrayinitializerContext() { } public void copyFrom(ArrayinitializerContext ctx) { super.copyFrom(ctx); @@ -2890,18 +2871,18 @@ class PainlessParser extends Parser { int _la; try { int _alt; - setState(412); + setState(409); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,37,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,36,_ctx) ) { case 1: _localctx = new NewstandardarrayContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(371); + setState(368); match(NEW); - setState(372); + setState(369); match(TYPE); - setState(377); + setState(374); _errHandler.sync(this); _alt = 1; do { @@ -2909,11 +2890,11 @@ class PainlessParser extends Parser { case 1: { { - setState(373); + setState(370); match(LBRACE); - setState(374); + setState(371); expression(0); - setState(375); + setState(372); match(RBRACE); } } @@ -2921,32 +2902,32 @@ class PainlessParser extends Parser { default: throw new NoViableAltException(this); } - setState(379); + setState(376); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,31,_ctx); + _alt = getInterpreter().adaptivePredict(_input,30,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); - setState(388); + setState(385); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,33,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,32,_ctx) ) { case 1: { - setState(381); + setState(378); postdot(); - setState(385); + setState(382); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,32,_ctx); + _alt = getInterpreter().adaptivePredict(_input,31,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(382); + setState(379); postfix(); } - } + } } - setState(387); + setState(384); 
_errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,32,_ctx); + _alt = getInterpreter().adaptivePredict(_input,31,_ctx); } } break; @@ -2957,58 +2938,58 @@ class PainlessParser extends Parser { _localctx = new NewinitializedarrayContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(390); + setState(387); match(NEW); - setState(391); + setState(388); match(TYPE); - setState(392); + setState(389); match(LBRACE); - setState(393); + setState(390); match(RBRACE); - setState(394); + setState(391); match(LBRACK); - setState(403); + setState(400); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(395); + setState(392); expression(0); - setState(400); + setState(397); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(396); + setState(393); match(COMMA); - setState(397); + setState(394); expression(0); } } - setState(402); + setState(399); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(405); + setState(402); match(RBRACK); - setState(409); + setState(406); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,36,_ctx); + _alt = getInterpreter().adaptivePredict(_input,35,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(406); + setState(403); postfix(); } - } + } } - setState(411); + setState(408); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,36,_ctx); + _alt = getInterpreter().adaptivePredict(_input,35,_ctx); } } break; @@ -3054,42 +3035,42 @@ class PainlessParser extends Parser { enterRule(_localctx, 50, RULE_listinitializer); int _la; try { - setState(427); + setState(424); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,39,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,38,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(414); + setState(411); match(LBRACE); - setState(415); + setState(412); expression(0); - setState(420); + setState(417); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(416); + setState(413); match(COMMA); - setState(417); + setState(414); expression(0); } } - setState(422); + setState(419); _errHandler.sync(this); _la = _input.LA(1); } - setState(423); + setState(420); match(RBRACE); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(425); + setState(422); match(LBRACE); - setState(426); + setState(423); match(RBRACE); } break; @@ -3136,44 +3117,44 @@ class PainlessParser extends Parser { enterRule(_localctx, 52, RULE_mapinitializer); int _la; try { - setState(443); + setState(440); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,41,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,40,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(429); + setState(426); match(LBRACE); - setState(430); + setState(427); maptoken(); - setState(435); + setState(432); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(431); + setState(428); match(COMMA); - setState(432); + 
setState(429); maptoken(); } } - setState(437); + setState(434); _errHandler.sync(this); _la = _input.LA(1); } - setState(438); + setState(435); match(RBRACE); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(440); + setState(437); match(LBRACE); - setState(441); + setState(438); match(COLON); - setState(442); + setState(439); match(RBRACE); } break; @@ -3215,11 +3196,11 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(445); + setState(442); expression(0); - setState(446); + setState(443); match(COLON); - setState(447); + setState(444); expression(0); } } @@ -3266,34 +3247,34 @@ class PainlessParser extends Parser { enterOuterAlt(_localctx, 1); { { - setState(449); + setState(446); match(LP); - setState(458); + setState(455); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << THIS) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(450); + setState(447); argument(); - setState(455); + setState(452); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(451); + setState(448); match(COMMA); - setState(452); + setState(449); argument(); } } - setState(457); + setState(454); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(460); + setState(457); match(RP); } } @@ -3334,27 +3315,27 @@ class PainlessParser extends Parser { ArgumentContext _localctx = new ArgumentContext(_ctx, getState()); enterRule(_localctx, 58, RULE_argument); try { - setState(465); + setState(462); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,44,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,43,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(462); + setState(459); expression(0); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(463); + setState(460); lambda(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(464); + setState(461); funcref(); } break; @@ -3409,58 +3390,58 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(480); + setState(477); switch (_input.LA(1)) { case TYPE: case ID: { - setState(467); + setState(464); lamtype(); } break; case LP: { - setState(468); + setState(465); match(LP); - setState(477); + setState(474); _la = _input.LA(1); if (_la==TYPE || _la==ID) { { - setState(469); + setState(466); lamtype(); - setState(474); + setState(471); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(470); + setState(467); match(COMMA); - setState(471); + setState(468); lamtype(); } } - setState(476); + setState(473); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(479); + setState(476); match(RP); } break; default: throw new NoViableAltException(this); } - setState(482); + setState(479); match(ARROW); - setState(485); + setState(482); switch (_input.LA(1)) { case LBRACK: { - setState(483); + setState(480); block(); } break; @@ -3485,7 +3466,7 @@ class PainlessParser extends Parser { case TYPE: case ID: { - setState(484); + setState(481); expression(0); } break; @@ -3528,16 +3509,16 @@ class PainlessParser extends Parser { try { 
enterOuterAlt(_localctx, 1); { - setState(488); + setState(485); _la = _input.LA(1); if (_la==TYPE) { { - setState(487); + setState(484); decltype(); } } - setState(490); + setState(487); match(ID); } } @@ -3557,7 +3538,7 @@ class PainlessParser extends Parser { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_funcref; } - + public FuncrefContext() { } public void copyFrom(FuncrefContext ctx) { super.copyFrom(ctx); @@ -3616,18 +3597,18 @@ class PainlessParser extends Parser { FuncrefContext _localctx = new FuncrefContext(_ctx, getState()); enterRule(_localctx, 64, RULE_funcref); try { - setState(505); + setState(502); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,50,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,49,_ctx) ) { case 1: _localctx = new ClassfuncrefContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(492); + setState(489); match(TYPE); - setState(493); + setState(490); match(REF); - setState(494); + setState(491); match(ID); } break; @@ -3635,11 +3616,11 @@ class PainlessParser extends Parser { _localctx = new ConstructorfuncrefContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(495); + setState(492); decltype(); - setState(496); + setState(493); match(REF); - setState(497); + setState(494); match(NEW); } break; @@ -3647,11 +3628,11 @@ class PainlessParser extends Parser { _localctx = new CapturingfuncrefContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(499); + setState(496); match(ID); - setState(500); + setState(497); match(REF); - setState(501); + setState(498); match(ID); } break; @@ -3659,11 +3640,11 @@ class PainlessParser extends Parser { _localctx = new LocalfuncrefContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(502); + setState(499); match(THIS); - setState(503); + setState(500); match(REF); - setState(504); + setState(501); match(ID); } break; @@ -3733,200 +3714,198 @@ class PainlessParser extends Parser { } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3V\u01fe\4\2\t\2\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3V\u01fb\4\2\t\2\4"+ "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ "\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+ "\t!\4\"\t\"\3\2\7\2F\n\2\f\2\16\2I\13\2\3\2\7\2L\n\2\f\2\16\2O\13\2\3"+ - "\2\5\2R\n\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7"+ - "\4b\n\4\f\4\16\4e\13\4\5\4g\n\4\3\4\3\4\3\5\3\5\3\5\3\5\5\5o\n\5\3\6\3"+ - "\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6y\n\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6\u0081"+ - "\n\6\3\6\3\6\3\6\5\6\u0086\n\6\3\6\3\6\5\6\u008a\n\6\3\6\3\6\5\6\u008e"+ - "\n\6\3\6\3\6\3\6\5\6\u0093\n\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6"+ - "\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\6\6\u00a9\n\6\r\6\16\6\u00aa"+ - "\5\6\u00ad\n\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7"+ - "\3\7\5\7\u00be\n\7\3\b\3\b\5\b\u00c2\n\b\3\t\3\t\7\t\u00c6\n\t\f\t\16"+ - "\t\u00c9\13\t\3\t\5\t\u00cc\n\t\3\t\3\t\3\n\3\n\3\13\3\13\5\13\u00d4\n"+ - "\13\3\f\3\f\3\r\3\r\3\r\3\r\7\r\u00dc\n\r\f\r\16\r\u00df\13\r\3\16\3\16"+ - "\3\16\7\16\u00e4\n\16\f\16\16\16\u00e7\13\16\3\17\3\17\3\17\5\17\u00ec"+ - "\n\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\2\3\2\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4_\n\4\f\4\16"+ + 
"\4b\13\4\5\4d\n\4\3\4\3\4\3\5\3\5\3\5\3\5\5\5l\n\5\3\6\3\6\3\6\3\6\3\6"+ + "\3\6\3\6\3\6\5\6v\n\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6~\n\6\3\6\3\6\3\6\5\6"+ + "\u0083\n\6\3\6\3\6\5\6\u0087\n\6\3\6\3\6\5\6\u008b\n\6\3\6\3\6\3\6\5\6"+ + "\u0090\n\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6"+ + "\3\6\3\6\3\6\3\6\3\6\6\6\u00a6\n\6\r\6\16\6\u00a7\5\6\u00aa\n\6\3\7\3"+ + "\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u00bb\n\7\3"+ + "\b\3\b\5\b\u00bf\n\b\3\t\3\t\7\t\u00c3\n\t\f\t\16\t\u00c6\13\t\3\t\5\t"+ + "\u00c9\n\t\3\t\3\t\3\n\3\n\3\13\3\13\5\13\u00d1\n\13\3\f\3\f\3\r\3\r\3"+ + "\r\3\r\7\r\u00d9\n\r\f\r\16\r\u00dc\13\r\3\16\3\16\3\16\7\16\u00e1\n\16"+ + "\f\16\16\16\u00e4\13\16\3\17\3\17\3\17\5\17\u00e9\n\17\3\20\3\20\3\20"+ + "\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ - "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ - "\3\21\3\21\3\21\7\21\u0128\n\21\f\21\16\21\u012b\13\21\3\22\3\22\3\22"+ - "\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\5\22\u013a\n\22\3\23"+ - "\3\23\7\23\u013e\n\23\f\23\16\23\u0141\13\23\3\23\3\23\3\23\7\23\u0146"+ - "\n\23\f\23\16\23\u0149\13\23\3\23\5\23\u014c\n\23\3\24\3\24\3\24\3\24"+ - "\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24"+ - "\5\24\u0160\n\24\3\25\3\25\3\25\5\25\u0165\n\25\3\26\3\26\5\26\u0169\n"+ - "\26\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\32\3\32\3"+ - "\32\3\32\3\32\3\32\6\32\u017c\n\32\r\32\16\32\u017d\3\32\3\32\7\32\u0182"+ - "\n\32\f\32\16\32\u0185\13\32\5\32\u0187\n\32\3\32\3\32\3\32\3\32\3\32"+ - "\3\32\3\32\3\32\7\32\u0191\n\32\f\32\16\32\u0194\13\32\5\32\u0196\n\32"+ - "\3\32\3\32\7\32\u019a\n\32\f\32\16\32\u019d\13\32\5\32\u019f\n\32\3\33"+ - "\3\33\3\33\3\33\7\33\u01a5\n\33\f\33\16\33\u01a8\13\33\3\33\3\33\3\33"+ - "\3\33\5\33\u01ae\n\33\3\34\3\34\3\34\3\34\7\34\u01b4\n\34\f\34\16\34\u01b7"+ - "\13\34\3\34\3\34\3\34\3\34\3\34\5\34\u01be\n\34\3\35\3\35\3\35\3\35\3"+ - "\36\3\36\3\36\3\36\7\36\u01c8\n\36\f\36\16\36\u01cb\13\36\5\36\u01cd\n"+ - "\36\3\36\3\36\3\37\3\37\3\37\5\37\u01d4\n\37\3 \3 \3 \3 \3 \7 \u01db\n"+ - " \f \16 \u01de\13 \5 \u01e0\n \3 \5 \u01e3\n \3 \3 \3 \5 \u01e8\n \3!"+ - "\5!\u01eb\n!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\""+ - "\5\"\u01fc\n\"\3\"\2\3 #\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&("+ - "*,.\60\62\64\668:<>@B\2\16\3\2 \"\3\2#$\3\2:;\3\2%\'\3\2(+\3\2,/\3\2>"+ - "I\3\2<=\4\2\36\37#$\3\2JM\3\2\13\f\3\2UV\u0237\2G\3\2\2\2\4U\3\2\2\2\6"+ - "Z\3\2\2\2\bn\3\2\2\2\n\u00ac\3\2\2\2\f\u00bd\3\2\2\2\16\u00c1\3\2\2\2"+ - "\20\u00c3\3\2\2\2\22\u00cf\3\2\2\2\24\u00d3\3\2\2\2\26\u00d5\3\2\2\2\30"+ - "\u00d7\3\2\2\2\32\u00e0\3\2\2\2\34\u00e8\3\2\2\2\36\u00ed\3\2\2\2 \u00f4"+ - "\3\2\2\2\"\u0139\3\2\2\2$\u014b\3\2\2\2&\u015f\3\2\2\2(\u0164\3\2\2\2"+ - "*\u0168\3\2\2\2,\u016a\3\2\2\2.\u016e\3\2\2\2\60\u0171\3\2\2\2\62\u019e"+ - "\3\2\2\2\64\u01ad\3\2\2\2\66\u01bd\3\2\2\28\u01bf\3\2\2\2:\u01c3\3\2\2"+ - "\2<\u01d3\3\2\2\2>\u01e2\3\2\2\2@\u01ea\3\2\2\2B\u01fb\3\2\2\2DF\5\4\3"+ - "\2ED\3\2\2\2FI\3\2\2\2GE\3\2\2\2GH\3\2\2\2HM\3\2\2\2IG\3\2\2\2JL\5\b\5"+ - "\2KJ\3\2\2\2LO\3\2\2\2MK\3\2\2\2MN\3\2\2\2NQ\3\2\2\2OM\3\2\2\2PR\5\f\7"+ - "\2QP\3\2\2\2QR\3\2\2\2RS\3\2\2\2ST\7\2\2\3T\3\3\2\2\2UV\5\32\16\2VW\7"+ - "T\2\2WX\5\6\4\2XY\5\20\t\2Y\5\3\2\2\2Zf\7\t\2\2[\\\5\32\16\2\\c\7T\2\2"+ - 
"]^\7\r\2\2^_\5\32\16\2_`\7T\2\2`b\3\2\2\2a]\3\2\2\2be\3\2\2\2ca\3\2\2"+ - "\2cd\3\2\2\2dg\3\2\2\2ec\3\2\2\2f[\3\2\2\2fg\3\2\2\2gh\3\2\2\2hi\7\n\2"+ - "\2i\7\3\2\2\2jo\5\n\6\2kl\5\f\7\2lm\7\16\2\2mo\3\2\2\2nj\3\2\2\2nk\3\2"+ - "\2\2o\t\3\2\2\2pq\7\17\2\2qr\7\t\2\2rs\5 \21\2st\7\n\2\2tx\5\16\b\2uv"+ - "\7\21\2\2vy\5\16\b\2wy\6\6\2\2xu\3\2\2\2xw\3\2\2\2y\u00ad\3\2\2\2z{\7"+ - "\22\2\2{|\7\t\2\2|}\5 \21\2}\u0080\7\n\2\2~\u0081\5\16\b\2\177\u0081\5"+ - "\22\n\2\u0080~\3\2\2\2\u0080\177\3\2\2\2\u0081\u00ad\3\2\2\2\u0082\u0083"+ - "\7\24\2\2\u0083\u0085\7\t\2\2\u0084\u0086\5\24\13\2\u0085\u0084\3\2\2"+ - "\2\u0085\u0086\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0089\7\16\2\2\u0088"+ - "\u008a\5 \21\2\u0089\u0088\3\2\2\2\u0089\u008a\3\2\2\2\u008a\u008b\3\2"+ - "\2\2\u008b\u008d\7\16\2\2\u008c\u008e\5\26\f\2\u008d\u008c\3\2\2\2\u008d"+ - "\u008e\3\2\2\2\u008e\u008f\3\2\2\2\u008f\u0092\7\n\2\2\u0090\u0093\5\16"+ - "\b\2\u0091\u0093\5\22\n\2\u0092\u0090\3\2\2\2\u0092\u0091\3\2\2\2\u0093"+ - "\u00ad\3\2\2\2\u0094\u0095\7\24\2\2\u0095\u0096\7\t\2\2\u0096\u0097\5"+ - "\32\16\2\u0097\u0098\7T\2\2\u0098\u0099\7\66\2\2\u0099\u009a\5 \21\2\u009a"+ - "\u009b\7\n\2\2\u009b\u009c\5\16\b\2\u009c\u00ad\3\2\2\2\u009d\u009e\7"+ - "\24\2\2\u009e\u009f\7\t\2\2\u009f\u00a0\7T\2\2\u00a0\u00a1\7\20\2\2\u00a1"+ - "\u00a2\5 \21\2\u00a2\u00a3\7\n\2\2\u00a3\u00a4\5\16\b\2\u00a4\u00ad\3"+ - "\2\2\2\u00a5\u00a6\7\31\2\2\u00a6\u00a8\5\20\t\2\u00a7\u00a9\5\36\20\2"+ - "\u00a8\u00a7\3\2\2\2\u00a9\u00aa\3\2\2\2\u00aa\u00a8\3\2\2\2\u00aa\u00ab"+ - "\3\2\2\2\u00ab\u00ad\3\2\2\2\u00acp\3\2\2\2\u00acz\3\2\2\2\u00ac\u0082"+ - "\3\2\2\2\u00ac\u0094\3\2\2\2\u00ac\u009d\3\2\2\2\u00ac\u00a5\3\2\2\2\u00ad"+ - "\13\3\2\2\2\u00ae\u00af\7\23\2\2\u00af\u00b0\5\20\t\2\u00b0\u00b1\7\22"+ - "\2\2\u00b1\u00b2\7\t\2\2\u00b2\u00b3\5 \21\2\u00b3\u00b4\7\n\2\2\u00b4"+ - "\u00be\3\2\2\2\u00b5\u00be\5\30\r\2\u00b6\u00be\7\25\2\2\u00b7\u00be\7"+ - "\26\2\2\u00b8\u00b9\7\27\2\2\u00b9\u00be\5 \21\2\u00ba\u00bb\7\33\2\2"+ - "\u00bb\u00be\5 \21\2\u00bc\u00be\5 \21\2\u00bd\u00ae\3\2\2\2\u00bd\u00b5"+ - "\3\2\2\2\u00bd\u00b6\3\2\2\2\u00bd\u00b7\3\2\2\2\u00bd\u00b8\3\2\2\2\u00bd"+ - "\u00ba\3\2\2\2\u00bd\u00bc\3\2\2\2\u00be\r\3\2\2\2\u00bf\u00c2\5\20\t"+ - "\2\u00c0\u00c2\5\b\5\2\u00c1\u00bf\3\2\2\2\u00c1\u00c0\3\2\2\2\u00c2\17"+ - "\3\2\2\2\u00c3\u00c7\7\5\2\2\u00c4\u00c6\5\b\5\2\u00c5\u00c4\3\2\2\2\u00c6"+ - "\u00c9\3\2\2\2\u00c7\u00c5\3\2\2\2\u00c7\u00c8\3\2\2\2\u00c8\u00cb\3\2"+ - "\2\2\u00c9\u00c7\3\2\2\2\u00ca\u00cc\5\f\7\2\u00cb\u00ca\3\2\2\2\u00cb"+ - "\u00cc\3\2\2\2\u00cc\u00cd\3\2\2\2\u00cd\u00ce\7\6\2\2\u00ce\21\3\2\2"+ - "\2\u00cf\u00d0\7\16\2\2\u00d0\23\3\2\2\2\u00d1\u00d4\5\30\r\2\u00d2\u00d4"+ - "\5 \21\2\u00d3\u00d1\3\2\2\2\u00d3\u00d2\3\2\2\2\u00d4\25\3\2\2\2\u00d5"+ - "\u00d6\5 \21\2\u00d6\27\3\2\2\2\u00d7\u00d8\5\32\16\2\u00d8\u00dd\5\34"+ - "\17\2\u00d9\u00da\7\r\2\2\u00da\u00dc\5\34\17\2\u00db\u00d9\3\2\2\2\u00dc"+ - "\u00df\3\2\2\2\u00dd\u00db\3\2\2\2\u00dd\u00de\3\2\2\2\u00de\31\3\2\2"+ - "\2\u00df\u00dd\3\2\2\2\u00e0\u00e5\7S\2\2\u00e1\u00e2\7\7\2\2\u00e2\u00e4"+ - "\7\b\2\2\u00e3\u00e1\3\2\2\2\u00e4\u00e7\3\2\2\2\u00e5\u00e3\3\2\2\2\u00e5"+ - "\u00e6\3\2\2\2\u00e6\33\3\2\2\2\u00e7\u00e5\3\2\2\2\u00e8\u00eb\7T\2\2"+ - "\u00e9\u00ea\7>\2\2\u00ea\u00ec\5 \21\2\u00eb\u00e9\3\2\2\2\u00eb\u00ec"+ - "\3\2\2\2\u00ec\35\3\2\2\2\u00ed\u00ee\7\32\2\2\u00ee\u00ef\7\t\2\2\u00ef"+ - "\u00f0\7S\2\2\u00f0\u00f1\7T\2\2\u00f1\u00f2\7\n\2\2\u00f2\u00f3\5\20"+ - "\t\2\u00f3\37\3\2\2\2\u00f4\u00f5\b\21\1\2\u00f5\u00f6\5\"\22\2\u00f6"+ - 
"\u0129\3\2\2\2\u00f7\u00f8\f\21\2\2\u00f8\u00f9\t\2\2\2\u00f9\u0128\5"+ - " \21\22\u00fa\u00fb\f\20\2\2\u00fb\u00fc\t\3\2\2\u00fc\u0128\5 \21\21"+ - "\u00fd\u00fe\f\17\2\2\u00fe\u00ff\t\4\2\2\u00ff\u0128\5 \21\20\u0100\u0101"+ - "\f\16\2\2\u0101\u0102\t\5\2\2\u0102\u0128\5 \21\17\u0103\u0104\f\r\2\2"+ - "\u0104\u0105\t\6\2\2\u0105\u0128\5 \21\16\u0106\u0107\f\13\2\2\u0107\u0108"+ - "\t\7\2\2\u0108\u0128\5 \21\f\u0109\u010a\f\n\2\2\u010a\u010b\7\60\2\2"+ - "\u010b\u0128\5 \21\13\u010c\u010d\f\t\2\2\u010d\u010e\7\61\2\2\u010e\u0128"+ - "\5 \21\n\u010f\u0110\f\b\2\2\u0110\u0111\7\62\2\2\u0111\u0128\5 \21\t"+ - "\u0112\u0113\f\7\2\2\u0113\u0114\7\63\2\2\u0114\u0128\5 \21\b\u0115\u0116"+ - "\f\6\2\2\u0116\u0117\7\64\2\2\u0117\u0128\5 \21\7\u0118\u0119\f\5\2\2"+ - "\u0119\u011a\7\65\2\2\u011a\u011b\5 \21\2\u011b\u011c\7\66\2\2\u011c\u011d"+ - "\5 \21\5\u011d\u0128\3\2\2\2\u011e\u011f\f\4\2\2\u011f\u0120\7\67\2\2"+ - "\u0120\u0128\5 \21\4\u0121\u0122\f\3\2\2\u0122\u0123\t\b\2\2\u0123\u0128"+ - "\5 \21\3\u0124\u0125\f\f\2\2\u0125\u0126\7\35\2\2\u0126\u0128\5\32\16"+ - "\2\u0127\u00f7\3\2\2\2\u0127\u00fa\3\2\2\2\u0127\u00fd\3\2\2\2\u0127\u0100"+ - "\3\2\2\2\u0127\u0103\3\2\2\2\u0127\u0106\3\2\2\2\u0127\u0109\3\2\2\2\u0127"+ - "\u010c\3\2\2\2\u0127\u010f\3\2\2\2\u0127\u0112\3\2\2\2\u0127\u0115\3\2"+ - "\2\2\u0127\u0118\3\2\2\2\u0127\u011e\3\2\2\2\u0127\u0121\3\2\2\2\u0127"+ - "\u0124\3\2\2\2\u0128\u012b\3\2\2\2\u0129\u0127\3\2\2\2\u0129\u012a\3\2"+ - "\2\2\u012a!\3\2\2\2\u012b\u0129\3\2\2\2\u012c\u012d\t\t\2\2\u012d\u013a"+ - "\5$\23\2\u012e\u012f\5$\23\2\u012f\u0130\t\t\2\2\u0130\u013a\3\2\2\2\u0131"+ - "\u013a\5$\23\2\u0132\u0133\t\n\2\2\u0133\u013a\5\"\22\2\u0134\u0135\7"+ - "\t\2\2\u0135\u0136\5\32\16\2\u0136\u0137\7\n\2\2\u0137\u0138\5\"\22\2"+ - "\u0138\u013a\3\2\2\2\u0139\u012c\3\2\2\2\u0139\u012e\3\2\2\2\u0139\u0131"+ - "\3\2\2\2\u0139\u0132\3\2\2\2\u0139\u0134\3\2\2\2\u013a#\3\2\2\2\u013b"+ - "\u013f\5&\24\2\u013c\u013e\5(\25\2\u013d\u013c\3\2\2\2\u013e\u0141\3\2"+ - "\2\2\u013f\u013d\3\2\2\2\u013f\u0140\3\2\2\2\u0140\u014c\3\2\2\2\u0141"+ - "\u013f\3\2\2\2\u0142\u0143\5\32\16\2\u0143\u0147\5*\26\2\u0144\u0146\5"+ - "(\25\2\u0145\u0144\3\2\2\2\u0146\u0149\3\2\2\2\u0147\u0145\3\2\2\2\u0147"+ - "\u0148\3\2\2\2\u0148\u014c\3\2\2\2\u0149\u0147\3\2\2\2\u014a\u014c\5\62"+ - "\32\2\u014b\u013b\3\2\2\2\u014b\u0142\3\2\2\2\u014b\u014a\3\2\2\2\u014c"+ - "%\3\2\2\2\u014d\u014e\7\t\2\2\u014e\u014f\5 \21\2\u014f\u0150\7\n\2\2"+ - "\u0150\u0160\3\2\2\2\u0151\u0160\t\13\2\2\u0152\u0160\7P\2\2\u0153\u0160"+ - "\7Q\2\2\u0154\u0160\7R\2\2\u0155\u0160\7N\2\2\u0156\u0160\7O\2\2\u0157"+ - "\u0160\5\64\33\2\u0158\u0160\5\66\34\2\u0159\u0160\7T\2\2\u015a\u015b"+ - "\7T\2\2\u015b\u0160\5:\36\2\u015c\u015d\7\30\2\2\u015d\u015e\7S\2\2\u015e"+ - "\u0160\5:\36\2\u015f\u014d\3\2\2\2\u015f\u0151\3\2\2\2\u015f\u0152\3\2"+ - "\2\2\u015f\u0153\3\2\2\2\u015f\u0154\3\2\2\2\u015f\u0155\3\2\2\2\u015f"+ - "\u0156\3\2\2\2\u015f\u0157\3\2\2\2\u015f\u0158\3\2\2\2\u015f\u0159\3\2"+ - "\2\2\u015f\u015a\3\2\2\2\u015f\u015c\3\2\2\2\u0160\'\3\2\2\2\u0161\u0165"+ - "\5,\27\2\u0162\u0165\5.\30\2\u0163\u0165\5\60\31\2\u0164\u0161\3\2\2\2"+ - "\u0164\u0162\3\2\2\2\u0164\u0163\3\2\2\2\u0165)\3\2\2\2\u0166\u0169\5"+ - ",\27\2\u0167\u0169\5.\30\2\u0168\u0166\3\2\2\2\u0168\u0167\3\2\2\2\u0169"+ - "+\3\2\2\2\u016a\u016b\t\f\2\2\u016b\u016c\7V\2\2\u016c\u016d\5:\36\2\u016d"+ - "-\3\2\2\2\u016e\u016f\t\f\2\2\u016f\u0170\t\r\2\2\u0170/\3\2\2\2\u0171"+ - "\u0172\7\7\2\2\u0172\u0173\5 \21\2\u0173\u0174\7\b\2\2\u0174\61\3\2\2"+ - 
"\2\u0175\u0176\7\30\2\2\u0176\u017b\7S\2\2\u0177\u0178\7\7\2\2\u0178\u0179"+ - "\5 \21\2\u0179\u017a\7\b\2\2\u017a\u017c\3\2\2\2\u017b\u0177\3\2\2\2\u017c"+ - "\u017d\3\2\2\2\u017d\u017b\3\2\2\2\u017d\u017e\3\2\2\2\u017e\u0186\3\2"+ - "\2\2\u017f\u0183\5*\26\2\u0180\u0182\5(\25\2\u0181\u0180\3\2\2\2\u0182"+ - "\u0185\3\2\2\2\u0183\u0181\3\2\2\2\u0183\u0184\3\2\2\2\u0184\u0187\3\2"+ - "\2\2\u0185\u0183\3\2\2\2\u0186\u017f\3\2\2\2\u0186\u0187\3\2\2\2\u0187"+ - "\u019f\3\2\2\2\u0188\u0189\7\30\2\2\u0189\u018a\7S\2\2\u018a\u018b\7\7"+ - "\2\2\u018b\u018c\7\b\2\2\u018c\u0195\7\5\2\2\u018d\u0192\5 \21\2\u018e"+ - "\u018f\7\r\2\2\u018f\u0191\5 \21\2\u0190\u018e\3\2\2\2\u0191\u0194\3\2"+ - "\2\2\u0192\u0190\3\2\2\2\u0192\u0193\3\2\2\2\u0193\u0196\3\2\2\2\u0194"+ - "\u0192\3\2\2\2\u0195\u018d\3\2\2\2\u0195\u0196\3\2\2\2\u0196\u0197\3\2"+ - "\2\2\u0197\u019b\7\6\2\2\u0198\u019a\5(\25\2\u0199\u0198\3\2\2\2\u019a"+ - "\u019d\3\2\2\2\u019b\u0199\3\2\2\2\u019b\u019c\3\2\2\2\u019c\u019f\3\2"+ - "\2\2\u019d\u019b\3\2\2\2\u019e\u0175\3\2\2\2\u019e\u0188\3\2\2\2\u019f"+ - "\63\3\2\2\2\u01a0\u01a1\7\7\2\2\u01a1\u01a6\5 \21\2\u01a2\u01a3\7\r\2"+ - "\2\u01a3\u01a5\5 \21\2\u01a4\u01a2\3\2\2\2\u01a5\u01a8\3\2\2\2\u01a6\u01a4"+ - "\3\2\2\2\u01a6\u01a7\3\2\2\2\u01a7\u01a9\3\2\2\2\u01a8\u01a6\3\2\2\2\u01a9"+ - "\u01aa\7\b\2\2\u01aa\u01ae\3\2\2\2\u01ab\u01ac\7\7\2\2\u01ac\u01ae\7\b"+ - "\2\2\u01ad\u01a0\3\2\2\2\u01ad\u01ab\3\2\2\2\u01ae\65\3\2\2\2\u01af\u01b0"+ - "\7\7\2\2\u01b0\u01b5\58\35\2\u01b1\u01b2\7\r\2\2\u01b2\u01b4\58\35\2\u01b3"+ - "\u01b1\3\2\2\2\u01b4\u01b7\3\2\2\2\u01b5\u01b3\3\2\2\2\u01b5\u01b6\3\2"+ - "\2\2\u01b6\u01b8\3\2\2\2\u01b7\u01b5\3\2\2\2\u01b8\u01b9\7\b\2\2\u01b9"+ - "\u01be\3\2\2\2\u01ba\u01bb\7\7\2\2\u01bb\u01bc\7\66\2\2\u01bc\u01be\7"+ - "\b\2\2\u01bd\u01af\3\2\2\2\u01bd\u01ba\3\2\2\2\u01be\67\3\2\2\2\u01bf"+ - "\u01c0\5 \21\2\u01c0\u01c1\7\66\2\2\u01c1\u01c2\5 \21\2\u01c29\3\2\2\2"+ - "\u01c3\u01cc\7\t\2\2\u01c4\u01c9\5<\37\2\u01c5\u01c6\7\r\2\2\u01c6\u01c8"+ - "\5<\37\2\u01c7\u01c5\3\2\2\2\u01c8\u01cb\3\2\2\2\u01c9\u01c7\3\2\2\2\u01c9"+ - "\u01ca\3\2\2\2\u01ca\u01cd\3\2\2\2\u01cb\u01c9\3\2\2\2\u01cc\u01c4\3\2"+ - "\2\2\u01cc\u01cd\3\2\2\2\u01cd\u01ce\3\2\2\2\u01ce\u01cf\7\n\2\2\u01cf"+ - ";\3\2\2\2\u01d0\u01d4\5 \21\2\u01d1\u01d4\5> \2\u01d2\u01d4\5B\"\2\u01d3"+ - "\u01d0\3\2\2\2\u01d3\u01d1\3\2\2\2\u01d3\u01d2\3\2\2\2\u01d4=\3\2\2\2"+ - "\u01d5\u01e3\5@!\2\u01d6\u01df\7\t\2\2\u01d7\u01dc\5@!\2\u01d8\u01d9\7"+ - "\r\2\2\u01d9\u01db\5@!\2\u01da\u01d8\3\2\2\2\u01db\u01de\3\2\2\2\u01dc"+ - "\u01da\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd\u01e0\3\2\2\2\u01de\u01dc\3\2"+ - "\2\2\u01df\u01d7\3\2\2\2\u01df\u01e0\3\2\2\2\u01e0\u01e1\3\2\2\2\u01e1"+ - "\u01e3\7\n\2\2\u01e2\u01d5\3\2\2\2\u01e2\u01d6\3\2\2\2\u01e3\u01e4\3\2"+ - "\2\2\u01e4\u01e7\79\2\2\u01e5\u01e8\5\20\t\2\u01e6\u01e8\5 \21\2\u01e7"+ - "\u01e5\3\2\2\2\u01e7\u01e6\3\2\2\2\u01e8?\3\2\2\2\u01e9\u01eb\5\32\16"+ - "\2\u01ea\u01e9\3\2\2\2\u01ea\u01eb\3\2\2\2\u01eb\u01ec\3\2\2\2\u01ec\u01ed"+ - "\7T\2\2\u01edA\3\2\2\2\u01ee\u01ef\7S\2\2\u01ef\u01f0\78\2\2\u01f0\u01fc"+ - "\7T\2\2\u01f1\u01f2\5\32\16\2\u01f2\u01f3\78\2\2\u01f3\u01f4\7\30\2\2"+ - "\u01f4\u01fc\3\2\2\2\u01f5\u01f6\7T\2\2\u01f6\u01f7\78\2\2\u01f7\u01fc"+ - "\7T\2\2\u01f8\u01f9\7\34\2\2\u01f9\u01fa\78\2\2\u01fa\u01fc\7T\2\2\u01fb"+ - "\u01ee\3\2\2\2\u01fb\u01f1\3\2\2\2\u01fb\u01f5\3\2\2\2\u01fb\u01f8\3\2"+ - "\2\2\u01fcC\3\2\2\2\65GMQcfnx\u0080\u0085\u0089\u008d\u0092\u00aa\u00ac"+ - "\u00bd\u00c1\u00c7\u00cb\u00d3\u00dd\u00e5\u00eb\u0127\u0129\u0139\u013f"+ - 
"\u0147\u014b\u015f\u0164\u0168\u017d\u0183\u0186\u0192\u0195\u019b\u019e"+ - "\u01a6\u01ad\u01b5\u01bd\u01c9\u01cc\u01d3\u01dc\u01df\u01e2\u01e7\u01ea"+ - "\u01fb"; + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\7\21"+ + "\u0125\n\21\f\21\16\21\u0128\13\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22"+ + "\3\22\3\22\3\22\3\22\3\22\3\22\5\22\u0137\n\22\3\23\3\23\7\23\u013b\n"+ + "\23\f\23\16\23\u013e\13\23\3\23\3\23\3\23\7\23\u0143\n\23\f\23\16\23\u0146"+ + "\13\23\3\23\5\23\u0149\n\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3"+ + "\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\5\24\u015d\n\24\3\25"+ + "\3\25\3\25\5\25\u0162\n\25\3\26\3\26\5\26\u0166\n\26\3\27\3\27\3\27\3"+ + "\27\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\6"+ + "\32\u0179\n\32\r\32\16\32\u017a\3\32\3\32\7\32\u017f\n\32\f\32\16\32\u0182"+ + "\13\32\5\32\u0184\n\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\7\32\u018e"+ + "\n\32\f\32\16\32\u0191\13\32\5\32\u0193\n\32\3\32\3\32\7\32\u0197\n\32"+ + "\f\32\16\32\u019a\13\32\5\32\u019c\n\32\3\33\3\33\3\33\3\33\7\33\u01a2"+ + "\n\33\f\33\16\33\u01a5\13\33\3\33\3\33\3\33\3\33\5\33\u01ab\n\33\3\34"+ + "\3\34\3\34\3\34\7\34\u01b1\n\34\f\34\16\34\u01b4\13\34\3\34\3\34\3\34"+ + "\3\34\3\34\5\34\u01bb\n\34\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36\7\36"+ + "\u01c5\n\36\f\36\16\36\u01c8\13\36\5\36\u01ca\n\36\3\36\3\36\3\37\3\37"+ + "\3\37\5\37\u01d1\n\37\3 \3 \3 \3 \3 \7 \u01d8\n \f \16 \u01db\13 \5 \u01dd"+ + "\n \3 \5 \u01e0\n \3 \3 \3 \5 \u01e5\n \3!\5!\u01e8\n!\3!\3!\3\"\3\"\3"+ + "\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\5\"\u01f9\n\"\3\"\2\3 #\2\4"+ + "\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@B\2\17\3"+ + "\3\16\16\3\2 \"\3\2#$\3\2:;\3\2%\'\3\2(+\3\2,/\3\2>I\3\2<=\4\2\36\37#"+ + "$\3\2JM\3\2\13\f\3\2UV\u0233\2G\3\2\2\2\4R\3\2\2\2\6W\3\2\2\2\bk\3\2\2"+ + "\2\n\u00a9\3\2\2\2\f\u00ba\3\2\2\2\16\u00be\3\2\2\2\20\u00c0\3\2\2\2\22"+ + "\u00cc\3\2\2\2\24\u00d0\3\2\2\2\26\u00d2\3\2\2\2\30\u00d4\3\2\2\2\32\u00dd"+ + "\3\2\2\2\34\u00e5\3\2\2\2\36\u00ea\3\2\2\2 \u00f1\3\2\2\2\"\u0136\3\2"+ + "\2\2$\u0148\3\2\2\2&\u015c\3\2\2\2(\u0161\3\2\2\2*\u0165\3\2\2\2,\u0167"+ + "\3\2\2\2.\u016b\3\2\2\2\60\u016e\3\2\2\2\62\u019b\3\2\2\2\64\u01aa\3\2"+ + "\2\2\66\u01ba\3\2\2\28\u01bc\3\2\2\2:\u01c0\3\2\2\2<\u01d0\3\2\2\2>\u01df"+ + "\3\2\2\2@\u01e7\3\2\2\2B\u01f8\3\2\2\2DF\5\4\3\2ED\3\2\2\2FI\3\2\2\2G"+ + "E\3\2\2\2GH\3\2\2\2HM\3\2\2\2IG\3\2\2\2JL\5\b\5\2KJ\3\2\2\2LO\3\2\2\2"+ + "MK\3\2\2\2MN\3\2\2\2NP\3\2\2\2OM\3\2\2\2PQ\7\2\2\3Q\3\3\2\2\2RS\5\32\16"+ + "\2ST\7T\2\2TU\5\6\4\2UV\5\20\t\2V\5\3\2\2\2Wc\7\t\2\2XY\5\32\16\2Y`\7"+ + "T\2\2Z[\7\r\2\2[\\\5\32\16\2\\]\7T\2\2]_\3\2\2\2^Z\3\2\2\2_b\3\2\2\2`"+ + "^\3\2\2\2`a\3\2\2\2ad\3\2\2\2b`\3\2\2\2cX\3\2\2\2cd\3\2\2\2de\3\2\2\2"+ + "ef\7\n\2\2f\7\3\2\2\2gl\5\n\6\2hi\5\f\7\2ij\t\2\2\2jl\3\2\2\2kg\3\2\2"+ + "\2kh\3\2\2\2l\t\3\2\2\2mn\7\17\2\2no\7\t\2\2op\5 \21\2pq\7\n\2\2qu\5\16"+ + "\b\2rs\7\21\2\2sv\5\16\b\2tv\6\6\2\2ur\3\2\2\2ut\3\2\2\2v\u00aa\3\2\2"+ + "\2wx\7\22\2\2xy\7\t\2\2yz\5 \21\2z}\7\n\2\2{~\5\16\b\2|~\5\22\n\2}{\3"+ + "\2\2\2}|\3\2\2\2~\u00aa\3\2\2\2\177\u0080\7\24\2\2\u0080\u0082\7\t\2\2"+ + "\u0081\u0083\5\24\13\2\u0082\u0081\3\2\2\2\u0082\u0083\3\2\2\2\u0083\u0084"+ + "\3\2\2\2\u0084\u0086\7\16\2\2\u0085\u0087\5 \21\2\u0086\u0085\3\2\2\2"+ + "\u0086\u0087\3\2\2\2\u0087\u0088\3\2\2\2\u0088\u008a\7\16\2\2\u0089\u008b"+ + "\5\26\f\2\u008a\u0089\3\2\2\2\u008a\u008b\3\2\2\2\u008b\u008c\3\2\2\2"+ + "\u008c\u008f\7\n\2\2\u008d\u0090\5\16\b\2\u008e\u0090\5\22\n\2\u008f\u008d"+ + 
"\3\2\2\2\u008f\u008e\3\2\2\2\u0090\u00aa\3\2\2\2\u0091\u0092\7\24\2\2"+ + "\u0092\u0093\7\t\2\2\u0093\u0094\5\32\16\2\u0094\u0095\7T\2\2\u0095\u0096"+ + "\7\66\2\2\u0096\u0097\5 \21\2\u0097\u0098\7\n\2\2\u0098\u0099\5\16\b\2"+ + "\u0099\u00aa\3\2\2\2\u009a\u009b\7\24\2\2\u009b\u009c\7\t\2\2\u009c\u009d"+ + "\7T\2\2\u009d\u009e\7\20\2\2\u009e\u009f\5 \21\2\u009f\u00a0\7\n\2\2\u00a0"+ + "\u00a1\5\16\b\2\u00a1\u00aa\3\2\2\2\u00a2\u00a3\7\31\2\2\u00a3\u00a5\5"+ + "\20\t\2\u00a4\u00a6\5\36\20\2\u00a5\u00a4\3\2\2\2\u00a6\u00a7\3\2\2\2"+ + "\u00a7\u00a5\3\2\2\2\u00a7\u00a8\3\2\2\2\u00a8\u00aa\3\2\2\2\u00a9m\3"+ + "\2\2\2\u00a9w\3\2\2\2\u00a9\177\3\2\2\2\u00a9\u0091\3\2\2\2\u00a9\u009a"+ + "\3\2\2\2\u00a9\u00a2\3\2\2\2\u00aa\13\3\2\2\2\u00ab\u00ac\7\23\2\2\u00ac"+ + "\u00ad\5\20\t\2\u00ad\u00ae\7\22\2\2\u00ae\u00af\7\t\2\2\u00af\u00b0\5"+ + " \21\2\u00b0\u00b1\7\n\2\2\u00b1\u00bb\3\2\2\2\u00b2\u00bb\5\30\r\2\u00b3"+ + "\u00bb\7\25\2\2\u00b4\u00bb\7\26\2\2\u00b5\u00b6\7\27\2\2\u00b6\u00bb"+ + "\5 \21\2\u00b7\u00b8\7\33\2\2\u00b8\u00bb\5 \21\2\u00b9\u00bb\5 \21\2"+ + "\u00ba\u00ab\3\2\2\2\u00ba\u00b2\3\2\2\2\u00ba\u00b3\3\2\2\2\u00ba\u00b4"+ + "\3\2\2\2\u00ba\u00b5\3\2\2\2\u00ba\u00b7\3\2\2\2\u00ba\u00b9\3\2\2\2\u00bb"+ + "\r\3\2\2\2\u00bc\u00bf\5\20\t\2\u00bd\u00bf\5\b\5\2\u00be\u00bc\3\2\2"+ + "\2\u00be\u00bd\3\2\2\2\u00bf\17\3\2\2\2\u00c0\u00c4\7\5\2\2\u00c1\u00c3"+ + "\5\b\5\2\u00c2\u00c1\3\2\2\2\u00c3\u00c6\3\2\2\2\u00c4\u00c2\3\2\2\2\u00c4"+ + "\u00c5\3\2\2\2\u00c5\u00c8\3\2\2\2\u00c6\u00c4\3\2\2\2\u00c7\u00c9\5\f"+ + "\7\2\u00c8\u00c7\3\2\2\2\u00c8\u00c9\3\2\2\2\u00c9\u00ca\3\2\2\2\u00ca"+ + "\u00cb\7\6\2\2\u00cb\21\3\2\2\2\u00cc\u00cd\7\16\2\2\u00cd\23\3\2\2\2"+ + "\u00ce\u00d1\5\30\r\2\u00cf\u00d1\5 \21\2\u00d0\u00ce\3\2\2\2\u00d0\u00cf"+ + "\3\2\2\2\u00d1\25\3\2\2\2\u00d2\u00d3\5 \21\2\u00d3\27\3\2\2\2\u00d4\u00d5"+ + "\5\32\16\2\u00d5\u00da\5\34\17\2\u00d6\u00d7\7\r\2\2\u00d7\u00d9\5\34"+ + "\17\2\u00d8\u00d6\3\2\2\2\u00d9\u00dc\3\2\2\2\u00da\u00d8\3\2\2\2\u00da"+ + "\u00db\3\2\2\2\u00db\31\3\2\2\2\u00dc\u00da\3\2\2\2\u00dd\u00e2\7S\2\2"+ + "\u00de\u00df\7\7\2\2\u00df\u00e1\7\b\2\2\u00e0\u00de\3\2\2\2\u00e1\u00e4"+ + "\3\2\2\2\u00e2\u00e0\3\2\2\2\u00e2\u00e3\3\2\2\2\u00e3\33\3\2\2\2\u00e4"+ + "\u00e2\3\2\2\2\u00e5\u00e8\7T\2\2\u00e6\u00e7\7>\2\2\u00e7\u00e9\5 \21"+ + "\2\u00e8\u00e6\3\2\2\2\u00e8\u00e9\3\2\2\2\u00e9\35\3\2\2\2\u00ea\u00eb"+ + "\7\32\2\2\u00eb\u00ec\7\t\2\2\u00ec\u00ed\7S\2\2\u00ed\u00ee\7T\2\2\u00ee"+ + "\u00ef\7\n\2\2\u00ef\u00f0\5\20\t\2\u00f0\37\3\2\2\2\u00f1\u00f2\b\21"+ + "\1\2\u00f2\u00f3\5\"\22\2\u00f3\u0126\3\2\2\2\u00f4\u00f5\f\21\2\2\u00f5"+ + "\u00f6\t\3\2\2\u00f6\u0125\5 \21\22\u00f7\u00f8\f\20\2\2\u00f8\u00f9\t"+ + "\4\2\2\u00f9\u0125\5 \21\21\u00fa\u00fb\f\17\2\2\u00fb\u00fc\t\5\2\2\u00fc"+ + "\u0125\5 \21\20\u00fd\u00fe\f\16\2\2\u00fe\u00ff\t\6\2\2\u00ff\u0125\5"+ + " \21\17\u0100\u0101\f\r\2\2\u0101\u0102\t\7\2\2\u0102\u0125\5 \21\16\u0103"+ + "\u0104\f\13\2\2\u0104\u0105\t\b\2\2\u0105\u0125\5 \21\f\u0106\u0107\f"+ + "\n\2\2\u0107\u0108\7\60\2\2\u0108\u0125\5 \21\13\u0109\u010a\f\t\2\2\u010a"+ + "\u010b\7\61\2\2\u010b\u0125\5 \21\n\u010c\u010d\f\b\2\2\u010d\u010e\7"+ + "\62\2\2\u010e\u0125\5 \21\t\u010f\u0110\f\7\2\2\u0110\u0111\7\63\2\2\u0111"+ + "\u0125\5 \21\b\u0112\u0113\f\6\2\2\u0113\u0114\7\64\2\2\u0114\u0125\5"+ + " \21\7\u0115\u0116\f\5\2\2\u0116\u0117\7\65\2\2\u0117\u0118\5 \21\2\u0118"+ + "\u0119\7\66\2\2\u0119\u011a\5 \21\5\u011a\u0125\3\2\2\2\u011b\u011c\f"+ + "\4\2\2\u011c\u011d\7\67\2\2\u011d\u0125\5 \21\4\u011e\u011f\f\3\2\2\u011f"+ + 
"\u0120\t\t\2\2\u0120\u0125\5 \21\3\u0121\u0122\f\f\2\2\u0122\u0123\7\35"+ + "\2\2\u0123\u0125\5\32\16\2\u0124\u00f4\3\2\2\2\u0124\u00f7\3\2\2\2\u0124"+ + "\u00fa\3\2\2\2\u0124\u00fd\3\2\2\2\u0124\u0100\3\2\2\2\u0124\u0103\3\2"+ + "\2\2\u0124\u0106\3\2\2\2\u0124\u0109\3\2\2\2\u0124\u010c\3\2\2\2\u0124"+ + "\u010f\3\2\2\2\u0124\u0112\3\2\2\2\u0124\u0115\3\2\2\2\u0124\u011b\3\2"+ + "\2\2\u0124\u011e\3\2\2\2\u0124\u0121\3\2\2\2\u0125\u0128\3\2\2\2\u0126"+ + "\u0124\3\2\2\2\u0126\u0127\3\2\2\2\u0127!\3\2\2\2\u0128\u0126\3\2\2\2"+ + "\u0129\u012a\t\n\2\2\u012a\u0137\5$\23\2\u012b\u012c\5$\23\2\u012c\u012d"+ + "\t\n\2\2\u012d\u0137\3\2\2\2\u012e\u0137\5$\23\2\u012f\u0130\t\13\2\2"+ + "\u0130\u0137\5\"\22\2\u0131\u0132\7\t\2\2\u0132\u0133\5\32\16\2\u0133"+ + "\u0134\7\n\2\2\u0134\u0135\5\"\22\2\u0135\u0137\3\2\2\2\u0136\u0129\3"+ + "\2\2\2\u0136\u012b\3\2\2\2\u0136\u012e\3\2\2\2\u0136\u012f\3\2\2\2\u0136"+ + "\u0131\3\2\2\2\u0137#\3\2\2\2\u0138\u013c\5&\24\2\u0139\u013b\5(\25\2"+ + "\u013a\u0139\3\2\2\2\u013b\u013e\3\2\2\2\u013c\u013a\3\2\2\2\u013c\u013d"+ + "\3\2\2\2\u013d\u0149\3\2\2\2\u013e\u013c\3\2\2\2\u013f\u0140\5\32\16\2"+ + "\u0140\u0144\5*\26\2\u0141\u0143\5(\25\2\u0142\u0141\3\2\2\2\u0143\u0146"+ + "\3\2\2\2\u0144\u0142\3\2\2\2\u0144\u0145\3\2\2\2\u0145\u0149\3\2\2\2\u0146"+ + "\u0144\3\2\2\2\u0147\u0149\5\62\32\2\u0148\u0138\3\2\2\2\u0148\u013f\3"+ + "\2\2\2\u0148\u0147\3\2\2\2\u0149%\3\2\2\2\u014a\u014b\7\t\2\2\u014b\u014c"+ + "\5 \21\2\u014c\u014d\7\n\2\2\u014d\u015d\3\2\2\2\u014e\u015d\t\f\2\2\u014f"+ + "\u015d\7P\2\2\u0150\u015d\7Q\2\2\u0151\u015d\7R\2\2\u0152\u015d\7N\2\2"+ + "\u0153\u015d\7O\2\2\u0154\u015d\5\64\33\2\u0155\u015d\5\66\34\2\u0156"+ + "\u015d\7T\2\2\u0157\u0158\7T\2\2\u0158\u015d\5:\36\2\u0159\u015a\7\30"+ + "\2\2\u015a\u015b\7S\2\2\u015b\u015d\5:\36\2\u015c\u014a\3\2\2\2\u015c"+ + "\u014e\3\2\2\2\u015c\u014f\3\2\2\2\u015c\u0150\3\2\2\2\u015c\u0151\3\2"+ + "\2\2\u015c\u0152\3\2\2\2\u015c\u0153\3\2\2\2\u015c\u0154\3\2\2\2\u015c"+ + "\u0155\3\2\2\2\u015c\u0156\3\2\2\2\u015c\u0157\3\2\2\2\u015c\u0159\3\2"+ + "\2\2\u015d\'\3\2\2\2\u015e\u0162\5,\27\2\u015f\u0162\5.\30\2\u0160\u0162"+ + "\5\60\31\2\u0161\u015e\3\2\2\2\u0161\u015f\3\2\2\2\u0161\u0160\3\2\2\2"+ + "\u0162)\3\2\2\2\u0163\u0166\5,\27\2\u0164\u0166\5.\30\2\u0165\u0163\3"+ + "\2\2\2\u0165\u0164\3\2\2\2\u0166+\3\2\2\2\u0167\u0168\t\r\2\2\u0168\u0169"+ + "\7V\2\2\u0169\u016a\5:\36\2\u016a-\3\2\2\2\u016b\u016c\t\r\2\2\u016c\u016d"+ + "\t\16\2\2\u016d/\3\2\2\2\u016e\u016f\7\7\2\2\u016f\u0170\5 \21\2\u0170"+ + "\u0171\7\b\2\2\u0171\61\3\2\2\2\u0172\u0173\7\30\2\2\u0173\u0178\7S\2"+ + "\2\u0174\u0175\7\7\2\2\u0175\u0176\5 \21\2\u0176\u0177\7\b\2\2\u0177\u0179"+ + "\3\2\2\2\u0178\u0174\3\2\2\2\u0179\u017a\3\2\2\2\u017a\u0178\3\2\2\2\u017a"+ + "\u017b\3\2\2\2\u017b\u0183\3\2\2\2\u017c\u0180\5*\26\2\u017d\u017f\5("+ + "\25\2\u017e\u017d\3\2\2\2\u017f\u0182\3\2\2\2\u0180\u017e\3\2\2\2\u0180"+ + "\u0181\3\2\2\2\u0181\u0184\3\2\2\2\u0182\u0180\3\2\2\2\u0183\u017c\3\2"+ + "\2\2\u0183\u0184\3\2\2\2\u0184\u019c\3\2\2\2\u0185\u0186\7\30\2\2\u0186"+ + "\u0187\7S\2\2\u0187\u0188\7\7\2\2\u0188\u0189\7\b\2\2\u0189\u0192\7\5"+ + "\2\2\u018a\u018f\5 \21\2\u018b\u018c\7\r\2\2\u018c\u018e\5 \21\2\u018d"+ + "\u018b\3\2\2\2\u018e\u0191\3\2\2\2\u018f\u018d\3\2\2\2\u018f\u0190\3\2"+ + "\2\2\u0190\u0193\3\2\2\2\u0191\u018f\3\2\2\2\u0192\u018a\3\2\2\2\u0192"+ + "\u0193\3\2\2\2\u0193\u0194\3\2\2\2\u0194\u0198\7\6\2\2\u0195\u0197\5("+ + "\25\2\u0196\u0195\3\2\2\2\u0197\u019a\3\2\2\2\u0198\u0196\3\2\2\2\u0198"+ + 
"\u0199\3\2\2\2\u0199\u019c\3\2\2\2\u019a\u0198\3\2\2\2\u019b\u0172\3\2"+ + "\2\2\u019b\u0185\3\2\2\2\u019c\63\3\2\2\2\u019d\u019e\7\7\2\2\u019e\u01a3"+ + "\5 \21\2\u019f\u01a0\7\r\2\2\u01a0\u01a2\5 \21\2\u01a1\u019f\3\2\2\2\u01a2"+ + "\u01a5\3\2\2\2\u01a3\u01a1\3\2\2\2\u01a3\u01a4\3\2\2\2\u01a4\u01a6\3\2"+ + "\2\2\u01a5\u01a3\3\2\2\2\u01a6\u01a7\7\b\2\2\u01a7\u01ab\3\2\2\2\u01a8"+ + "\u01a9\7\7\2\2\u01a9\u01ab\7\b\2\2\u01aa\u019d\3\2\2\2\u01aa\u01a8\3\2"+ + "\2\2\u01ab\65\3\2\2\2\u01ac\u01ad\7\7\2\2\u01ad\u01b2\58\35\2\u01ae\u01af"+ + "\7\r\2\2\u01af\u01b1\58\35\2\u01b0\u01ae\3\2\2\2\u01b1\u01b4\3\2\2\2\u01b2"+ + "\u01b0\3\2\2\2\u01b2\u01b3\3\2\2\2\u01b3\u01b5\3\2\2\2\u01b4\u01b2\3\2"+ + "\2\2\u01b5\u01b6\7\b\2\2\u01b6\u01bb\3\2\2\2\u01b7\u01b8\7\7\2\2\u01b8"+ + "\u01b9\7\66\2\2\u01b9\u01bb\7\b\2\2\u01ba\u01ac\3\2\2\2\u01ba\u01b7\3"+ + "\2\2\2\u01bb\67\3\2\2\2\u01bc\u01bd\5 \21\2\u01bd\u01be\7\66\2\2\u01be"+ + "\u01bf\5 \21\2\u01bf9\3\2\2\2\u01c0\u01c9\7\t\2\2\u01c1\u01c6\5<\37\2"+ + "\u01c2\u01c3\7\r\2\2\u01c3\u01c5\5<\37\2\u01c4\u01c2\3\2\2\2\u01c5\u01c8"+ + "\3\2\2\2\u01c6\u01c4\3\2\2\2\u01c6\u01c7\3\2\2\2\u01c7\u01ca\3\2\2\2\u01c8"+ + "\u01c6\3\2\2\2\u01c9\u01c1\3\2\2\2\u01c9\u01ca\3\2\2\2\u01ca\u01cb\3\2"+ + "\2\2\u01cb\u01cc\7\n\2\2\u01cc;\3\2\2\2\u01cd\u01d1\5 \21\2\u01ce\u01d1"+ + "\5> \2\u01cf\u01d1\5B\"\2\u01d0\u01cd\3\2\2\2\u01d0\u01ce\3\2\2\2\u01d0"+ + "\u01cf\3\2\2\2\u01d1=\3\2\2\2\u01d2\u01e0\5@!\2\u01d3\u01dc\7\t\2\2\u01d4"+ + "\u01d9\5@!\2\u01d5\u01d6\7\r\2\2\u01d6\u01d8\5@!\2\u01d7\u01d5\3\2\2\2"+ + "\u01d8\u01db\3\2\2\2\u01d9\u01d7\3\2\2\2\u01d9\u01da\3\2\2\2\u01da\u01dd"+ + "\3\2\2\2\u01db\u01d9\3\2\2\2\u01dc\u01d4\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd"+ + "\u01de\3\2\2\2\u01de\u01e0\7\n\2\2\u01df\u01d2\3\2\2\2\u01df\u01d3\3\2"+ + "\2\2\u01e0\u01e1\3\2\2\2\u01e1\u01e4\79\2\2\u01e2\u01e5\5\20\t\2\u01e3"+ + "\u01e5\5 \21\2\u01e4\u01e2\3\2\2\2\u01e4\u01e3\3\2\2\2\u01e5?\3\2\2\2"+ + "\u01e6\u01e8\5\32\16\2\u01e7\u01e6\3\2\2\2\u01e7\u01e8\3\2\2\2\u01e8\u01e9"+ + "\3\2\2\2\u01e9\u01ea\7T\2\2\u01eaA\3\2\2\2\u01eb\u01ec\7S\2\2\u01ec\u01ed"+ + "\78\2\2\u01ed\u01f9\7T\2\2\u01ee\u01ef\5\32\16\2\u01ef\u01f0\78\2\2\u01f0"+ + "\u01f1\7\30\2\2\u01f1\u01f9\3\2\2\2\u01f2\u01f3\7T\2\2\u01f3\u01f4\78"+ + "\2\2\u01f4\u01f9\7T\2\2\u01f5\u01f6\7\34\2\2\u01f6\u01f7\78\2\2\u01f7"+ + "\u01f9\7T\2\2\u01f8\u01eb\3\2\2\2\u01f8\u01ee\3\2\2\2\u01f8\u01f2\3\2"+ + "\2\2\u01f8\u01f5\3\2\2\2\u01f9C\3\2\2\2\64GM`cku}\u0082\u0086\u008a\u008f"+ + "\u00a7\u00a9\u00ba\u00be\u00c4\u00c8\u00d0\u00da\u00e2\u00e8\u0124\u0126"+ + "\u0136\u013c\u0144\u0148\u015c\u0161\u0165\u017a\u0180\u0183\u018f\u0192"+ + "\u0198\u019b\u01a3\u01aa\u01b2\u01ba\u01c6\u01c9\u01d0\u01d9\u01dc\u01df"+ + "\u01e4\u01e7\u01f8"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 6c8d3a62e06..dc5c164244d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -261,10 +261,6 @@ public final class Walker extends PainlessParserBaseVisitor { statements.add((AStatement)visit(statement)); } - if (ctx.dstatement() != null) { - statements.add((AStatement)visit(ctx.dstatement())); - } - return new SSource(scriptClassInfo, settings, sourceName, debugStream, (MainMethodReserved)reserved.pop(), 
 location(ctx), functions, globals, statements);
 }
 
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java
new file mode 100644
index 00000000000..41178dd5d75
--- /dev/null
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.painless.lookup;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.util.List;
+
+public class PainlessBinding {
+
+    public final Constructor<?> javaConstructor;
+    public final Method javaMethod;
+
+    public final Class<?> returnType;
+    public final List<Class<?>> typeParameters;
+
+    PainlessBinding(Constructor<?> javaConstructor, Method javaMethod, Class<?> returnType, List<Class<?>> typeParameters) {
+        this.javaConstructor = javaConstructor;
+        this.javaMethod = javaMethod;
+
+        this.returnType = returnType;
+        this.typeParameters = typeParameters;
+    }
+}
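The new PainlessBinding class above pairs the single constructor and the single method of a whitelisted "binding" class: the constructor arguments initialize the binding's state, and the method then runs against that state with the remaining arguments on each call. As a rough illustration of the contract that addPainlessBinding enforces further down (exactly one public constructor, exactly one public method, and type parameters listed as the constructor parameters followed by the method parameters), a binding class could look like the sketch below — the class and member names here are hypothetical, not taken from this change:

    // Hypothetical binding class; shows the required shape only,
    // it is not part of this patch.
    public class ExampleBinding {

        private final int state;

        // initialized from the leading argument of the script call
        public ExampleBinding(int state) {
            this.state = state;
        }

        // invoked with the trailing argument on each call
        public int exampleBinding(int added) {
            return state + added;
        }
    }

A script would then invoke exampleBinding(5, 2) as an ordinary function call, with the 5 routed to the constructor and the 2 to the method.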
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java
index 50bb79dcfbd..f5d6c97bb2f 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java
@@ -24,6 +24,7 @@ import java.util.Collections;
 import java.util.Map;
 
 public final class PainlessClass {
+
     public final Map<String, PainlessConstructor> constructors;
 
     public final Map<String, PainlessMethod> staticMethods;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java
index a61215e9ed7..92100d1bda0 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 final class PainlessClassBuilder {
+
     final Map<String, PainlessConstructor> constructors;
 
     final Map<String, PainlessMethod> staticMethods;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java
index 76597c1a29d..a3dc6c8122b 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java
@@ -25,6 +25,7 @@ import java.lang.reflect.Constructor;
 import java.util.List;
 
 public class PainlessConstructor {
+
     public final Constructor<?> javaConstructor;
     public final List<Class<?>> typeParameters;
     public final MethodHandle methodHandle;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java
index a55d6c3730e..9567e97331c 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java
@@ -23,6 +23,7 @@ import java.lang.invoke.MethodHandle;
 import java.lang.reflect.Field;
 
 public final class PainlessField {
+
     public final Field javaField;
     public final Class<?> typeParameter;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java
index 55855a3cb1e..7be659d11a1 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java
@@ -37,12 +37,24 @@ public final class PainlessLookup {
     private final Map<String, Class<?>> canonicalClassNamesToClasses;
     private final Map<Class<?>, PainlessClass> classesToPainlessClasses;
 
-    PainlessLookup(Map<String, Class<?>> canonicalClassNamesToClasses, Map<Class<?>, PainlessClass> classesToPainlessClasses) {
+    private final Map<String, PainlessMethod> painlessMethodKeysToImportedPainlessMethods;
+    private final Map<String, PainlessBinding> painlessMethodKeysToPainlessBindings;
+
+    PainlessLookup(Map<String, Class<?>> canonicalClassNamesToClasses, Map<Class<?>, PainlessClass> classesToPainlessClasses,
+            Map<String, PainlessMethod> painlessMethodKeysToImportedPainlessMethods,
+            Map<String, PainlessBinding> painlessMethodKeysToPainlessBindings) {
+
         Objects.requireNonNull(canonicalClassNamesToClasses);
         Objects.requireNonNull(classesToPainlessClasses);
 
+        Objects.requireNonNull(painlessMethodKeysToImportedPainlessMethods);
+        Objects.requireNonNull(painlessMethodKeysToPainlessBindings);
+
         this.canonicalClassNamesToClasses = Collections.unmodifiableMap(canonicalClassNamesToClasses);
         this.classesToPainlessClasses = Collections.unmodifiableMap(classesToPainlessClasses);
+
+        this.painlessMethodKeysToImportedPainlessMethods = Collections.unmodifiableMap(painlessMethodKeysToImportedPainlessMethods);
+        this.painlessMethodKeysToPainlessBindings = Collections.unmodifiableMap(painlessMethodKeysToPainlessBindings);
     }
 
     public boolean isValidCanonicalClassName(String canonicalClassName) {
@@ -162,6 +174,22 @@ public final class PainlessLookup {
         return painlessField;
     }
 
+    public PainlessMethod lookupImportedPainlessMethod(String methodName, int arity) {
+        Objects.requireNonNull(methodName);
+
+        String painlessMethodKey = buildPainlessMethodKey(methodName, arity);
+
+        return painlessMethodKeysToImportedPainlessMethods.get(painlessMethodKey);
+    }
+
+    public PainlessBinding lookupPainlessBinding(String methodName, int arity) {
+        Objects.requireNonNull(methodName);
+
+        String painlessMethodKey = buildPainlessMethodKey(methodName, arity);
+
+        return painlessMethodKeysToPainlessBindings.get(painlessMethodKey);
+    }
+
     public PainlessMethod lookupFunctionalInterfacePainlessMethod(Class<?> targetClass) {
         PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass);
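Both new lookups above resolve a call from just a method name and an argument count, via buildPainlessMethodKey. Assuming the key is simply the name joined with the arity (the real helper lives in PainlessLookupUtility; the exact separator is an implementation detail), a minimal sketch of the keying and why the two maps must share one namespace:

    // Sketch only: name/arity keying as assumed from buildPainlessMethodKey.
    final class MethodKeyDemo {
        static String buildKey(String methodName, int arity) {
            return methodName + "/" + arity;
        }

        public static void main(String[] args) {
            // A call site such as exampleBinding(5, 2) exposes only the name
            // and the argument count, so each key may resolve to at most one
            // callable.
            System.out.println(buildKey("exampleBinding", 2)); // exampleBinding/2
        }
    }

Because a key can name either an imported method or a binding but never both, addImportedPainlessMethod and addPainlessBinding in the builder below each reject a key that is already registered in the other map.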
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java
index c8353b54c9f..b822bd47c7a 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.painless.lookup;
 
 import org.elasticsearch.painless.spi.Whitelist;
+import org.elasticsearch.painless.spi.WhitelistBinding;
 import org.elasticsearch.painless.spi.WhitelistClass;
 import org.elasticsearch.painless.spi.WhitelistConstructor;
 import org.elasticsearch.painless.spi.WhitelistField;
@@ -52,11 +53,11 @@ public final class PainlessLookupBuilder {
 
     private static class PainlessConstructorCacheKey {
 
-        private final Class<?> targetType;
+        private final Class<?> targetClass;
         private final List<Class<?>> typeParameters;
 
-        private PainlessConstructorCacheKey(Class<?> targetType, List<Class<?>> typeParameters) {
-            this.targetType = targetType;
+        private PainlessConstructorCacheKey(Class<?> targetClass, List<Class<?>> typeParameters) {
+            this.targetClass = targetClass;
             this.typeParameters = Collections.unmodifiableList(typeParameters);
         }
 
@@ -72,25 +73,27 @@ public final class PainlessLookupBuilder {
 
             PainlessConstructorCacheKey that = (PainlessConstructorCacheKey)object;
 
-            return Objects.equals(targetType, that.targetType) &&
+            return Objects.equals(targetClass, that.targetClass) &&
                 Objects.equals(typeParameters, that.typeParameters);
         }
 
         @Override
         public int hashCode() {
-            return Objects.hash(targetType, typeParameters);
+            return Objects.hash(targetClass, typeParameters);
         }
     }
 
     private static class PainlessMethodCacheKey {
 
-        private final Class<?> targetType;
+        private final Class<?> targetClass;
         private final String methodName;
+        private final Class<?> returnType;
         private final List<Class<?>> typeParameters;
 
-        private PainlessMethodCacheKey(Class<?> targetType, String methodName, List<Class<?>> typeParameters) {
-            this.targetType = targetType;
+        private PainlessMethodCacheKey(Class<?> targetClass, String methodName, Class<?> returnType, List<Class<?>> typeParameters) {
+            this.targetClass = targetClass;
             this.methodName = methodName;
+            this.returnType = returnType;
             this.typeParameters = Collections.unmodifiableList(typeParameters);
         }
 
@@ -106,25 +109,26 @@ public final class PainlessLookupBuilder {
 
             PainlessMethodCacheKey that = (PainlessMethodCacheKey)object;
 
-            return Objects.equals(targetType, that.targetType) &&
+            return Objects.equals(targetClass, that.targetClass) &&
                 Objects.equals(methodName, that.methodName) &&
+                Objects.equals(returnType, that.returnType) &&
                 Objects.equals(typeParameters, that.typeParameters);
         }
 
         @Override
         public int hashCode() {
-            return Objects.hash(targetType, methodName, typeParameters);
+            return Objects.hash(targetClass, methodName, returnType, typeParameters);
         }
     }
 
     private static class PainlessFieldCacheKey {
 
-        private final Class<?> targetType;
+        private final Class<?> targetClass;
         private final String fieldName;
         private final Class<?> typeParameter;
 
-        private PainlessFieldCacheKey(Class<?> targetType, String fieldName, Class<?> typeParameter) {
-            this.targetType = targetType;
+        private PainlessFieldCacheKey(Class<?> targetClass, String fieldName, Class<?> typeParameter) {
+            this.targetClass = targetClass;
             this.fieldName = fieldName;
             this.typeParameter = typeParameter;
         }
@@ -141,20 +145,61 @@ public final class PainlessLookupBuilder {
 
             PainlessFieldCacheKey that = (PainlessFieldCacheKey) object;
 
-            return Objects.equals(targetType, that.targetType) &&
+            return Objects.equals(targetClass, that.targetClass) &&
                 Objects.equals(fieldName, that.fieldName) &&
                 Objects.equals(typeParameter, that.typeParameter);
         }
 
         @Override
         public int hashCode() {
-            return Objects.hash(targetType, fieldName, typeParameter);
+            return Objects.hash(targetClass, fieldName, typeParameter);
         }
     }
 
-    private static final Map<PainlessConstructorCacheKey, PainlessConstructor> painlessConstuctorCache = new HashMap<>();
-    private static final Map<PainlessMethodCacheKey, PainlessMethod> painlessMethodCache = new HashMap<>();
-    private static final Map<PainlessFieldCacheKey, PainlessField> painlessFieldCache = new HashMap<>();
+    private static class PainlessBindingCacheKey {
+
+        private final Class<?> targetClass;
+        private final String methodName;
+        private final Class<?> methodReturnType;
+        private final List<Class<?>> methodTypeParameters;
+
+        private PainlessBindingCacheKey(Class<?> targetClass,
+                String methodName, Class<?> returnType, List<Class<?>> typeParameters) {
+
+            this.targetClass = targetClass;
+            this.methodName = methodName;
+            this.methodReturnType = returnType;
+            this.methodTypeParameters = Collections.unmodifiableList(typeParameters);
+        }
+
+        @Override
+        public boolean equals(Object object) {
+            if (this == object) {
+                return true;
+            }
+
+            if (object == null || getClass() != object.getClass()) {
+                return false;
+            }
+
+            PainlessBindingCacheKey that = (PainlessBindingCacheKey)object;
+
+            return Objects.equals(targetClass, that.targetClass) &&
+                Objects.equals(methodName, that.methodName) &&
+                Objects.equals(methodReturnType, that.methodReturnType) &&
+                Objects.equals(methodTypeParameters, that.methodTypeParameters);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(targetClass, methodName, methodReturnType, methodTypeParameters);
+        }
+    }
+
+    private static final Map<PainlessConstructorCacheKey, PainlessConstructor> painlessConstructorCache = new HashMap<>();
+    private static final Map<PainlessMethodCacheKey, PainlessMethod> painlessMethodCache = new HashMap<>();
+    private static final Map<PainlessFieldCacheKey, PainlessField> painlessFieldCache = new HashMap<>();
+    private static final Map<PainlessBindingCacheKey, PainlessBinding> painlessBindingCache = new HashMap<>();
 
     private static final Pattern CLASS_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$");
     private static final Pattern METHOD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$");
@@ -197,6 +242,22 @@ public final class PainlessLookupBuilder {
                             targetCanonicalClassName, whitelistField.fieldName, whitelistField.canonicalTypeNameParameter);
                 }
             }
+
+            for (WhitelistMethod whitelistStatic : whitelist.whitelistImportedMethods) {
+                origin = whitelistStatic.origin;
+                painlessLookupBuilder.addImportedPainlessMethod(
+                        whitelist.classLoader, whitelistStatic.augmentedCanonicalClassName,
+                        whitelistStatic.methodName, whitelistStatic.returnCanonicalTypeName,
+                        whitelistStatic.canonicalTypeNameParameters);
+            }
+
+            for (WhitelistBinding whitelistBinding : whitelist.whitelistBindings) {
+                origin = whitelistBinding.origin;
+                painlessLookupBuilder.addPainlessBinding(
+                        whitelist.classLoader, whitelistBinding.targetJavaClassName,
+                        whitelistBinding.methodName, whitelistBinding.returnCanonicalTypeName,
+                        whitelistBinding.canonicalTypeNameParameters);
+            }
         }
     } catch (Exception exception) {
         throw new IllegalArgumentException("error loading whitelist(s) " + origin, exception);
@@ -208,9 +269,15 @@ public final class PainlessLookupBuilder {
     private final Map<String, Class<?>> canonicalClassNamesToClasses;
     private final Map<Class<?>, PainlessClassBuilder> classesToPainlessClassBuilders;
 
+    private final Map<String, PainlessMethod> painlessMethodKeysToImportedPainlessMethods;
+    private final Map<String, PainlessBinding> painlessMethodKeysToPainlessBindings;
+
     public PainlessLookupBuilder() {
         canonicalClassNamesToClasses = new HashMap<>();
         classesToPainlessClassBuilders = new HashMap<>();
+
+        painlessMethodKeysToImportedPainlessMethods = new HashMap<>();
+        painlessMethodKeysToPainlessBindings = new HashMap<>();
     }
 
     private Class<?> canonicalTypeNameToType(String canonicalTypeName) {
@@ -392,7 +459,7 @@ public final class PainlessLookupBuilder {
 
             MethodType methodType = methodHandle.type();
 
-            painlessConstructor = painlessConstuctorCache.computeIfAbsent(
+            painlessConstructor = painlessConstructorCache.computeIfAbsent(
                     new PainlessConstructorCacheKey(targetClass, typeParameters),
                     key -> new PainlessConstructor(javaConstructor, typeParameters, methodHandle, methodType)
             );
@@ -439,7 +506,7 @@ public final class PainlessLookupBuilder {
             Class<?> typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter);
 
             if (typeParameter == null) {
-                throw new IllegalArgumentException("parameter type [" + canonicalTypeNameParameter + "] not found for method " +
+                throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for method " +
                         "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]");
             }
 
@@ -449,15 +516,16 @@ public final class PainlessLookupBuilder {
         Class<?> returnType = canonicalTypeNameToType(returnCanonicalTypeName);
 
         if (returnType == null) {
-            throw new IllegalArgumentException("parameter type [" + returnCanonicalTypeName + "] not found for method " +
+            throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for method " +
                     "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]");
         }
 
         addPainlessMethod(targetClass, augmentedClass, methodName, returnType, typeParameters);
     }
 
-    public void addPainlessMethod(Class<?> targetClass, Class<?> augmentedClass, String methodName,
-                                  Class<?> returnType, List<Class<?>> typeParameters) {
+    public void addPainlessMethod(Class<?> targetClass, Class<?> augmentedClass,
+            String methodName, Class<?> returnType, List<Class<?>> typeParameters) {
+
         Objects.requireNonNull(targetClass);
         Objects.requireNonNull(methodName);
         Objects.requireNonNull(returnType);
@@ -516,6 +584,12 @@ public final class PainlessLookupBuilder {
         } else {
             try {
                 javaMethod = augmentedClass.getMethod(methodName, javaTypeParameters.toArray(new Class<?>[typeParametersSize]));
+
+                if (Modifier.isStatic(javaMethod.getModifiers()) == false) {
+                    throw new IllegalArgumentException("method [[" + targetCanonicalClassName + "], [" + methodName + "], " +
+                            typesToCanonicalTypeNames(typeParameters) + "] with augmented class " +
+                            "[" + typeToCanonicalTypeName(augmentedClass) + "] must be static");
+                }
             } catch (NoSuchMethodException nsme) {
                 throw new IllegalArgumentException("method reflection object [[" + targetCanonicalClassName + "], " +
                         "[" + methodName + "], " + typesToCanonicalTypeNames(typeParameters) + "] not found " +
@@ -548,7 +622,7 @@ public final class PainlessLookupBuilder {
 
             MethodType methodType = methodHandle.type();
 
             painlessMethod = painlessMethodCache.computeIfAbsent(
-                    new PainlessMethodCacheKey(targetClass, methodName, typeParameters),
+                    new PainlessMethodCacheKey(targetClass, methodName, returnType, typeParameters),
                     key -> new PainlessMethod(javaMethod, targetClass, returnType, typeParameters, methodHandle, methodType));
 
             painlessClassBuilder.staticMethods.put(painlessMethodKey, painlessMethod);
@@ -563,7 +637,7 @@ public final class PainlessLookupBuilder {
                     "with the same arity and different return type or type parameters");
         }
     } else {
-        PainlessMethod painlessMethod = painlessClassBuilder.staticMethods.get(painlessMethodKey);
+        PainlessMethod painlessMethod = painlessClassBuilder.methods.get(painlessMethodKey);
 
         if (painlessMethod == null) {
             MethodHandle methodHandle;
@@ -588,7 +662,7 @@ public final class PainlessLookupBuilder {
 
             MethodType methodType = methodHandle.type();
 
             painlessMethod = painlessMethodCache.computeIfAbsent(
-                    new PainlessMethodCacheKey(targetClass, methodName, typeParameters),
+                    new PainlessMethodCacheKey(targetClass, methodName, returnType, typeParameters),
                     key -> new PainlessMethod(javaMethod, targetClass, returnType, typeParameters, methodHandle, methodType));
 
             painlessClassBuilder.methods.put(painlessMethodKey, painlessMethod);
@@ -731,6 +805,328 @@ public final class PainlessLookupBuilder {
         }
     }
 
+    public void addImportedPainlessMethod(ClassLoader classLoader, String targetCanonicalClassName,
+            String methodName, String returnCanonicalTypeName, List<String> canonicalTypeNameParameters) {
+
+        Objects.requireNonNull(classLoader);
+        Objects.requireNonNull(targetCanonicalClassName);
+        Objects.requireNonNull(methodName);
+        Objects.requireNonNull(returnCanonicalTypeName);
+        Objects.requireNonNull(canonicalTypeNameParameters);
+
+        Class<?> targetClass = canonicalClassNamesToClasses.get(targetCanonicalClassName);
+
+        if (targetClass == null) {
+            throw new IllegalArgumentException("target class [" + targetCanonicalClassName + "] not found for imported method " +
+                    "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]");
+        }
+
+        List<Class<?>> typeParameters = new ArrayList<>(canonicalTypeNameParameters.size());
+
+        for (String canonicalTypeNameParameter : canonicalTypeNameParameters) {
+            Class<?> typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter);
+
+            if (typeParameter == null) {
+                throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for imported method " +
+                        "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]");
+            }
+
+            typeParameters.add(typeParameter);
+        }
+
+        Class<?> returnType = canonicalTypeNameToType(returnCanonicalTypeName);
+
+        if (returnType == null) {
+            throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for imported method " +
+                    "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]");
+        }
+
+        addImportedPainlessMethod(targetClass, methodName, returnType, typeParameters);
+    }
+
+    public void addImportedPainlessMethod(Class<?> targetClass, String methodName, Class<?> returnType, List<Class<?>> typeParameters) {
+        Objects.requireNonNull(targetClass);
+        Objects.requireNonNull(methodName);
+        Objects.requireNonNull(returnType);
+        Objects.requireNonNull(typeParameters);
+
+        if (targetClass == def.class) {
+            throw new IllegalArgumentException("cannot add imported method from reserved class [" + DEF_CLASS_NAME + "]");
+        }
+
+        String targetCanonicalClassName = typeToCanonicalTypeName(targetClass);
+
+        if (METHOD_NAME_PATTERN.matcher(methodName).matches() == false) {
+            throw new IllegalArgumentException(
+                    "invalid imported method name [" + methodName + "] for target class [" + targetCanonicalClassName + "].");
+        }
+
+        PainlessClassBuilder painlessClassBuilder = classesToPainlessClassBuilders.get(targetClass);
+
+        if (painlessClassBuilder == null) {
+            throw new IllegalArgumentException("target class [" + targetCanonicalClassName + "] not found for imported method " +
+                    "[[" + targetCanonicalClassName + "], [" + methodName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
+        }
+
+        int typeParametersSize = typeParameters.size();
+        List<Class<?>> javaTypeParameters = new ArrayList<>(typeParametersSize);
+
+        for (Class<?> typeParameter : typeParameters) {
+            if (isValidType(typeParameter) == false) {
+                throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " +
IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " + + "not found for imported method [[" + targetCanonicalClassName + "], [" + methodName + "], " + + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + javaTypeParameters.add(typeToJavaType(typeParameter)); + } + + if (isValidType(returnType) == false) { + throw new IllegalArgumentException("return type [" + typeToCanonicalTypeName(returnType) + "] not found for imported method " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + Method javaMethod; + + try { + javaMethod = targetClass.getMethod(methodName, javaTypeParameters.toArray(new Class[typeParametersSize])); + } catch (NoSuchMethodException nsme) { + throw new IllegalArgumentException("imported method reflection object [[" + targetCanonicalClassName + "], " + + "[" + methodName + "], " + typesToCanonicalTypeNames(typeParameters) + "] not found", nsme); + } + + if (javaMethod.getReturnType() != typeToJavaType(returnType)) { + throw new IllegalArgumentException("return type [" + typeToCanonicalTypeName(javaMethod.getReturnType()) + "] " + + "does not match the specified returned type [" + typeToCanonicalTypeName(returnType) + "] " + + "for imported method [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " + + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + if (Modifier.isStatic(javaMethod.getModifiers()) == false) { + throw new IllegalArgumentException("imported method [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " + + typesToCanonicalTypeNames(typeParameters) + "] must be static"); + } + + String painlessMethodKey = buildPainlessMethodKey(methodName, typeParametersSize); + + if (painlessMethodKeysToPainlessBindings.containsKey(painlessMethodKey)) { + throw new IllegalArgumentException("imported method and binding cannot have the same name [" + methodName + "]"); + } + + PainlessMethod importedPainlessMethod = painlessMethodKeysToImportedPainlessMethods.get(painlessMethodKey); + + if (importedPainlessMethod == null) { + MethodHandle methodHandle; + + try { + methodHandle = MethodHandles.publicLookup().in(targetClass).unreflect(javaMethod); + } catch (IllegalAccessException iae) { + throw new IllegalArgumentException("imported method handle [[" + targetClass.getCanonicalName() + "], " + + "[" + methodName + "], " + typesToCanonicalTypeNames(typeParameters) + "] not found", iae); + } + + MethodType methodType = methodHandle.type(); + + importedPainlessMethod = painlessMethodCache.computeIfAbsent( + new PainlessMethodCacheKey(targetClass, methodName, returnType, typeParameters), + key -> new PainlessMethod(javaMethod, targetClass, returnType, typeParameters, methodHandle, methodType)); + + painlessMethodKeysToImportedPainlessMethods.put(painlessMethodKey, importedPainlessMethod); + } else if (importedPainlessMethod.returnType == returnType && + importedPainlessMethod.typeParameters.equals(typeParameters) == false) { + throw new IllegalArgumentException("cannot have imported methods " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + + "[" + typeToCanonicalTypeName(returnType) + "], " + + typesToCanonicalTypeNames(typeParameters) + "] and " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + + "[" + typeToCanonicalTypeName(importedPainlessMethod.returnType) + "], " + + typesToCanonicalTypeNames(importedPainlessMethod.typeParameters) + "] " + + "with the same arity and different return type 
or type parameters"); + } + } + + public void addPainlessBinding(ClassLoader classLoader, String targetJavaClassName, + String methodName, String returnCanonicalTypeName, List canonicalTypeNameParameters) { + + Objects.requireNonNull(classLoader); + Objects.requireNonNull(targetJavaClassName); + Objects.requireNonNull(methodName); + Objects.requireNonNull(returnCanonicalTypeName); + Objects.requireNonNull(canonicalTypeNameParameters); + + Class targetClass; + + try { + targetClass = Class.forName(targetJavaClassName, true, classLoader); + } catch (ClassNotFoundException cnfe) { + throw new IllegalArgumentException("class [" + targetJavaClassName + "] not found", cnfe); + } + + String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); + List> typeParameters = new ArrayList<>(canonicalTypeNameParameters.size()); + + for (String canonicalTypeNameParameter : canonicalTypeNameParameters) { + Class typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); + + if (typeParameter == null) { + throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for binding " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); + } + + typeParameters.add(typeParameter); + } + + Class returnType = canonicalTypeNameToType(returnCanonicalTypeName); + + if (returnType == null) { + throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for binding " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); + } + + addPainlessBinding(targetClass, methodName, returnType, typeParameters); + } + + public void addPainlessBinding(Class targetClass, String methodName, Class returnType, List> typeParameters) { + + Objects.requireNonNull(targetClass); + Objects.requireNonNull(methodName); + Objects.requireNonNull(returnType); + Objects.requireNonNull(typeParameters); + + if (targetClass == def.class) { + throw new IllegalArgumentException("cannot add binding as reserved class [" + DEF_CLASS_NAME + "]"); + } + + String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); + + Constructor[] javaConstructors = targetClass.getConstructors(); + Constructor javaConstructor = null; + + for (Constructor eachJavaConstructor : javaConstructors) { + if (eachJavaConstructor.getDeclaringClass() == targetClass) { + if (javaConstructor != null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] cannot have multiple constructors"); + } + + javaConstructor = eachJavaConstructor; + } + } + + if (javaConstructor == null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] must have exactly one constructor"); + } + + int constructorTypeParametersSize = javaConstructor.getParameterCount(); + + for (int typeParameterIndex = 0; typeParameterIndex < constructorTypeParametersSize; ++typeParameterIndex) { + Class typeParameter = typeParameters.get(typeParameterIndex); + + if (isValidType(typeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + Class javaTypeParameter = javaConstructor.getParameterTypes()[typeParameterIndex]; + + if (isValidType(javaTypeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " 
+ + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + if (javaTypeParameter != typeToJavaType(typeParameter)) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(javaTypeParameter) + "] " + + "does not match the specified type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " + + "for binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + } + + if (METHOD_NAME_PATTERN.matcher(methodName).matches() == false) { + throw new IllegalArgumentException( + "invalid method name [" + methodName + "] for binding [" + targetCanonicalClassName + "]."); + } + + Method[] javaMethods = targetClass.getMethods(); + Method javaMethod = null; + + for (Method eachJavaMethod : javaMethods) { + if (eachJavaMethod.getDeclaringClass() == targetClass) { + if (javaMethod != null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] cannot have multiple methods"); + } + + javaMethod = eachJavaMethod; + } + } + + if (javaMethod == null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] must have exactly one method"); + } + + int methodTypeParametersSize = javaMethod.getParameterCount(); + + for (int typeParameterIndex = 0; typeParameterIndex < methodTypeParametersSize; ++typeParameterIndex) { + Class typeParameter = typeParameters.get(constructorTypeParametersSize + typeParameterIndex); + + if (isValidType(typeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + Class javaTypeParameter = javaMethod.getParameterTypes()[typeParameterIndex]; + + if (isValidType(javaTypeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + if (javaTypeParameter != typeToJavaType(typeParameter)) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(javaTypeParameter) + "] " + + "does not match the specified type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " + + "for binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + } + + if (javaMethod.getReturnType() != typeToJavaType(returnType)) { + throw new IllegalArgumentException("return type [" + typeToCanonicalTypeName(javaMethod.getReturnType()) + "] " + + "does not match the specified returned type [" + typeToCanonicalTypeName(returnType) + "] " + + "for binding [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " + + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + String painlessMethodKey = buildPainlessMethodKey(methodName, constructorTypeParametersSize + methodTypeParametersSize); + + if (painlessMethodKeysToImportedPainlessMethods.containsKey(painlessMethodKey)) { + throw new IllegalArgumentException("binding and imported method cannot have the same name [" + methodName + "]"); + } + + PainlessBinding painlessBinding = painlessMethodKeysToPainlessBindings.get(painlessMethodKey); + + if (painlessBinding == null) { + Constructor finalJavaConstructor = javaConstructor; + Method finalJavaMethod = javaMethod; + + painlessBinding = 
painlessBindingCache.computeIfAbsent( + new PainlessBindingCacheKey(targetClass, methodName, returnType, typeParameters), + key -> new PainlessBinding(finalJavaConstructor, finalJavaMethod, returnType, typeParameters)); + + painlessMethodKeysToPainlessBindings.put(painlessMethodKey, painlessBinding); + } else if (painlessBinding.javaConstructor.equals(javaConstructor) == false || + painlessBinding.javaMethod.equals(javaMethod) == false || + painlessBinding.returnType != returnType || + painlessBinding.typeParameters.equals(typeParameters) == false) { + throw new IllegalArgumentException("cannot have bindings " + + "[[" + targetCanonicalClassName + "], " + + "[" + methodName + "], " + + "[" + typeToCanonicalTypeName(returnType) + "], " + + typesToCanonicalTypeNames(typeParameters) + "] and " + + "[[" + targetCanonicalClassName + "], " + + "[" + methodName + "], " + + "[" + typeToCanonicalTypeName(painlessBinding.returnType) + "], " + + typesToCanonicalTypeNames(painlessBinding.typeParameters) + "] and " + + "with the same name and arity but different constructors or methods"); + } + } + public PainlessLookup build() { copyPainlessClassMembers(); cacheRuntimeHandles(); @@ -742,7 +1138,8 @@ public final class PainlessLookupBuilder { classesToPainlessClasses.put(painlessClassBuilderEntry.getKey(), painlessClassBuilderEntry.getValue().build()); } - return new PainlessLookup(canonicalClassNamesToClasses, classesToPainlessClasses); + return new PainlessLookup(canonicalClassNamesToClasses, classesToPainlessClasses, + painlessMethodKeysToImportedPainlessMethods, painlessMethodKeysToPainlessBindings); } private void copyPainlessClassMembers() { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessMethod.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessMethod.java index 9dd143a4028..89462170ae5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessMethod.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessMethod.java @@ -26,6 +26,7 @@ import java.util.Collections; import java.util.List; public class PainlessMethod { + public final Method javaMethod; public final Class targetClass; public final Class returnType; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java index 1f9973df192..d161296d90a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java @@ -24,8 +24,13 @@ import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Locals.LocalMethod; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.lookup.PainlessBinding; +import org.elasticsearch.painless.lookup.PainlessMethod; +import org.objectweb.asm.Label; +import org.objectweb.asm.Type; import org.objectweb.asm.commons.Method; +import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.Set; @@ -41,6 +46,8 @@ public final class ECallLocal extends AExpression { private final List arguments; private LocalMethod method = null; + private PainlessMethod imported = null; + private PainlessBinding binding = null; public ECallLocal(Location location, String name, List arguments) { super(location); @@ -61,31 +68,95 @@ public final 
         method = locals.getMethod(name, arguments.size());

         if (method == null) {
-            throw createError(new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments."));
+            imported = locals.getPainlessLookup().lookupImportedPainlessMethod(name, arguments.size());
+
+            if (imported == null) {
+                binding = locals.getPainlessLookup().lookupPainlessBinding(name, arguments.size());
+
+                if (binding == null) {
+                    throw createError(
+                            new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments."));
+                }
+            }
+        }
+
+        List<Class<?>> typeParameters;
+
+        if (method != null) {
+            typeParameters = new ArrayList<>(method.typeParameters);
+            actual = method.returnType;
+        } else if (imported != null) {
+            typeParameters = new ArrayList<>(imported.typeParameters);
+            actual = imported.returnType;
+        } else if (binding != null) {
+            typeParameters = new ArrayList<>(binding.typeParameters);
+            actual = binding.returnType;
+        } else {
+            throw new IllegalStateException("Illegal tree structure.");
         }

         for (int argument = 0; argument < arguments.size(); ++argument) {
             AExpression expression = arguments.get(argument);

-            expression.expected = method.typeParameters.get(argument);
+            expression.expected = typeParameters.get(argument);
             expression.internal = true;
             expression.analyze(locals);
             arguments.set(argument, expression.cast(locals));
         }

         statement = true;
-        actual = method.returnType;
     }

     @Override
     void write(MethodWriter writer, Globals globals) {
         writer.writeDebugInfo(location);

-        for (AExpression argument : arguments) {
-            argument.write(writer, globals);
-        }
+        if (method != null) {
+            for (AExpression argument : arguments) {
+                argument.write(writer, globals);
+            }

-        writer.invokeStatic(CLASS_TYPE, new Method(method.name, method.methodType.toMethodDescriptorString()));
+            writer.invokeStatic(CLASS_TYPE, new Method(method.name, method.methodType.toMethodDescriptorString()));
+        } else if (imported != null) {
+            for (AExpression argument : arguments) {
+                argument.write(writer, globals);
+            }
+
+            writer.invokeStatic(Type.getType(imported.targetClass),
+                    new Method(imported.javaMethod.getName(), imported.methodType.toMethodDescriptorString()));
+        } else if (binding != null) {
+            String name = globals.addBinding(binding.javaConstructor.getDeclaringClass());
+            Type type = Type.getType(binding.javaConstructor.getDeclaringClass());
+            int javaConstructorParameterCount = binding.javaConstructor.getParameterCount();
+
+            Label nonNull = new Label();
+
+            writer.loadThis();
+            writer.getField(CLASS_TYPE, name, type);
+            writer.ifNonNull(nonNull);
+            writer.loadThis();
+            writer.newInstance(type);
+            writer.dup();
+
+            for (int argument = 0; argument < javaConstructorParameterCount; ++argument) {
+                arguments.get(argument).write(writer, globals);
+            }
+
+            writer.invokeConstructor(type, Method.getMethod(binding.javaConstructor));
+            writer.putField(CLASS_TYPE, name, type);
+
+            writer.mark(nonNull);
+            writer.loadThis();
+            writer.getField(CLASS_TYPE, name, type);
+
+            for (int argument = 0; argument < binding.javaMethod.getParameterCount(); ++argument) {
+                arguments.get(argument + javaConstructorParameterCount).write(writer, globals);
+            }
+
+            writer.invokeVirtual(type, Method.getMethod(binding.javaMethod));
+        } else {
+            throw new IllegalStateException("Illegal tree structure.");
+        }
     }

     @Override
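The binding branch above compiles a call such as testAddWithState(4, 5, params.test, 0.0) into a lazily initialized instance field: on the first execution the constructor arguments are bound and the instance is stored, and every execution then invokes the bound method on that instance. The SSource change just below adds the backing field. As a rough Java equivalent of the emitted bytecode (the field name and script method are illustrative; the real field name comes from Globals.addBinding and is not spelled out in this diff):

    // Illustrative decompilation only, not source from this change.
    private BindingTest binding$0;        // per-binding field written by SSource

    public Object execute(int test) {
        if (binding$0 == null) {
            binding$0 = new BindingTest(4, 5);           // constructor args bound once
        }
        return binding$0.testAddWithState(test, 0.0);    // method args passed every call
    }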
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java
index 0f7445a38c4..8abd3c7185d 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java
@@ -359,6 +359,13 @@ public final class SSource extends AStatement {
             clinit.endMethod();
         }

+        // Write binding variables
+        for (Map.Entry<String, Class<?>> binding : globals.getBindings().entrySet()) {
+            String name = binding.getKey();
+            String descriptor = Type.getType(binding.getValue()).getDescriptor();
+            visitor.visitField(Opcodes.ACC_PRIVATE, name, descriptor, null, null).visitEnd();
+        }
+
         // Write any needsVarName methods for used variables
         for (org.objectweb.asm.commons.Method needsMethod : scriptClassInfo.getNeedsMethods()) {
             String name = needsMethod.getName();
diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt
index a3ff479533b..81009de9979 100644
--- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt
+++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt
@@ -132,24 +132,6 @@ class org.elasticsearch.index.mapper.IpFieldMapper$IpFieldType$IpScriptDocValues
   List getValues()
 }

-# for testing.
-# currently FeatureTest exposes overloaded constructor, field load store, and overloaded static methods
-class org.elasticsearch.painless.FeatureTest no_import {
-  int z
-  ()
-  (int,int)
-  int getX()
-  int getY()
-  void setX(int)
-  void setY(int)
-  boolean overloadedStatic()
-  boolean overloadedStatic(boolean)
-  Object twoFunctionsOfX(Function,Function)
-  void listInput(List)
-  int org.elasticsearch.painless.FeatureTestAugmentation getTotal()
-  int org.elasticsearch.painless.FeatureTestAugmentation addToTotal(int)
-}
-
 class org.elasticsearch.search.lookup.FieldLookup {
   def getValue()
   List getValues()
@@ -174,4 +156,27 @@ class org.elasticsearch.index.similarity.ScriptedSimilarity$Term {
 class org.elasticsearch.index.similarity.ScriptedSimilarity$Doc {
   int getLength()
   float getFreq()
+}
+
+# for testing
+class org.elasticsearch.painless.FeatureTest no_import {
+  int z
+  ()
+  (int,int)
+  int getX()
+  int getY()
+  void setX(int)
+  void setY(int)
+  boolean overloadedStatic()
+  boolean overloadedStatic(boolean)
+  Object twoFunctionsOfX(Function,Function)
+  void listInput(List)
+  int org.elasticsearch.painless.FeatureTestAugmentation getTotal()
+  int org.elasticsearch.painless.FeatureTestAugmentation addToTotal(int)
+}
+
+# for testing
+static_import {
+  float staticAddFloatsTest(float, float) from_class org.elasticsearch.painless.FeatureTest
+  int testAddWithState(int, int, int, double) bound_to org.elasticsearch.painless.BindingTest
 }
\ No newline at end of file
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicAPITests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicAPITests.java
index 0b13694524b..9863db0b21e 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicAPITests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicAPITests.java
@@ -129,4 +129,12 @@ public class BasicAPITests extends ScriptTestCase {
         assertEquals(5, exec("org.elasticsearch.painless.FeatureTest ft = new org.elasticsearch.painless.FeatureTest();" +
                 "ft.z = 5; return ft.z;"));
     }
+
+    public void testNoSemicolon() {
+        assertEquals(true, exec("def x = true; if (x) return x"));
+    }
+
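+    // staticAddFloatsTest below resolves through the new static_import section of
+    // org.elasticsearch.txt (declared with from_class), so the script calls it like
+    // a top-level function, with no receiver.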
+    public void testStatic() {
+        assertEquals(15.5f, exec("staticAddFloatsTest(6.5f, 9.0f)"));
+    }
 }
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java
new file mode 100644
index 00000000000..4bcc557d3dc
--- /dev/null
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.painless;
+
+import org.elasticsearch.script.ExecutableScript;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+public class BindingsTests extends ScriptTestCase {
+
+    public void testBasicBinding() {
+        assertEquals(15, exec("testAddWithState(4, 5, 6, 0.0)"));
+    }
+
+    public void testRepeatedBinding() {
+        String script = "testAddWithState(4, 5, params.test, 0.0)";
+        Map<String, Object> params = new HashMap<>();
+        ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, Collections.emptyMap());
+        ExecutableScript executableScript = factory.newInstance(params);
+
+        executableScript.setNextVar("test", 5);
+        assertEquals(14, executableScript.run());
+
+        executableScript.setNextVar("test", 4);
+        assertEquals(13, executableScript.run());
+
+        executableScript.setNextVar("test", 7);
+        assertEquals(16, executableScript.run());
+    }
+
+    public void testBoundBinding() {
+        String script = "testAddWithState(4, params.bound, params.test, 0.0)";
+        Map<String, Object> params = new HashMap<>();
+        ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, Collections.emptyMap());
+        ExecutableScript executableScript = factory.newInstance(params);
+
+        executableScript.setNextVar("test", 5);
+        executableScript.setNextVar("bound", 1);
+        assertEquals(10, executableScript.run());
+
+        executableScript.setNextVar("test", 4);
+        executableScript.setNextVar("bound", 2);
+        assertEquals(9, executableScript.run());
+    }
+}
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java
index 8143c39ce6f..81c139662e7 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java
@@ -278,6 +278,6 @@ public class RegexTests extends ScriptTestCase {
         IllegalArgumentException e = expectScriptThrows(IllegalArgumentException.class, () -> {
             exec("/asdf/b", false); // Not picky so we get a non-assertion error
         });
-        assertEquals("invalid sequence of tokens near ['b'].", e.getMessage());
+        assertEquals("unexpected token ['b'] was expecting one of [{<EOF>, ';'}].", e.getMessage());
     }
 }
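The BindingsTests above exercise org.elasticsearch.painless.BindingTest, which is not part of this diff. Reading the whitelist entry "int testAddWithState(int, int, int, double) bound_to org.elasticsearch.painless.BindingTest" together with the one-constructor/one-method rule enforced by addPainlessBinding, the first two int parameters must feed the constructor and the trailing (int, double) pair the method. A minimal sketch consistent with the assertions above (hypothetical; the real class may differ in detail):

    package org.elasticsearch.painless;

    public class BindingTest {
        public int state;

        // called once per script instance, on the first execution
        public BindingTest(int state0, int state1) {
            this.state = state0 + state1;
        }

        // called on every execution with the remaining arguments
        public int testAddWithState(int stateless, double unused) {
            return state + stateless;
        }
    }

Under this sketch testBasicBinding computes (4 + 5) + 6 = 15, and testBoundBinding's second run returns (4 + 1) + 4 = 9 rather than (4 + 2) + 4 = 10, because the constructor arguments were captured on the first run and the bound instance is reused afterwards.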
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreTests.java
index 567f4620461..3d19dedd3b0 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreTests.java
@@ -19,34 +19,25 @@

 package org.elasticsearch.painless;

-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Scorable;

-import java.io.IOException;
 import java.util.Collections;

 public class ScoreTests extends ScriptTestCase {

     /** Most of a dummy scorer impl that requires overriding just score(). */
-    abstract class MockScorer extends Scorer {
-        MockScorer() {
-            super(null);
-        }
+    abstract class MockScorer extends Scorable {
         @Override
         public int docID() {
             return 0;
         }
-        @Override
-        public DocIdSetIterator iterator() {
-            throw new UnsupportedOperationException();
-        }
     }

     public void testScoreWorks() {
         assertEquals(2.5, exec("_score", Collections.emptyMap(), Collections.emptyMap(),
                 new MockScorer() {
                     @Override
-                    public float score() throws IOException {
+                    public float score() {
                         return 2.5f;
                     }
                 },
@@ -57,7 +48,7 @@ public class ScoreTests extends ScriptTestCase {
         assertEquals(3.5, exec("3.5", Collections.emptyMap(), Collections.emptyMap(),
                 new MockScorer() {
                     @Override
-                    public float score() throws IOException {
+                    public float score() {
                         throw new AssertionError("score() should not be called");
                     }
                 },
@@ -69,7 +60,7 @@ public class ScoreTests extends ScriptTestCase {
                 new MockScorer() {
                     private boolean used = false;
                     @Override
-                    public float score() throws IOException {
+                    public float score() {
                         if (used == false) {
                             return 4.5f;
                         }
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java
index 96cc296a1af..577b120fc90 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java
@@ -20,7 +20,7 @@
 package org.elasticsearch.painless;

 import junit.framework.AssertionFailedError;
-import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Scorable;
 import org.elasticsearch.common.lucene.ScorerAware;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.painless.antlr.Walker;
@@ -47,6 +47,8 @@ import static org.hamcrest.Matchers.hasSize;
  * Typically just asserts the output of {@code exec()}
  */
 public abstract class ScriptTestCase extends ESTestCase {
+    private static final PainlessLookup PAINLESS_LOOKUP = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS);
+
     protected PainlessScriptEngine scriptEngine;

     @Before
@@ -89,15 +91,15 @@ public abstract class ScriptTestCase extends ESTestCase {
     }

     /** Compiles and returns the result of {@code script} with access to {@code vars} and compile-time parameters */
-    public Object exec(String script, Map<String, Object> vars, Map<String, String> compileParams, Scorer scorer, boolean picky) {
+    public Object exec(String script, Map<String, Object> vars, Map<String, String> compileParams, Scorable scorer, boolean picky) {
         // test for ambiguity errors before running the actual script if picky is true
         if (picky) {
-            PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS);
-            ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, GenericElasticsearchScript.class);
+            ScriptClassInfo scriptClassInfo = new ScriptClassInfo(PAINLESS_LOOKUP, GenericElasticsearchScript.class);
             CompilerSettings pickySettings = new CompilerSettings();
             pickySettings.setPicky(true);
             pickySettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(scriptEngineSettings()));
-            Walker.buildPainlessTree(scriptClassInfo, new MainMethodReserved(), getTestName(), script, pickySettings, painlessLookup, null);
+            Walker.buildPainlessTree(
+                    scriptClassInfo, new MainMethodReserved(), getTestName(), script, pickySettings, PAINLESS_LOOKUP, null);
         }
         // test actual script execution
         ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, compileParams);
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java
index 6ee021c695f..5c6fbc54667 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java
@@ -19,8 +19,7 @@

 package org.elasticsearch.painless;

-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Scorable;
 import org.elasticsearch.painless.spi.Whitelist;
 import org.elasticsearch.script.ScriptContext;
 import org.elasticsearch.script.ScriptedMetricAggContexts;
@@ -65,15 +64,12 @@ public class ScriptedMetricAggContextsTests extends ScriptTestCase {
         Map<String, Object> params = new HashMap<>();
         Map<String, Object> state = new HashMap<>();

-        Scorer scorer = new Scorer(null) {
+        Scorable scorer = new Scorable() {
             @Override
             public int docID() { return 0; }

             @Override
             public float score() { return 0.5f; }
-
-            @Override
-            public DocIdSetIterator iterator() { return null; }
         };

         ScriptedMetricAggContexts.MapScript.LeafFactory leafFactory = factory.newFactory(params, state, null);
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java
index 0795ab77775..1b4c4eb0ff6 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java
@@ -89,7 +89,7 @@ public class SimilarityScriptTests extends ScriptTestCase {
                 .add(new TermQuery(new Term("match", "yes")), Occur.FILTER)
                 .build(), 3.2f);
         TopDocs topDocs = searcher.search(query, 1);
-        assertEquals(1, topDocs.totalHits);
+        assertEquals(1, topDocs.totalHits.value);
         assertEquals((float) (3.2 * 2 / 3), topDocs.scoreDocs[0].score, 0);
         w.close();
         dir.close();
@@ -128,7 +128,7 @@ public class SimilarityScriptTests extends ScriptTestCase {
                 .add(new TermQuery(new Term("match", "yes")), Occur.FILTER)
                 .build(), 3.2f);
         TopDocs topDocs = searcher.search(query, 1);
-        assertEquals(1, topDocs.totalHits);
+        assertEquals(1, topDocs.totalHits.value);
         assertEquals((float) (3.2 * 2 / 3), topDocs.scoreDocs[0].score, 0);
         w.close();
         dir.close();
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java
index f2d93aa759d..79d2fe0c53d 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java
+++
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java
@@ -255,7 +255,7 @@ public class WhenThingsGoWrongTests extends ScriptTestCase {
             // We don't want PICKY here so we get the normal error message
             exec("def i = 1} return 1", emptyMap(), emptyMap(), null, false);
         });
-        assertEquals("invalid sequence of tokens near ['}'].", e.getMessage());
+        assertEquals("unexpected token ['}'] was expecting one of [{<EOF>, ';'}].", e.getMessage());
     }

     public void testBadBoxingCast() {
diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml
index a9aa00aa5e0..9a43e1f9aa4 100644
--- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml
+++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml
@@ -161,7 +161,7 @@
                     "script_score": {
                         "script": {
                             "lang": "painless",
-                            "source": "-doc['num1'].value"
+                            "source": "3 - doc['num1'].value"
                         }
                     }
                 }]
diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml
index 4c3c204d2d9..617b8df61b6 100644
--- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml
+++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml
@@ -95,7 +95,7 @@ setup:
             field:
               script:
                 source: "doc.date.get(0)"
-  - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12Z' }
+  - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12.000Z' }

   - do:
       search:
@@ -104,7 +104,7 @@ setup:
             field:
               script:
                 source: "doc.date.value"
-  - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12Z' }
+  - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12.000Z' }

 ---
 "geo_point":
diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapper.java
index 1394768aaa8..b6b962a200a 100644
--- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapper.java
+++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapper.java
@@ -135,7 +135,7 @@ public class FeatureVectorFieldMapper extends FieldMapper {
     }

     @Override
-    public FieldMapper parse(ParseContext context) throws IOException {
+    public void parse(ParseContext context) throws IOException {
         if (context.externalValueSet()) {
             throw new IllegalArgumentException("[feature_vector] fields can't be used in multi-fields");
         }
@@ -164,7 +164,6 @@ public class FeatureVectorFieldMapper extends FieldMapper {
                         "float, but got unexpected token " + token);
             }
         }
-        return null; // no mapping update
     }

     @Override
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java
index b555afce67a..064d1d1e597 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java
@@ -21,9 +21,10 @@ package org.elasticsearch.join.aggregations;

 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
 import
org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; @@ -78,8 +79,8 @@ public class ParentToChildrenAggregator extends BucketsAggregator implements Sin throws IOException { super(name, factories, context, parent, pipelineAggregators, metaData); // these two filters are cached in the parser - this.childFilter = context.searcher().createNormalizedWeight(childFilter, false); - this.parentFilter = context.searcher().createNormalizedWeight(parentFilter, false); + this.childFilter = context.searcher().createWeight(context.searcher().rewrite(childFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); + this.parentFilter = context.searcher().createWeight(context.searcher().rewrite(parentFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); this.parentOrdToBuckets = context.bigArrays().newLongArray(maxOrd, false); this.parentOrdToBuckets.fill(0, maxOrd, -1); this.parentOrdToOtherBuckets = new LongObjectPagedHashMap<>(context.bigArrays()); @@ -147,7 +148,17 @@ public class ParentToChildrenAggregator extends BucketsAggregator implements Sin final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx); // Set the scorer, since we now replay only the child docIds - sub.setScorer(new ConstantScoreScorer(null, 1f, childDocsIter)); + sub.setScorer(new Scorable() { + @Override + public float score() { + return 1f; + } + + @Override + public int docID() { + return childDocsIter.docID(); + } + }); final Bits liveDocs = ctx.reader().getLiveDocs(); for (int docId = childDocsIter diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java index 3e6f8eac814..cd5d37df2f9 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java @@ -375,7 +375,7 @@ public final class ParentJoinFieldMapper extends FieldMapper { } @Override - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) throws IOException { context.path().add(simpleName()); XContentParser.Token token = context.parser().currentToken(); String name = null; @@ -437,7 +437,6 @@ public final class ParentJoinFieldMapper extends FieldMapper { context.doc().add(field); context.doc().add(new SortedDocValuesField(fieldType().name(), binaryValue)); context.path().remove(); - return null; } @Override diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java index 3381356da41..e37a7960091 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.join.JoinUtil; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import 
org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -125,15 +124,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder topDocsCollector; + MaxScoreCollector maxScoreCollector = null; if (sort() != null) { - topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores(), true); + topDocsCollector = TopFieldCollector.create(sort().sort, topN, Integer.MAX_VALUE); + if (trackScores()) { + maxScoreCollector = new MaxScoreCollector(); + } } else { - topDocsCollector = TopScoreDocCollector.create(topN); + topDocsCollector = TopScoreDocCollector.create(topN, Integer.MAX_VALUE); + maxScoreCollector = new MaxScoreCollector(); } try { for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { - intersect(weight, innerHitQueryWeight, topDocsCollector, ctx); + intersect(weight, innerHitQueryWeight, MultiCollector.wrap(topDocsCollector, maxScoreCollector), ctx); } } finally { clearReleasables(Lifetime.COLLECTION); } - result[i] = topDocsCollector.topDocs(from(), size()); + TopDocs topDocs = topDocsCollector.topDocs(from(), size()); + float maxScore = Float.NaN; + if (maxScoreCollector != null) { + maxScore = maxScoreCollector.getMaxScore(); + } + result[i] = new TopDocsAndMaxScore(topDocs, maxScore); } } return result; diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java index 869019ac0ff..f7f3b89773b 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java @@ -30,8 +30,8 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; -import org.elasticsearch.search.aggregations.metrics.tophits.TopHits; +import org.elasticsearch.search.aggregations.metrics.Sum; +import org.elasticsearch.search.aggregations.metrics.TopHits; import org.elasticsearch.search.sort.SortOrder; import org.junit.Before; diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java index d6557256ce0..452fe1b490b 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java @@ -49,8 +49,8 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.join.mapper.MetaJoinFieldMapper; import org.elasticsearch.join.mapper.ParentJoinFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.min.InternalMin; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.InternalMin; +import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; import java.io.IOException; import java.util.Arrays; diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java 
b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 546677a2be4..6e4e79d16e5 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -196,10 +196,6 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase documents, - Query candidateMatchesQuery, IndexSearcher percolatorIndexSearcher, Query verifiedMatchesQuery) { + Query candidateMatchesQuery, IndexSearcher percolatorIndexSearcher, + Query nonNestedDocsFilter, Query verifiedMatchesQuery) { this.name = name; this.documents = Objects.requireNonNull(documents); this.candidateMatchesQuery = Objects.requireNonNull(candidateMatchesQuery); this.queryStore = Objects.requireNonNull(queryStore); this.percolatorIndexSearcher = Objects.requireNonNull(percolatorIndexSearcher); + this.nonNestedDocsFilter = nonNestedDocsFilter; this.verifiedMatchesQuery = Objects.requireNonNull(verifiedMatchesQuery); } @@ -68,16 +74,17 @@ final class PercolateQuery extends Query implements Accountable { public Query rewrite(IndexReader reader) throws IOException { Query rewritten = candidateMatchesQuery.rewrite(reader); if (rewritten != candidateMatchesQuery) { - return new PercolateQuery(name, queryStore, documents, rewritten, percolatorIndexSearcher, verifiedMatchesQuery); + return new PercolateQuery(name, queryStore, documents, rewritten, percolatorIndexSearcher, + nonNestedDocsFilter, verifiedMatchesQuery); } else { return this; } } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { - final Weight verifiedMatchesWeight = verifiedMatchesQuery.createWeight(searcher, false, boost); - final Weight candidateMatchesWeight = candidateMatchesQuery.createWeight(searcher, false, boost); + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + final Weight verifiedMatchesWeight = verifiedMatchesQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost); + final Weight candidateMatchesWeight = candidateMatchesQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost); return new Weight(this) { @Override public void extractTerms(Set set) { @@ -91,7 +98,7 @@ final class PercolateQuery extends Query implements Accountable { int result = twoPhaseIterator.approximation().advance(docId); if (result == docId) { if (twoPhaseIterator.matches()) { - if (needsScores) { + if (scoreMode.needsScores()) { CheckedFunction percolatorQueries = queryStore.getQueries(leafReaderContext); Query query = percolatorQueries.apply(docId); Explanation detail = percolatorIndexSearcher.explain(query, 0); @@ -112,9 +119,9 @@ final class PercolateQuery extends Query implements Accountable { return null; } - final CheckedFunction queries = queryStore.getQueries(leafReaderContext); - if (needsScores) { - return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) { + final CheckedFunction percolatorQueries = queryStore.getQueries(leafReaderContext); + if (scoreMode.needsScores()) { + return new BaseScorer(this, approximation) { float score; @@ -122,8 +129,14 @@ final class PercolateQuery extends Query implements Accountable { boolean matchDocId(int docId) throws IOException { Query query = percolatorQueries.apply(docId); if (query != null) { + if (nonNestedDocsFilter != null) { + query = new BooleanQuery.Builder() + .add(query, Occur.MUST) + 
.add(nonNestedDocsFilter, Occur.FILTER) + .build(); + } TopDocs topDocs = percolatorIndexSearcher.search(query, 1); - if (topDocs.totalHits > 0) { + if (topDocs.scoreDocs.length > 0) { score = topDocs.scoreDocs[0].score; return true; } else { @@ -142,7 +155,7 @@ final class PercolateQuery extends Query implements Accountable { } else { ScorerSupplier verifiedDocsScorer = verifiedMatchesWeight.scorerSupplier(leafReaderContext); Bits verifiedDocsBits = Lucene.asSequentialAccessBits(leafReaderContext.reader().maxDoc(), verifiedDocsScorer); - return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) { + return new BaseScorer(this, approximation) { @Override public float score() throws IOException { @@ -159,7 +172,16 @@ final class PercolateQuery extends Query implements Accountable { return true; } Query query = percolatorQueries.apply(docId); - return query != null && Lucene.exists(percolatorIndexSearcher, query); + if (query == null) { + return false; + } + if (nonNestedDocsFilter != null) { + query = new BooleanQuery.Builder() + .add(query, Occur.MUST) + .add(nonNestedDocsFilter, Occur.FILTER) + .build(); + } + return Lucene.exists(percolatorIndexSearcher, query); } }; } @@ -182,6 +204,10 @@ final class PercolateQuery extends Query implements Accountable { return percolatorIndexSearcher; } + boolean excludesNestedDocs() { + return nonNestedDocsFilter != null; + } + List getDocuments() { return documents; } @@ -241,15 +267,10 @@ final class PercolateQuery extends Query implements Accountable { abstract static class BaseScorer extends Scorer { final Scorer approximation; - final CheckedFunction percolatorQueries; - final IndexSearcher percolatorIndexSearcher; - BaseScorer(Weight weight, Scorer approximation, CheckedFunction percolatorQueries, - IndexSearcher percolatorIndexSearcher) { + BaseScorer(Weight weight, Scorer approximation) { super(weight); this.approximation = approximation; - this.percolatorQueries = percolatorQueries; - this.percolatorIndexSearcher = percolatorIndexSearcher; } @Override @@ -279,6 +300,10 @@ final class PercolateQuery extends Query implements Accountable { abstract boolean matchDocId(int docId) throws IOException; + @Override + public float getMaxScore(int upTo) throws IOException { + return Float.MAX_VALUE; + } } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index f18efe4585b..09cc04458ec 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -29,10 +29,9 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.search.join.BitSetProducer; @@ -56,7 +55,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; -import 
org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; @@ -272,11 +270,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder 1 || docs.get(0).docs().size() > 1) { assert docs.size() != 1 || docMapper.hasNestedObjects(); docSearcher = createMultiDocumentSearcher(analyzer, docs); + excludeNestedDocuments = docMapper.hasNestedObjects() && docs.stream() + .map(ParsedDocument::docs) + .mapToInt(List::size) + .anyMatch(size -> size > 1); } else { MemoryIndex memoryIndex = MemoryIndex.fromDocument(docs.get(0).rootDoc(), analyzer, true, false); docSearcher = memoryIndex.createSearcher(); docSearcher.setQueryCache(null); + excludeNestedDocuments = false; } PercolatorFieldMapper.FieldType pft = (PercolatorFieldMapper.FieldType) fieldType; @@ -625,7 +625,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder documents, - IndexSearcher searcher, Version indexVersion) throws IOException { + IndexSearcher searcher, boolean excludeNestedDocuments, Version indexVersion) throws IOException { IndexReader indexReader = searcher.getIndexReader(); Tuple t = createCandidateQuery(indexReader, indexVersion); Query candidateQuery = t.v1(); @@ -261,7 +262,11 @@ public class PercolatorFieldMapper extends FieldMapper { } else { verifiedMatchesQuery = new MatchNoDocsQuery("multiple or nested docs or CoveringQuery could not be used"); } - return new PercolateQuery(name, queryStore, documents, candidateQuery, searcher, verifiedMatchesQuery); + Query filter = null; + if (excludeNestedDocuments) { + filter = Queries.newNonNestedFilter(indexVersion); + } + return new PercolateQuery(name, queryStore, documents, candidateQuery, searcher, filter, verifiedMatchesQuery); } Tuple createCandidateQuery(IndexReader indexReader, Version indexVersion) throws IOException { @@ -383,7 +388,7 @@ public class PercolatorFieldMapper extends FieldMapper { } @Override - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) throws IOException { QueryShardContext queryShardContext = this.queryShardContext.get(); if (context.doc().getField(queryBuilderField.name()) != null) { // If a percolator query has been defined in an array object then multiple percolator queries @@ -406,7 +411,6 @@ public class PercolatorFieldMapper extends FieldMapper { createQueryBuilderField(indexVersion, queryBuilderField, queryBuilder, context); Query query = toQuery(queryShardContext, isMapUnmappedFieldAsText(), queryBuilder); processQuery(query, context); - return null; } static void createQueryBuilderField(Version indexVersion, BinaryFieldMapper qbField, diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java index 4d5e3d2a988..fdcc9156b41 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import 
org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; @@ -74,7 +75,8 @@ final class PercolatorMatchedSlotSubFetchPhase implements FetchSubPhase { // See https://issues.apache.org/jira/browse/LUCENE-8055 // for now we just use version 6.0 version to find nested parent final Version version = Version.V_6_0_0; //context.mapperService().getIndexSettings().getIndexVersionCreated(); - Weight weight = percolatorIndexSearcher.createNormalizedWeight(Queries.newNonNestedFilter(version), false); + Weight weight = percolatorIndexSearcher.createWeight(percolatorIndexSearcher.rewrite(Queries.newNonNestedFilter(version)), + ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer s = weight.scorer(percolatorIndexSearcher.getIndexReader().leaves().get(0)); int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc(); BitSet rootDocs = BitSet.of(s.iterator(), memoryIndexMaxDoc); @@ -96,7 +98,7 @@ final class PercolatorMatchedSlotSubFetchPhase implements FetchSubPhase { } TopDocs topDocs = percolatorIndexSearcher.search(query, memoryIndexMaxDoc, new Sort(SortField.FIELD_DOC)); - if (topDocs.totalHits == 0) { + if (topDocs.totalHits.value == 0) { // This hit didn't match with a percolate query, // likely to happen when percolating multiple documents continue; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index e6d637aabb1..3d9a8fb8ebb 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -61,6 +61,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; @@ -77,6 +78,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -87,6 +89,7 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -593,51 +596,52 @@ public class CandidateQueryTests extends ESSingleNodeTestCase { Version v = Version.V_6_1_0; MemoryIndex memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new IntPoint("int_field", 3)), new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); - Query query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + Query query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), + percolateSearcher, false, v); TopDocs topDocs = shardSearcher.search(query, 1); - assertEquals(1L, topDocs.totalHits); + 
assertEquals(1L, topDocs.totalHits.value); assertEquals(1, topDocs.scoreDocs.length); assertEquals(0, topDocs.scoreDocs[0].doc); memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new LongPoint("long_field", 7L)), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v); topDocs = shardSearcher.search(query, 1); - assertEquals(1L, topDocs.totalHits); + assertEquals(1L, topDocs.totalHits.value); assertEquals(1, topDocs.scoreDocs.length); assertEquals(1, topDocs.scoreDocs[0].doc); memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new HalfFloatPoint("half_float_field", 12)), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v); topDocs = shardSearcher.search(query, 1); - assertEquals(1L, topDocs.totalHits); + assertEquals(1L, topDocs.totalHits.value); assertEquals(1, topDocs.scoreDocs.length); assertEquals(2, topDocs.scoreDocs[0].doc); memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new FloatPoint("float_field", 17)), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v); topDocs = shardSearcher.search(query, 1); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); assertEquals(1, topDocs.scoreDocs.length); assertEquals(3, topDocs.scoreDocs[0].doc); memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new DoublePoint("double_field", 21)), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v); topDocs = shardSearcher.search(query, 1); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); assertEquals(1, topDocs.scoreDocs.length); assertEquals(4, topDocs.scoreDocs[0].doc); memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new InetAddressPoint("ip_field", forString("192.168.0.4"))), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v); topDocs = shardSearcher.search(query, 1); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); assertEquals(1, topDocs.scoreDocs.length); assertEquals(5, topDocs.scoreDocs[0].doc); } @@ -775,16 +779,16 @@ public class CandidateQueryTests extends ESSingleNodeTestCase { memoryIndex.addField("field", "value1", new 
WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, - Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT); - TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true); - assertEquals(3L, topDocs.totalHits); + Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT); + TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC)); + assertEquals(3L, topDocs.totalHits.value); assertEquals(3, topDocs.scoreDocs.length); assertEquals(0, topDocs.scoreDocs[0].doc); assertEquals(1, topDocs.scoreDocs[1].doc); assertEquals(4, topDocs.scoreDocs[2].doc); topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10); - assertEquals(3L, topDocs.totalHits); + assertEquals(3L, topDocs.totalHits.value); assertEquals(3, topDocs.scoreDocs.length); assertEquals(0, topDocs.scoreDocs[0].doc); assertEquals(1, topDocs.scoreDocs[1].doc); @@ -808,9 +812,9 @@ public class CandidateQueryTests extends ESSingleNodeTestCase { memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, - Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT); - TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true); - assertEquals(2L, topDocs.totalHits); + Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT); + TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC)); + assertEquals(2L, topDocs.totalHits.value); assertEquals(2, topDocs.scoreDocs.length); assertEquals(0, topDocs.scoreDocs[0].doc); assertEquals(2, topDocs.scoreDocs[1].doc); @@ -858,17 +862,18 @@ public class CandidateQueryTests extends ESSingleNodeTestCase { try (IndexReader ir = DirectoryReader.open(directory)){ IndexSearcher percolateSearcher = new IndexSearcher(ir); PercolateQuery query = (PercolateQuery) - fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), + percolateSearcher, false, v); BooleanQuery candidateQuery = (BooleanQuery) query.getCandidateMatchesQuery(); assertThat(candidateQuery.clauses().get(0).getQuery(), instanceOf(CoveringQuery.class)); TopDocs topDocs = shardSearcher.search(query, 10); - assertEquals(2L, topDocs.totalHits); + assertEquals(2L, topDocs.totalHits.value); assertEquals(2, topDocs.scoreDocs.length); assertEquals(0, topDocs.scoreDocs[0].doc); assertEquals(2, topDocs.scoreDocs[1].doc); topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10); - assertEquals(2L, topDocs.totalHits); + assertEquals(2L, topDocs.totalHits.value); assertEquals(2, topDocs.scoreDocs.length); assertEquals(0, topDocs.scoreDocs[0].doc); assertEquals(2, topDocs.scoreDocs[1].doc); @@ -888,18 +893,19 @@ public class CandidateQueryTests extends ESSingleNodeTestCase { try (IndexReader ir = DirectoryReader.open(directory)){ IndexSearcher percolateSearcher = new IndexSearcher(ir); PercolateQuery query = (PercolateQuery) - fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + fieldType.percolateQuery("_name", queryStore, 
             Collections.singletonList(new BytesArray("{}")),
+            percolateSearcher, false, v);
         BooleanQuery candidateQuery = (BooleanQuery) query.getCandidateMatchesQuery();
         assertThat(candidateQuery.clauses().get(0).getQuery(), instanceOf(TermInSetQuery.class));
 
         TopDocs topDocs = shardSearcher.search(query, 10);
-        assertEquals(2L, topDocs.totalHits);
+        assertEquals(2L, topDocs.totalHits.value);
         assertEquals(2, topDocs.scoreDocs.length);
         assertEquals(1, topDocs.scoreDocs[0].doc);
         assertEquals(2, topDocs.scoreDocs[1].doc);
 
         topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10);
-        assertEquals(2L, topDocs.totalHits);
+        assertEquals(2L, topDocs.totalHits.value);
         assertEquals(2, topDocs.scoreDocs.length);
         assertEquals(1, topDocs.scoreDocs[0].doc);
         assertEquals(2, topDocs.scoreDocs[1].doc);
@@ -949,9 +955,9 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
         MemoryIndex memoryIndex = new MemoryIndex();
         memoryIndex.addField("field", "value1 value2 value3", new WhitespaceAnalyzer());
         IndexSearcher percolateSearcher = memoryIndex.createSearcher();
-        PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
-        TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
-        assertEquals(2L, topDocs.totalHits);
+        PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
+        TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
+        assertEquals(2L, topDocs.totalHits.value);
         assertEquals(0, topDocs.scoreDocs[0].doc);
         assertEquals(1, topDocs.scoreDocs[1].doc);
     }
@@ -983,25 +989,25 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
         MemoryIndex memoryIndex = new MemoryIndex();
         memoryIndex.addField("field", "value1 value4 value5", new WhitespaceAnalyzer());
         IndexSearcher percolateSearcher = memoryIndex.createSearcher();
-        PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
-        TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
-        assertEquals(1L, topDocs.totalHits);
+        PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
+        TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
+        assertEquals(1L, topDocs.totalHits.value);
         assertEquals(0, topDocs.scoreDocs[0].doc);
 
         memoryIndex = new MemoryIndex();
         memoryIndex.addField("field", "value1 value2", new WhitespaceAnalyzer());
         percolateSearcher = memoryIndex.createSearcher();
-        query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
-        topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
-        assertEquals(1L, topDocs.totalHits);
+        query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
+        topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
+        assertEquals(1L, topDocs.totalHits.value);
         assertEquals(0, topDocs.scoreDocs[0].doc);
 
         memoryIndex = new MemoryIndex();
         memoryIndex.addField("field", "value3", new WhitespaceAnalyzer());
         percolateSearcher = memoryIndex.createSearcher();
-        query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
-        topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
-        assertEquals(1L, topDocs.totalHits);
+        query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
+        topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
+        assertEquals(1L, topDocs.totalHits.value);
         assertEquals(0, topDocs.scoreDocs[0].doc);
     }
@@ -1034,9 +1040,9 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
         document.add(new IntPoint("int_field", 7));
         MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
         IndexSearcher percolateSearcher = memoryIndex.createSearcher();
-        PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
-        TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
-        assertEquals(1L, topDocs.totalHits);
+        PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
+        TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
+        assertEquals(1L, topDocs.totalHits.value);
         assertEquals(0, topDocs.scoreDocs[0].doc);
     }
@@ -1044,7 +1050,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
         boolean requireScore = randomBoolean();
         IndexSearcher percolateSearcher = memoryIndex.createSearcher();
         Query percolateQuery = fieldType.percolateQuery("_name", queryStore,
-            Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT);
+            Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT);
         Query query = requireScore ? percolateQuery : new ConstantScoreQuery(percolateQuery);
         TopDocs topDocs = shardSearcher.search(query, 100);
@@ -1053,7 +1059,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
         TopDocs controlTopDocs = shardSearcher.search(controlQuery, 100);
 
         try {
-            assertThat(topDocs.totalHits, equalTo(controlTopDocs.totalHits));
+            assertThat(topDocs.totalHits.value, equalTo(controlTopDocs.totalHits.value));
             assertThat(topDocs.scoreDocs.length, equalTo(controlTopDocs.scoreDocs.length));
             for (int j = 0; j < topDocs.scoreDocs.length; j++) {
                 assertThat(topDocs.scoreDocs[j].doc, equalTo(controlTopDocs.scoreDocs[j].doc));
@@ -1109,7 +1115,11 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
     }
 
     private void addQuery(Query query, List<ParseContext.Document> docs) {
-        ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(Settings.EMPTY,
+        IndexMetaData build = IndexMetaData.builder("")
+            .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
+            .numberOfShards(1).numberOfReplicas(0).build();
+        IndexSettings settings = new IndexSettings(build, Settings.EMPTY);
+        ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(settings,
             mapperService.documentMapperParser(), documentMapper, null, null);
         fieldMapper.processQuery(query, parseContext);
         ParseContext.Document queryDocument = parseContext.doc();
@@ -1124,7 +1134,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
                              IndexSearcher shardSearcher) throws IOException {
         IndexSearcher percolateSearcher = memoryIndex.createSearcher();
         Query percolateQuery = fieldType.percolateQuery("_name", queryStore,
-            Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT);
+            Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT);
         return shardSearcher.search(percolateQuery, 10);
     }
@@ -1168,7 +1178,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
         }
 
         @Override
-        public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) {
+        public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) {
             final IndexSearcher percolatorIndexSearcher = memoryIndex.createSearcher();
             return new Weight(this) {
@@ -1204,8 +1214,8 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
                         try {
                             Query query = leaf.apply(doc);
                             TopDocs topDocs = percolatorIndexSearcher.search(query, 1);
-                            if (topDocs.totalHits > 0) {
-                                if (needsScores) {
+                            if (topDocs.scoreDocs.length > 0) {
+                                if (scoreMode.needsScores()) {
                                     _score[0] = topDocs.scoreDocs[0].score;
                                 }
                                 return true;
@@ -1233,6 +1243,11 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
                 public float score() throws IOException {
                     return _score[0];
                 }
+
+                @Override
+                public float getMaxScore(int upTo) throws IOException {
+                    return _score[0];
+                }
             };
         }
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java
index e7163edef94..be9c3f83f3f 100644
--- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java
+++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java
@@ -19,15 +19,8 @@
 
 package org.elasticsearch.percolator;
 
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.elasticsearch.ResourceNotFoundException;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
 import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.get.GetResponse;
@@ -36,14 +29,11 @@ import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.ParseContext;
-import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.Rewriteable;
@@ -57,7 +47,6 @@ import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Base64;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
@@ -66,7 +55,6 @@ import java.util.Map;
 import java.util.Set;
 
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.sameInstance;
 
 public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQueryBuilder> {
 
@@ -75,8 +63,8 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQueryBuilder> {
             parseQuery("{\"percolate\" : { \"document\": {}, \"documents\": [{}, {}], \"field\":\"" + queryField + "\"}}"));
     }
 
-    public void testCreateNestedDocumentSearcher() throws Exception {
-        int numNestedDocs = randomIntBetween(2, 8);
-        List<ParseContext.Document> docs = new ArrayList<>(numNestedDocs);
-        for (int i = 0; i < numNestedDocs; i++) {
-            docs.add(new ParseContext.Document());
-        }
-
-        Collection<ParsedDocument> parsedDocument = Collections.singleton(
-            new ParsedDocument(null, null, "_id", "_type", null, docs, null, null, null));
-        Analyzer analyzer = new WhitespaceAnalyzer();
-        IndexSearcher indexSearcher = PercolateQueryBuilder.createMultiDocumentSearcher(analyzer, parsedDocument);
-        assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(numNestedDocs));
-
-        // ensure that any query get modified so that the nested docs are never included as hits:
-        Query query = new MatchAllDocsQuery();
-        BooleanQuery result = (BooleanQuery) indexSearcher.createNormalizedWeight(query, true).getQuery();
-        assertThat(result.clauses().size(), equalTo(2));
-        assertThat(result.clauses().get(0).getQuery(), sameInstance(query));
-        assertThat(result.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST));
-        assertThat(result.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
-    }
-
-    public void testCreateMultiDocumentSearcher() throws Exception {
-        int numDocs = randomIntBetween(2, 8);
-        List<ParsedDocument> docs = new ArrayList<>();
-        for (int i = 0; i < numDocs; i++) {
-            docs.add(new ParsedDocument(null, null, "_id", "_type", null,
-                Collections.singletonList(new ParseContext.Document()), null, null, null));
-        }
-        Analyzer analyzer = new WhitespaceAnalyzer();
-        IndexSearcher indexSearcher = PercolateQueryBuilder.createMultiDocumentSearcher(analyzer, docs);
-        assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(numDocs));
-
-        // ensure that any query get modified so that the nested docs are never included as hits:
-        Query query = new MatchAllDocsQuery();
-        BooleanQuery result = (BooleanQuery) indexSearcher.createNormalizedWeight(query, true).getQuery();
-        assertThat(result.clauses().size(), equalTo(2));
-        assertThat(result.clauses().get(0).getQuery(), sameInstance(query));
-        assertThat(result.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST));
-        assertThat(result.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
-    }
-
-    public void testSerializationBwc() throws IOException {
-        final byte[] data = Base64.getDecoder().decode("P4AAAAAFZmllbGQEdHlwZQAAAAAAAA57ImZvbyI6ImJhciJ9AAAAAA==");
-        final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
-            Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
-        try (StreamInput in = StreamInput.wrap(data)) {
-            in.setVersion(version);
-            PercolateQueryBuilder queryBuilder = new PercolateQueryBuilder(in);
-            assertEquals("type", queryBuilder.getDocumentType());
-            assertEquals("field", queryBuilder.getField());
-            assertEquals("{\"foo\":\"bar\"}", queryBuilder.getDocuments().iterator().next().utf8ToString());
-            assertEquals(XContentType.JSON, queryBuilder.getXContentType());
-
-            try (BytesStreamOutput out = new BytesStreamOutput()) {
-                out.setVersion(version);
-                queryBuilder.writeTo(out);
-                assertArrayEquals(data, out.bytes().toBytesRef().bytes);
-            }
-        }
-    }
-
     private static BytesReference randomSource(Set<String> usedFields) {
         try {
             // If we create two source that have the same field, but these fields have different kind of values (str vs. lng) then
@@ -375,4 +301,5 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQueryBuilder> {
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java
         PercolateQuery percolateQuery = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")),
-            new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery());
+            new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery());
         PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(Settings.EMPTY, emptyMap());
         SearchContext searchContext = Mockito.mock(SearchContext.class);
@@ -60,7 +60,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase {
     public void testLocatePercolatorQuery() {
         PercolateQuery percolateQuery = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")),
-            new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery());
+            new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery());
         assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(new MatchAllDocsQuery()).size(), equalTo(0));
         BooleanQuery.Builder bq = new BooleanQuery.Builder();
         bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER);
@@ -94,7 +94,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase {
         assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(disjunctionMaxQuery).get(0), sameInstance(percolateQuery));
 
         PercolateQuery percolateQuery2 = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")),
-            new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery());
+            new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery());
         bq = new BooleanQuery.Builder();
         bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER);
         assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()).size(), equalTo(0));
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java
index a428726225b..89356bf274d 100644
--- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java
+++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java
@@ -30,6 +30,7 @@ import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TotalHits;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.FixedBitSet;
 import org.elasticsearch.search.SearchHit;
@@ -58,7 +59,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
         MemoryIndex memoryIndex = new MemoryIndex();
         memoryIndex.addField("field", "value", new WhitespaceAnalyzer());
         PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(),
-            new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery());
+            new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery());
 
         PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits);
         assertNotNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
@@ -72,7 +73,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
         MemoryIndex memoryIndex = new MemoryIndex();
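The assertion churn in these test hunks is the Lucene 8 `TopDocs` migration: `totalHits` is no longer a bare `long` but a `TotalHits` object carrying a count plus a relation, because the count may only be a lower bound once hit counting is allowed to stop early. A minimal sketch of reading and building the new shape (the `TotalHitsDemo` wrapper and `searcher` parameter are illustrative, not part of this patch):

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.search.TotalHits;

    final class TotalHitsDemo {
        static TopDocs rebuild(IndexSearcher searcher) throws java.io.IOException {
            TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10);
            long count = topDocs.totalHits.value;   // was: long totalHits
            // The relation says whether the count is exact or a lower bound.
            boolean exact = topDocs.totalHits.relation == TotalHits.Relation.EQUAL_TO;
            // Hand-built TopDocs take a TotalHits instead of (count, scoreDocs, maxScore).
            return new TopDocs(new TotalHits(exact ? count : count, TotalHits.Relation.EQUAL_TO), topDocs.scoreDocs);
        }
    }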
memoryIndex.addField("field", "value1", new WhitespaceAnalyzer()); PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(), - new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery()); + new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery()); PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits); assertNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX)); @@ -85,7 +86,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase { MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(), - new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery()); + new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery()); PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits); assertNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX)); @@ -100,7 +101,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase { scoreDocs[i] = new ScoreDoc(i, 1f); } - TopDocs topDocs = new TopDocs(scoreDocs.length, scoreDocs, 1f); + TopDocs topDocs = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs); IntStream stream = PercolatorMatchedSlotSubFetchPhase.convertTopDocsToSlots(topDocs, null); int[] result = stream.toArray(); @@ -117,7 +118,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase { scoreDocs[2] = new ScoreDoc(8, 1f); scoreDocs[3] = new ScoreDoc(11, 1f); scoreDocs[4] = new ScoreDoc(14, 1f); - TopDocs topDocs = new TopDocs(scoreDocs.length, scoreDocs, 1f); + TopDocs topDocs = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs); FixedBitSet bitSet = new FixedBitSet(15); bitSet.set(2); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java index 5e97eadae83..1c7ae3681ac 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java @@ -74,7 +74,7 @@ public class QueryBuilderStoreTests extends ESTestCase { BinaryFieldMapper fieldMapper = PercolatorFieldMapper.Builder.createQueryBuilderFieldBuilder( new Mapper.BuilderContext(settings, new ContentPath(0))); - Version version = randomBoolean() ? 
Version.V_5_6_0 : Version.V_6_0_0_beta2; + Version version = Version.V_6_0_0_beta2; try (IndexWriter indexWriter = new IndexWriter(directory, config)) { for (int i = 0; i < queryBuilders.length; i++) { queryBuilders[i] = new TermQueryBuilder(randomAlphaOfLength(4), randomAlphaOfLength(8)); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java index e0ebaa85193..be232ca7c40 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.RestController; @@ -56,7 +55,7 @@ public class RestDeleteByQueryAction extends AbstractBulkByQueryRestHandler> consumers = new HashMap<>(); consumers.put("conflicts", o -> internal.setConflicts((String) o)); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index a5520c90b0f..50d01535d7f 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -118,7 +117,7 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler> consumers = new HashMap<>(); consumers.put("conflicts", o -> internal.setConflicts((String) o)); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java index e8e3760882e..d20be747980 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -61,7 +61,8 @@ final class RemoteRequestBuilders { if (searchRequest.scroll() != null) { TimeValue keepAlive = searchRequest.scroll().keepAlive(); - if (remoteVersion.before(Version.V_5_0_0)) { + // V_5_0_0 + if (remoteVersion.before(Version.fromId(5000099))) { /* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros * so we toss out that resolution, rounding up because more scroll * timeout seems safer than less. */ @@ -117,7 +118,8 @@ final class RemoteRequestBuilders { for (int i = 1; i < searchRequest.source().storedFields().fieldNames().size(); i++) { fields.append(',').append(searchRequest.source().storedFields().fieldNames().get(i)); } - String storedFieldsParamName = remoteVersion.before(Version.V_5_0_0_alpha4) ? "fields" : "stored_fields"; + // V_5_0_0 + String storedFieldsParamName = remoteVersion.before(Version.fromId(5000099)) ? 
"fields" : "stored_fields"; request.addParameter(storedFieldsParamName, fields.toString()); } @@ -186,7 +188,8 @@ final class RemoteRequestBuilders { static Request scroll(String scroll, TimeValue keepAlive, Version remoteVersion) { Request request = new Request("POST", "/_search/scroll"); - if (remoteVersion.before(Version.V_5_0_0)) { + // V_5_0_0 + if (remoteVersion.before(Version.fromId(5000099))) { /* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros * so we toss out that resolution, rounding up so we shouldn't end up * with 0s. */ diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java index 4611f9dcbcd..ec34da777b5 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.settings.Settings; /** @@ -73,7 +72,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkByScrollActionMetadat @Override protected ReindexRequest request() { - return new ReindexRequest(new SearchRequest(), new IndexRequest()); + return new ReindexRequest(); } private class TestAction extends TransportReindexAction.AsyncIndexBySearchAction { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java index 6d3ce558c75..a90b60357c4 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ScriptService; @@ -100,7 +99,7 @@ public class ReindexScriptTests extends AbstractAsyncBulkByScrollActionScriptTes @Override protected ReindexRequest request() { - return new ReindexRequest(new SearchRequest(), new IndexRequest()); + return new ReindexRequest(); } @Override diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexWithoutContentIT.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexWithoutContentIT.java index f580b1400c3..73745ca690d 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexWithoutContentIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexWithoutContentIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex; +import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; import org.elasticsearch.test.rest.ESRestTestCase; @@ -30,7 +31,7 @@ public class ReindexWithoutContentIT extends ESRestTestCase { public void testReindexMissingBody() throws IOException { ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest( - "POST", "/_reindex")); + new Request("POST", "/_reindex"))); assertEquals(400, 
responseException.getResponse().getStatusLine().getStatusCode()); assertThat(responseException.getMessage(), containsString("request body is required")); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java index b06948b9058..70e29ed12c5 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -144,7 +142,7 @@ public class RestReindexActionTests extends ESTestCase { request = BytesReference.bytes(b); } try (XContentParser p = createParser(JsonXContent.jsonXContent, request)) { - ReindexRequest r = new ReindexRequest(new SearchRequest(), new IndexRequest()); + ReindexRequest r = new ReindexRequest(); RestReindexAction.PARSER.parse(p, r, null); assertEquals("localhost", r.getRemoteInfo().getHost()); assertArrayEquals(new String[] {"source"}, r.getSearchRequest().indices()); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java index 97809c9bc8d..574a41181e5 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java @@ -20,8 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.Version; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -47,7 +45,7 @@ import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; */ public class RoundTripTests extends ESTestCase { public void testReindexRequest() throws IOException { - ReindexRequest reindex = new ReindexRequest(new SearchRequest(), new IndexRequest()); + ReindexRequest reindex = new ReindexRequest(); randomRequest(reindex); reindex.getDestination().version(randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED, 12L, 1L, 123124L, 12L)); reindex.getDestination().index("test"); @@ -82,7 +80,7 @@ public class RoundTripTests extends ESTestCase { } public void testUpdateByQueryRequest() throws IOException { - UpdateByQueryRequest update = new UpdateByQueryRequest(new SearchRequest()); + UpdateByQueryRequest update = new UpdateByQueryRequest(); randomRequest(update); if (randomBoolean()) { update.setPipeline(randomAlphaOfLength(5)); @@ -104,7 +102,7 @@ public class RoundTripTests extends ESTestCase { } public void testDeleteByQueryRequest() throws IOException { - DeleteByQueryRequest delete = new DeleteByQueryRequest(new SearchRequest()); + DeleteByQueryRequest delete = new DeleteByQueryRequest(); randomRequest(delete); DeleteByQueryRequest tripped = new DeleteByQueryRequest(toInputByteStream(delete)); assertRequestEquals(delete, tripped); @@ -155,13 +153,8 @@ public class RoundTripTests extends ESTestCase { 
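The hunks above and below swap removed `Version.V_5_*` constants for raw wire ids via `Version.fromId`, since the reindex-from-remote code must keep describing versions that the current codebase no longer declares. The id packs major/minor/revision/build into one integer, so 5.0.0 GA is 5000099 and 5.0.0-alpha4 is 5000004. A small sketch of the idiom (the `RemoteVersionDemo` wrapper is illustrative):

    import org.elasticsearch.Version;

    final class RemoteVersionDemo {
        // 5.0.0 release: major 5, minor 0, revision 0, build 99.
        private static final Version V_5_0_0 = Version.fromId(5000099);

        static String storedFieldsParamName(Version remoteVersion) {
            // Pre-5.0 remotes expect "fields"; newer remotes expect "stored_fields".
            return remoteVersion.before(V_5_0_0) ? "fields" : "stored_fields";
        }
    }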
         assertEquals(request.getRemoteInfo().getUsername(), tripped.getRemoteInfo().getUsername());
         assertEquals(request.getRemoteInfo().getPassword(), tripped.getRemoteInfo().getPassword());
         assertEquals(request.getRemoteInfo().getHeaders(), tripped.getRemoteInfo().getHeaders());
-        if (version.onOrAfter(Version.V_5_2_0)) {
-            assertEquals(request.getRemoteInfo().getSocketTimeout(), tripped.getRemoteInfo().getSocketTimeout());
-            assertEquals(request.getRemoteInfo().getConnectTimeout(), tripped.getRemoteInfo().getConnectTimeout());
-        } else {
-            assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, tripped.getRemoteInfo().getSocketTimeout());
-            assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, tripped.getRemoteInfo().getConnectTimeout());
-        }
+        assertEquals(request.getRemoteInfo().getSocketTimeout(), tripped.getRemoteInfo().getSocketTimeout());
+        assertEquals(request.getRemoteInfo().getConnectTimeout(), tripped.getRemoteInfo().getConnectTimeout());
     }
 }
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java
index b688ce019e3..d3f62af907d 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java
@@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex;
 
 import org.elasticsearch.index.reindex.ScrollableHitSource.Hit;
 import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.common.settings.Settings;
 
 public class UpdateByQueryMetadataTests
@@ -39,7 +38,7 @@ public class UpdateByQueryMetadataTests
     @Override
     protected UpdateByQueryRequest request() {
-        return new UpdateByQueryRequest(new SearchRequest());
+        return new UpdateByQueryRequest();
     }
 
     private class TestAction extends TransportUpdateByQueryAction.AsyncIndexBySearchAction {
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java
index 4006d16fbcb..8c9744aa0dd 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.index.reindex;
 
-import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.script.ScriptService;
 
@@ -50,7 +49,7 @@ public class UpdateByQueryWithScriptTests
     @Override
     protected UpdateByQueryRequest request() {
-        return new UpdateByQueryRequest(new SearchRequest());
+        return new UpdateByQueryRequest();
     }
 
     @Override
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java
index b51525f20e3..2f801811327 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java
@@ -136,13 +136,15 @@ public class RemoteRequestBuildersTests extends ESTestCase {
 
         // Test stored_fields for versions that support it
         searchRequest = new SearchRequest().source(new SearchSourceBuilder());
         searchRequest.source().storedField("_source").storedField("_id");
-        remoteVersion = Version.fromId(between(Version.V_5_0_0_alpha4_ID, Version.CURRENT.id));
+        // V_5_0_0_alpha4 => current
+        remoteVersion = Version.fromId(between(5000004, Version.CURRENT.id));
         assertThat(initialSearch(searchRequest, query, remoteVersion).getParameters(), hasEntry("stored_fields", "_source,_id"));
 
         // Test fields for versions that support it
         searchRequest = new SearchRequest().source(new SearchSourceBuilder());
         searchRequest.source().storedField("_source").storedField("_id");
-        remoteVersion = Version.fromId(between(2000099, Version.V_5_0_0_alpha4_ID - 1));
+        // V_2_0_0 => V_5_0_0_alpha3
+        remoteVersion = Version.fromId(between(2000099, 5000003));
         assertThat(initialSearch(searchRequest, query, remoteVersion).getParameters(), hasEntry("fields", "_source,_id"));
 
         // Test extra fields for versions that need it
@@ -190,7 +192,8 @@ public class RemoteRequestBuildersTests extends ESTestCase {
     }
 
     private void assertScroll(Version remoteVersion, Map<String, String> params, TimeValue requested) {
-        if (remoteVersion.before(Version.V_5_0_0)) {
+        // V_5_0_0
+        if (remoteVersion.before(Version.fromId(5000099))) {
             // Versions of Elasticsearch prior to 5.0 can't parse nanos or micros in TimeValue.
             assertThat(params.get("scroll"), not(either(endsWith("nanos")).or(endsWith("micros"))));
             if (requested.getStringRep().endsWith("nanos") || requested.getStringRep().endsWith("micros")) {
@@ -242,7 +245,7 @@ public class RemoteRequestBuildersTests extends ESTestCase {
 
     public void testScrollEntity() throws IOException {
         String scroll = randomAlphaOfLength(30);
-        HttpEntity entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.V_5_0_0).getEntity();
+        HttpEntity entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.fromString("5.0.0")).getEntity();
         assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue());
         assertThat(Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)),
             containsString("\"" + scroll + "\""));
@@ -255,7 +258,7 @@ public class RemoteRequestBuildersTests extends ESTestCase {
 
     public void testClearScroll() throws IOException {
         String scroll = randomAlphaOfLength(30);
-        Request request = clearScroll(scroll, Version.V_5_0_0);
+        Request request = clearScroll(scroll, Version.fromString("5.0.0"));
         assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType().getValue());
         assertThat(Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)),
             containsString("\"" + scroll + "\""));
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java
index 92f370f8f63..d3d3cefea45 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java
@@ -150,13 +150,15 @@ public class RemoteScrollableHitSourceTests extends ESTestCase {
         assertTrue(called.get());
         called.set(false);
         sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/5_0_0_alpha_3.json").lookupRemoteVersion(v -> {
-            assertEquals(Version.V_5_0_0_alpha3, v);
+            // V_5_0_0_alpha3
+            assertEquals(Version.fromId(5000003), v);
             called.set(true);
         });
         assertTrue(called.get());
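Several test changes in this section (`ReindexWithoutContentIT` above, `Netty4BadRequestIT` further down) migrate from the deprecated `performRequest(String, String, ...)` overloads to the low-level REST client's `Request` object, which owns the endpoint, parameters, and body. A sketch of the pattern, assuming a started `RestClient` (the `RequestObjectDemo` wrapper and index names in the body are made up):

    import java.io.IOException;
    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    final class RequestObjectDemo {
        static Response reindex(RestClient client) throws IOException {
            Request request = new Request("POST", "/_reindex");
            // Body and parameters hang off the Request rather than being extra arguments.
            request.setJsonEntity("{\"source\":{\"index\":\"src\"},\"dest\":{\"index\":\"dst\"}}");
            return client.performRequest(request);
        }
    }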
         called.set(false);
         sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/with_unknown_fields.json").lookupRemoteVersion(v -> {
-            assertEquals(Version.V_5_0_0_alpha3, v);
+            // V_5_0_0_alpha3
+            assertEquals(Version.fromId(5000003), v);
             called.set(true);
         });
         assertTrue(called.get());
diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle
index 12ce5ce7d4a..e7c36ff506e 100644
--- a/modules/transport-netty4/build.gradle
+++ b/modules/transport-netty4/build.gradle
@@ -83,7 +83,6 @@ thirdPartyAudit.excludes = [
   'io.netty.internal.tcnative.SSLContext',
 
   // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty)
-  'org.bouncycastle.asn1.x500.X500Name',
   'org.bouncycastle.cert.X509v3CertificateBuilder',
   'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter',
   'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder',
@@ -163,3 +162,11 @@ thirdPartyAudit.excludes = [
   'org.conscrypt.Conscrypt',
   'org.conscrypt.HandshakeListener'
 ]
+
+if (project.inFipsJvm == false) {
+  // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in
+  // a FIPS JVM with BouncyCastleFIPS Provider
+  thirdPartyAudit.excludes += [
+    'org.bouncycastle.asn1.x500.X500Name'
+  ]
+}
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java
index 981a417449f..73135c2a145 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java
@@ -21,11 +21,11 @@ package org.elasticsearch.http.netty4;
 
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelPromise;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.common.concurrent.CompletableContext;
 import org.elasticsearch.http.HttpChannel;
 import org.elasticsearch.http.HttpResponse;
-import org.elasticsearch.transport.netty4.Netty4Utils;
 
 import java.net.InetSocketAddress;
 
@@ -42,7 +42,7 @@ public class Netty4HttpChannel implements HttpChannel {
             } else {
                 Throwable cause = f.cause();
                 if (cause instanceof Error) {
-                    Netty4Utils.maybeDie(cause);
+                    ExceptionsHelper.maybeDieOnAnotherThread(cause);
                     closeContext.completeExceptionally(new Exception(cause));
                 } else {
                     closeContext.completeExceptionally((Exception) cause);
@@ -59,7 +59,7 @@ public class Netty4HttpChannel implements HttpChannel {
                 listener.onResponse(null);
             } else {
                 final Throwable cause = f.cause();
-                Netty4Utils.maybeDie(cause);
+                ExceptionsHelper.maybeDieOnAnotherThread(cause);
                 if (cause instanceof Error) {
                     listener.onFailure(new Exception(cause));
                 } else {
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java
index ab078ad10d3..472e34d09fc 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java
@@ -27,7 +27,6 @@ import io.netty.handler.codec.http.DefaultFullHttpRequest;
 import io.netty.handler.codec.http.FullHttpRequest;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.http.HttpPipelinedRequest;
-import org.elasticsearch.transport.netty4.Netty4Utils;
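`ExceptionsHelper.maybeDieOnAnotherThread`, used throughout these transport hunks, is the old `Netty4Utils.maybeDie` (deleted near the end of this patch) hoisted into core so other transports can share it. The trick it preserves: Netty wraps user code in catch-all blocks, so a fatal `Error` rethrown in place would be swallowed; rethrowing from a fresh thread lets it reach the JVM's uncaught-exception handler. A stripped-down sketch of that pattern (the real helper also logs the current stack trace first):

    final class DieOnAnotherThreadDemo {
        static void maybeDieOnAnotherThread(final Throwable cause) {
            if (cause instanceof Error) {
                // A new thread has no enclosing try/catch, so the Error escapes
                // to the uncaught exception handler and takes the process down.
                new Thread(() -> {
                    throw (Error) cause;
                }).start();
            }
        }
    }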
 
 @ChannelHandler.Sharable
 class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<FullHttpRequest>> {
@@ -58,7 +57,7 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<FullHttpRequest>> {
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ESLoggingHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ESLoggingHandler.java
-        if (readableBytes >= 2) {
-            final StringBuilder sb = new StringBuilder();
-            sb.append(ctx.channel().toString());
-            final int offset = arg.readerIndex();
-            // this might be an ES message, check the header
-            if (arg.getByte(offset) == (byte) 'E' && arg.getByte(offset + 1) == (byte) 'S') {
-                if (readableBytes == TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE) {
-                    final int length = arg.getInt(offset + MESSAGE_LENGTH_OFFSET);
-                    if (length == TcpTransport.PING_DATA_SIZE) {
-                        sb.append(" [ping]").append(' ').append(eventName).append(": ").append(readableBytes).append('B');
-                        return sb.toString();
-                    }
-                }
-                else if (readableBytes >= TcpHeader.HEADER_SIZE) {
-                    // we are going to try to decode this as an ES message
-                    final int length = arg.getInt(offset + MESSAGE_LENGTH_OFFSET);
-                    final long requestId = arg.getLong(offset + REQUEST_ID_OFFSET);
-                    final byte status = arg.getByte(offset + STATUS_OFFSET);
-                    final boolean isRequest = TransportStatus.isRequest(status);
-                    final String type = isRequest ? "request" : "response";
-                    final String version = Version.fromId(arg.getInt(offset + VERSION_ID_OFFSET)).toString();
-                    sb.append(" [length: ").append(length);
-                    sb.append(", request id: ").append(requestId);
-                    sb.append(", type: ").append(type);
-                    sb.append(", version: ").append(version);
-                    if (isRequest) {
-                        // it looks like an ES request, try to decode the action
-                        final int remaining = readableBytes - ACTION_OFFSET;
-                        final ByteBuf slice = arg.slice(offset + ACTION_OFFSET, remaining);
-                        // the stream might be compressed
-                        try (StreamInput in = in(status, slice, remaining)) {
-                            // the first bytes in the message is the context headers
-                            try (ThreadContext context = new ThreadContext(Settings.EMPTY)) {
-                                context.readHeaders(in);
-                            }
-                            // now we decode the features
-                            if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
-                                in.readStringArray();
-                            }
-                            // now we can decode the action name
-                            sb.append(", action: ").append(in.readString());
-                        }
-                    }
-                    sb.append(']');
-                    sb.append(' ').append(eventName).append(": ").append(readableBytes).append('B');
-                    return sb.toString();
-                }
-            }
-        }
-        // we could not decode this as an ES message, use the default formatting
-        return super.format(ctx, eventName, arg);
-    }
-
-    private StreamInput in(final Byte status, final ByteBuf slice, final int remaining) throws IOException {
-        final ByteBufStreamInput in = new ByteBufStreamInput(slice, remaining);
-        if (TransportStatus.isCompress(status)) {
-            final Compressor compressor = CompressorFactory.compressor(Netty4Utils.toBytesReference(slice));
-            return compressor.streamInput(in);
-        } else {
-            return in;
-        }
-    }
-}
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java
index 698c86d048c..29ae47df06f 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java
@@ -26,8 +26,6 @@ import io.netty.channel.ChannelHandlerContext;
 import io.netty.util.Attribute;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.transport.TcpHeader;
 import org.elasticsearch.transport.Transports;
 
@@ -46,29 +44,21 @@ final class Netty4MessageChannelHandler extends ChannelDuplexHandler {
     @Override
     public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
         Transports.assertTransportThread();
-        if (!(msg instanceof ByteBuf)) {
-            ctx.fireChannelRead(msg);
-            return;
-        }
+        assert msg instanceof ByteBuf : "Expected message type ByteBuf, found: " + msg.getClass();
+
         final ByteBuf buffer = (ByteBuf) msg;
-        final int remainingMessageSize = buffer.getInt(buffer.readerIndex() - TcpHeader.MESSAGE_LENGTH_SIZE);
-        final int expectedReaderIndex = buffer.readerIndex() + remainingMessageSize;
         try {
             Channel channel = ctx.channel();
-            // netty always copies a buffer, either in NioWorker in its read handler, where it copies to a fresh
-            // buffer, or in the cumulative buffer, which is cleaned each time so it could be bigger than the actual size
-            BytesReference reference = Netty4Utils.toBytesReference(buffer, remainingMessageSize);
             Attribute<Netty4TcpChannel> channelAttribute = channel.attr(Netty4Transport.CHANNEL_KEY);
-            transport.messageReceived(reference, channelAttribute.get());
+            transport.inboundMessage(channelAttribute.get(), Netty4Utils.toBytesReference(buffer));
         } finally {
-            // Set the expected position of the buffer, no matter what happened
-            buffer.readerIndex(expectedReaderIndex);
+            buffer.release();
         }
     }
 
     @Override
     public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
-        Netty4Utils.maybeDie(cause);
+        ExceptionsHelper.maybeDieOnAnotherThread(cause);
         final Throwable unwrapped = ExceptionsHelper.unwrap(cause, ElasticsearchException.class);
         final Throwable newCause = unwrapped != null ? unwrapped : cause;
         Netty4TcpChannel tcpChannel = ctx.channel().attr(Netty4Transport.CHANNEL_KEY).get();
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoder.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoder.java
index 40eabfc1263..1951d789b65 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoder.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoder.java
@@ -23,7 +23,6 @@ import io.netty.buffer.ByteBuf;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.handler.codec.ByteToMessageDecoder;
 import io.netty.handler.codec.TooLongFrameException;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.transport.TcpHeader;
 import org.elasticsearch.transport.TcpTransport;
 
@@ -36,17 +35,20 @@ final class Netty4SizeHeaderFrameDecoder extends ByteToMessageDecoder {
     @Override
     protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
         try {
-            BytesReference networkBytes = Netty4Utils.toBytesReference(in);
-            int messageLength = TcpTransport.readMessageLength(networkBytes);
-            // If the message length is -1, we have not read a complete header.
-            if (messageLength != -1) {
-                int messageLengthWithHeader = messageLength + HEADER_SIZE;
-                // If the message length is greater than the network bytes available, we have not read a complete frame.
-                if (messageLengthWithHeader <= networkBytes.length()) {
-                    final ByteBuf message = in.skipBytes(HEADER_SIZE);
-                    // 6 bytes would mean it is a ping. And we should ignore.
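The removed body above decoded at most one frame per `decode` call and mutated the shared buffer via `skipBytes`; the replacement hunk that follows loops instead, cutting a `retainedSlice` per complete frame and advancing `readerIndex` explicitly, so ownership of each message passes to the next handler (which must `release()` it). A self-contained sketch of the same looping, length-prefixed decode, under a simplified protocol with a bare 4-byte length header (no 'ES' marker or ping handling):

    import io.netty.buffer.ByteBuf;
    import io.netty.channel.ChannelHandlerContext;
    import io.netty.handler.codec.ByteToMessageDecoder;
    import java.util.List;

    final class LengthPrefixedFrameDecoder extends ByteToMessageDecoder {
        private static final int HEADER_SIZE = 4;

        @Override
        protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
            // Keep slicing frames until the buffer no longer holds a complete one.
            while (in.readableBytes() >= HEADER_SIZE) {
                int messageLength = in.getInt(in.readerIndex());
                if (in.readableBytes() < HEADER_SIZE + messageLength) {
                    break; // incomplete frame; wait for more bytes
                }
                // retainedSlice transfers ownership of the frame downstream.
                out.add(in.retainedSlice(in.readerIndex() + HEADER_SIZE, messageLength));
                in.readerIndex(in.readerIndex() + HEADER_SIZE + messageLength);
            }
        }
    }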
-                    if (messageLengthWithHeader != 6) {
+            boolean continueDecode = true;
+            while (continueDecode) {
+                int messageLength = TcpTransport.readMessageLength(Netty4Utils.toBytesReference(in));
+                if (messageLength == -1) {
+                    continueDecode = false;
+                } else {
+                    int messageLengthWithHeader = messageLength + HEADER_SIZE;
+                    // If the message length is greater than the network bytes available, we have not read a complete frame.
+                    if (messageLengthWithHeader > in.readableBytes()) {
+                        continueDecode = false;
+                    } else {
+                        final ByteBuf message = in.retainedSlice(in.readerIndex() + HEADER_SIZE, messageLength);
                         out.add(message);
+                        in.readerIndex(in.readerIndex() + messageLengthWithHeader);
                     }
                 }
             }
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java
index 51821c73329..d6132b26b08 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java
@@ -22,6 +22,7 @@ package org.elasticsearch.transport.netty4;
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.ChannelPromise;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.concurrent.CompletableContext;
@@ -45,7 +46,7 @@ public class Netty4TcpChannel implements TcpChannel {
             } else {
                 Throwable cause = f.cause();
                 if (cause instanceof Error) {
-                    Netty4Utils.maybeDie(cause);
+                    ExceptionsHelper.maybeDieOnAnotherThread(cause);
                     closeContext.completeExceptionally(new Exception(cause));
                 } else {
                     closeContext.completeExceptionally((Exception) cause);
@@ -97,7 +98,7 @@ public class Netty4TcpChannel implements TcpChannel {
                 listener.onResponse(null);
             } else {
                 final Throwable cause = f.cause();
-                Netty4Utils.maybeDie(cause);
+                ExceptionsHelper.maybeDieOnAnotherThread(cause);
                 if (cause instanceof Error) {
                     listener.onFailure(new Exception(cause));
                 } else {
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java
index 873a6c33fba..9ef3f296f06 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.transport.netty4;
 
 import io.netty.channel.Channel;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.common.concurrent.CompletableContext;
 import org.elasticsearch.transport.TcpServerChannel;
@@ -41,7 +42,7 @@ public class Netty4TcpServerChannel implements TcpServerChannel {
             } else {
                 Throwable cause = f.cause();
                 if (cause instanceof Error) {
-                    Netty4Utils.maybeDie(cause);
+                    ExceptionsHelper.maybeDieOnAnotherThread(cause);
                     closeContext.completeExceptionally(new Exception(cause));
                 } else {
                     closeContext.completeExceptionally((Exception) cause);
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
index e310f3012a9..009a75b3e33 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
@@ -38,7 +38,9 @@ import io.netty.util.AttributeKey;
 import io.netty.util.concurrent.Future;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -95,12 +97,12 @@ public class Netty4Transport extends TcpTransport {
         intSetting("transport.netty.boss_count", 1, 1, Property.NodeScope);
 
-    protected final RecvByteBufAllocator recvByteBufAllocator;
-    protected final int workerCount;
-    protected final ByteSizeValue receivePredictorMin;
-    protected final ByteSizeValue receivePredictorMax;
-    protected volatile Bootstrap bootstrap;
-    protected final Map<String, ServerBootstrap> serverBootstraps = newConcurrentMap();
+    private final RecvByteBufAllocator recvByteBufAllocator;
+    private final int workerCount;
+    private final ByteSizeValue receivePredictorMin;
+    private final ByteSizeValue receivePredictorMax;
+    private volatile Bootstrap clientBootstrap;
+    private final Map<String, ServerBootstrap> serverBootstraps = newConcurrentMap();
 
     public Netty4Transport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
                            NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) {
@@ -123,7 +125,7 @@ public class Netty4Transport extends TcpTransport {
     protected void doStart() {
         boolean success = false;
         try {
-            bootstrap = createBootstrap();
+            clientBootstrap = createClientBootstrap();
             if (NetworkService.NETWORK_SERVER.get(settings)) {
                 for (ProfileSettings profileSettings : profileSettings) {
                     createServerBootstrap(profileSettings);
@@ -139,13 +141,11 @@ public class Netty4Transport extends TcpTransport {
         }
     }
 
-    private Bootstrap createBootstrap() {
+    private Bootstrap createClientBootstrap() {
         final Bootstrap bootstrap = new Bootstrap();
         bootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)));
         bootstrap.channel(NioSocketChannel.class);
 
-        bootstrap.handler(getClientChannelInitializer());
-
         bootstrap.option(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings));
         bootstrap.option(ChannelOption.SO_KEEPALIVE, TCP_KEEP_ALIVE.get(settings));
@@ -164,8 +164,6 @@ public class Netty4Transport extends TcpTransport {
         final boolean reuseAddress = TCP_REUSE_ADDRESS.get(settings);
         bootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
 
-        bootstrap.validate();
-
         return bootstrap;
     }
@@ -214,7 +212,7 @@ public class Netty4Transport extends TcpTransport {
         return new ServerChannelInitializer(name);
     }
 
-    protected ChannelHandler getClientChannelInitializer() {
+    protected ChannelHandler getClientChannelInitializer(DiscoveryNode node) {
         return new ClientChannelInitializer();
     }
 
@@ -222,11 +220,16 @@ public class Netty4Transport extends TcpTransport {
     static final AttributeKey<Netty4TcpServerChannel> SERVER_CHANNEL_KEY = AttributeKey.newInstance("es-server-channel");
 
     @Override
-    protected Netty4TcpChannel initiateChannel(InetSocketAddress address, ActionListener<Void> listener) throws IOException {
-        ChannelFuture channelFuture = bootstrap.connect(address);
+    protected Netty4TcpChannel initiateChannel(DiscoveryNode node, ActionListener<Void> listener) throws IOException {
+        InetSocketAddress address = node.getAddress().address();
+        Bootstrap bootstrapWithHandler = clientBootstrap.clone();
+        bootstrapWithHandler.handler(getClientChannelInitializer(node));
+        bootstrapWithHandler.remoteAddress(address);
+        ChannelFuture channelFuture = bootstrapWithHandler.connect();
+
         Channel channel = channelFuture.channel();
         if (channel == null) {
-            Netty4Utils.maybeDie(channelFuture.cause());
+            ExceptionsHelper.maybeDieOnAnotherThread(channelFuture.cause());
             throw new IOException(channelFuture.cause());
         }
         addClosedExceptionLogger(channel);
@@ -240,7 +243,7 @@ public class Netty4Transport extends TcpTransport {
             } else {
                 Throwable cause = f.cause();
                 if (cause instanceof Error) {
-                    Netty4Utils.maybeDie(cause);
+                    ExceptionsHelper.maybeDieOnAnotherThread(cause);
                     listener.onFailure(new Exception(cause));
                 } else {
                     listener.onFailure((Exception) cause);
@@ -286,9 +289,9 @@ public class Netty4Transport extends TcpTransport {
             }
             serverBootstraps.clear();
 
-            if (bootstrap != null) {
-                bootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS).awaitUninterruptibly();
-                bootstrap = null;
+            if (clientBootstrap != null) {
+                clientBootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS).awaitUninterruptibly();
+                clientBootstrap = null;
             }
         });
     }
@@ -305,7 +308,7 @@ public class Netty4Transport extends TcpTransport {
 
         @Override
         public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
-            Netty4Utils.maybeDie(cause);
+            ExceptionsHelper.maybeDieOnAnotherThread(cause);
            super.exceptionCaught(ctx, cause);
         }
     }
@@ -331,7 +334,7 @@ public class Netty4Transport extends TcpTransport {
 
         @Override
         public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
-            Netty4Utils.maybeDie(cause);
+            ExceptionsHelper.maybeDieOnAnotherThread(cause);
             super.exceptionCaught(ctx, cause);
         }
     }
@@ -349,7 +352,7 @@ public class Netty4Transport extends TcpTransport {
 
         @Override
         public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
-            Netty4Utils.maybeDie(cause);
+            ExceptionsHelper.maybeDieOnAnotherThread(cause);
             Netty4TcpServerChannel serverChannel = ctx.channel().attr(SERVER_CHANNEL_KEY).get();
             if (cause instanceof Error) {
                 onServerException(serverChannel, new Exception(cause));
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java
index 9470424b381..76d7864c716 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java
@@ -27,20 +27,16 @@ import io.netty.channel.ChannelFuture;
 import io.netty.util.NettyRuntime;
 import io.netty.util.internal.logging.InternalLogger;
 import io.netty.util.internal.logging.InternalLoggerFactory;
-import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefIterator;
-import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.logging.ESLoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Locale;
-import java.util.Optional;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 public class Netty4Utils {
@@ -160,35 +156,4 @@ public class Netty4Utils {
             throw closingExceptions;
         }
     }
-
-    /**
-     * If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be
-     * caught and bubbles up to the uncaught exception handler.
-     *
-     * @param cause the throwable to test
-     */
-    public static void maybeDie(final Throwable cause) {
-        final Logger logger = ESLoggerFactory.getLogger(Netty4Utils.class);
-        final Optional<Error> maybeError = ExceptionsHelper.maybeError(cause, logger);
-        if (maybeError.isPresent()) {
-            /*
-             * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many
-             * invocations of user-code in try/catch blocks that swallow all throwables. This means that a rethrow here will not bubble up
-             * to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap
-             * the exception so as to not lose the original cause during exit.
-             */
-            try {
-                // try to log the current stack trace
-                final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace());
-                logger.error("fatal error on the network layer\n{}", formatted);
-            } finally {
-                new Thread(
-                        () -> {
-                            throw maybeError.get();
-                        })
-                        .start();
-            }
-        }
-    }
-
 }
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java
index 17a62b3a440..cfda71f1009 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java
@@ -32,7 +32,6 @@ import org.elasticsearch.test.rest.yaml.ObjectPath;
 
 import java.io.IOException;
 import java.nio.charset.Charset;
-import java.util.Collections;
 import java.util.Map;
 
 import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
@@ -71,7 +70,7 @@ public class Netty4BadRequestIT extends ESRestTestCase {
         final ResponseException e = expectThrows(
                 ResponseException.class,
-                () -> client().performRequest(randomFrom("GET", "POST", "PUT"), path, Collections.emptyMap()));
+                () -> client().performRequest(new Request(randomFrom("GET", "POST", "PUT"), path)));
         assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(BAD_REQUEST.getStatus()));
         assertThat(e, hasToString(containsString("too_long_frame_exception")));
         assertThat(e, hasToString(matches("An HTTP line is larger than \\d+ bytes")));
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java
index acd71749e23..abe02cdf4c1 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java
@@ -26,9 +26,10 @@ import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.MockLogAppender;
 import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.transport.TransportLogger;
 
 @ESIntegTestCase.ClusterScope(numDataNodes = 2)
-@TestLogging(value = "org.elasticsearch.transport.netty4.ESLoggingHandler:trace")
+@TestLogging(value = "org.elasticsearch.transport.netty4.ESLoggingHandler:trace,org.elasticsearch.transport.TransportLogger:trace")
 public class ESLoggingHandlerIT extends ESNetty4IntegTestCase {
 
     private MockLogAppender appender;
@@ -37,11 +38,13 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase {
         super.setUp();
         appender = new MockLogAppender();
         Loggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender);
+        Loggers.addAppender(Loggers.getLogger(TransportLogger.class), appender);
         appender.start();
     }
 
     public void tearDown() throws Exception {
         Loggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender);
+        Loggers.removeAppender(Loggers.getLogger(TransportLogger.class), appender);
         appender.stop();
         super.tearDown();
     }
@@ -56,7 +59,7 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase {
             " WRITE: \\d+B";
         final MockLogAppender.LoggingExpectation writeExpectation =
             new MockLogAppender.PatternSeenEventExcpectation(
-                "hot threads request", ESLoggingHandler.class.getCanonicalName(), Level.TRACE, writePattern);
+                "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern);
 
         final MockLogAppender.LoggingExpectation flushExpectation =
             new MockLogAppender.SeenEventExpectation("flush", ESLoggingHandler.class.getCanonicalName(), Level.TRACE, "*FLUSH*");
@@ -71,7 +74,7 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase {
 
         final MockLogAppender.LoggingExpectation readExpectation =
             new MockLogAppender.PatternSeenEventExcpectation(
-                "hot threads request", ESLoggingHandler.class.getCanonicalName(), Level.TRACE, readPattern);
+                "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern);
 
         appender.addExpectation(writeExpectation);
         appender.addExpectation(flushExpectation);
diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle
index 1883e3bf1b9..a42a28cad4e 100644
--- a/plugins/analysis-icu/build.gradle
+++ b/plugins/analysis-icu/build.gradle
@@ -1,3 +1,5 @@
+import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask
+
 /*
  * Licensed to Elasticsearch under one or more contributor
  * license agreements. See the NOTICE file distributed with
@@ -20,9 +22,10 @@
 esplugin {
   description 'The ICU Analysis plugin integrates Lucene ICU module into elasticsearch, adding ICU relates analysis components.'
   classname 'org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin'
+  hasClientJar = true
 }
 
-forbiddenApis {
+tasks.withType(ForbiddenApisCliTask) {
   signatures += [
     "com.ibm.icu.text.Collator#getInstance() @ Don't use default locale, use getInstance(ULocale) instead"
   ]
diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.5.0-snapshot-13b9e28f9d.jar.sha1
deleted file mode 100644
index 1e79e1e70ef..00000000000
--- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.5.0-snapshot-13b9e28f9d.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a010e852be8d56efe1906e6da5292e4541239724
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-66c671ea80.jar.sha1
new file mode 100644
index 00000000000..7369f427ab2
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-66c671ea80.jar.sha1
@@ -0,0 +1 @@
+f009ee188453aabae77fad55aea08bc60323bb3e
\ No newline at end of file
diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java
index c4c44222f47..0235e6e8136 100644
--- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java
+++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java
@@ -25,7 +25,6 @@ import com.ibm.icu.text.RuleBasedCollator;
 import com.ibm.icu.util.ULocale;
 
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
@@ -35,7 +34,6 @@ import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.Version;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.settings.Settings;
@@ -56,7 +54,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
-import java.util.function.BiFunction;
 import java.util.function.LongSupplier;
 
 public class ICUCollationKeywordFieldMapper extends FieldMapper {
@@ -571,7 +568,6 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
     private final String variableTop;
     private final boolean hiraganaQuaternaryMode;
     private final Collator collator;
-    private final BiFunction<String, BytesRef, Field> getDVField;
 
     protected ICUCollationKeywordFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
                                             Settings indexSettings, MultiFields multiFields, CopyTo copyTo, String rules, String language,
@@ -593,11 +589,6 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
         this.variableTop = variableTop;
         this.hiraganaQuaternaryMode = hiraganaQuaternaryMode;
         this.collator = collator;
-        if (indexCreatedVersion.onOrAfter(Version.V_5_6_0)) {
-            getDVField = SortedSetDocValuesField::new;
-        } else {
-            getDVField = SortedDocValuesField::new;
-        }
     }
 
     @Override
@@ -754,7 +745,7 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
         }
 
         if (fieldType().hasDocValues()) {
-
fields.add(getDVField.apply(fieldType().name(), binaryValue)); + fields.add(new SortedSetDocValuesField(fieldType().name(), binaryValue)); } else if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { createFieldNamesField(context, fields); } diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java index fff25597011..f39ae886dc4 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java @@ -28,11 +28,9 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; @@ -106,50 +104,6 @@ public class ICUCollationKeywordFieldMapperTests extends ESSingleNodeTestCase { assertEquals(DocValuesType.SORTED_SET, fieldType.docValuesType()); } - public void testBackCompat() throws Exception { - indexService = createIndex("oldindex", Settings.builder().put("index.version.created", Version.V_5_5_0).build()); - parser = indexService.mapperService().documentMapperParser(); - - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", FIELD_TYPE).endObject().endObject() - .endObject().endObject()); - - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); - - assertEquals(mapping, mapper.mappingSource().toString()); - - ParsedDocument doc = mapper.parse(SourceToParse.source("oldindex", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", "1234") - .endObject()), - XContentType.JSON)); - - IndexableField[] fields = doc.rootDoc().getFields("field"); - assertEquals(2, fields.length); - - Collator collator = Collator.getInstance(ULocale.ROOT); - RawCollationKey key = collator.getRawCollationKey("1234", null); - BytesRef expected = new BytesRef(key.bytes, 0, key.size); - - assertEquals(expected, fields[0].binaryValue()); - IndexableFieldType fieldType = fields[0].fieldType(); - assertThat(fieldType.omitNorms(), equalTo(true)); - assertFalse(fieldType.tokenized()); - assertFalse(fieldType.stored()); - assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS)); - assertThat(fieldType.storeTermVectors(), equalTo(false)); - assertThat(fieldType.storeTermVectorOffsets(), equalTo(false)); - assertThat(fieldType.storeTermVectorPositions(), equalTo(false)); - assertThat(fieldType.storeTermVectorPayloads(), equalTo(false)); - assertEquals(DocValuesType.NONE, fieldType.docValuesType()); - - assertEquals(expected, fields[1].binaryValue()); - fieldType = fields[1].fieldType(); - assertThat(fieldType.indexOptions(), equalTo(IndexOptions.NONE)); - assertEquals(DocValuesType.SORTED, fieldType.docValuesType()); - } - public void testNullValue() throws IOException { 
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", FIELD_TYPE).endObject().endObject() diff --git a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/20_search.yml b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/20_search.yml index 67ff1dab984..89ef510c72b 100644 --- a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/20_search.yml +++ b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/20_search.yml @@ -12,7 +12,7 @@ analyzer: my_analyzer: tokenizer: standard - filter: ["standard", "lowercase", "my_collator"] + filter: ["lowercase", "my_collator"] filter: my_collator: type: icu_collation diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 2d9669e4362..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -88e0ed90d433a9088528485cd4f59311735d92a4 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..16417bbebd1 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +af3d2ae975e3560c1ea69222d6c46072857952ba \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index f7b8fdd4bc1..00000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0daec9ac3c4bba5f91b1bc413c651b7a98313982 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..9c3524a6789 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +f17bc5e532d9dc2786a13bd577df64023d1baae1 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 80cf627011b..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f5af81eec04c1da0d6969cff18f360ff379b1bf7 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..ac81fdd07c2 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +7ad89d33c1cd960c91afa05b22024137fe108567 \ No newline at end of file diff --git 
a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yml index 1f326fe3776..1be0d8525a1 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yml @@ -13,7 +13,7 @@ analyzer: my_analyzer: tokenizer: standard - filter: ["standard", "lowercase", "my_metaphone"] + filter: ["lowercase", "my_metaphone"] filter: my_metaphone: type: phonetic diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yml index 5af9f48aa80..84b0129414c 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yml @@ -13,7 +13,7 @@ analyzer: my_analyzer: tokenizer: standard - filter: ["standard", "lowercase", "my_metaphone"] + filter: ["lowercase", "my_metaphone"] filter: my_metaphone: type: phonetic diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yml index 259b0adea74..bdd1ddef388 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yml @@ -13,7 +13,7 @@ analyzer: my_analyzer: tokenizer: standard - filter: ["standard", "lowercase", "beider_morse"] + filter: ["lowercase", "beider_morse"] filter: beider_morse: type: phonetic diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/40_search.yml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/40_search.yml index 75c67217239..34a5bfa1da1 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/40_search.yml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/40_search.yml @@ -12,7 +12,7 @@ analyzer: my_analyzer: tokenizer: standard - filter: ["standard", "lowercase", "my_metaphone"] + filter: ["lowercase", "my_metaphone"] filter: my_metaphone: type: phonetic diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yml index c67b6892bc9..bee4c8bf5f4 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yml @@ -13,7 +13,7 @@ analyzer: my_analyzer: tokenizer: standard - filter: ["standard", "lowercase", "daitch_mokotoff"] + filter: ["lowercase", "daitch_mokotoff"] filter: daitch_mokotoff: type: phonetic diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 
14be684b96f..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9e649088ee298293aa95a05391dff9cb0582648e \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..f00a29e7816 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +3f11fb254256d74e911b953994b47e7a95915954 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index ea55c790537..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47fb370054ba7413d050f13c177edf01180c31ca \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..76fa8e90eae --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +b2348d140ef0c3e674cb81173f61c5e5f430facb \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 2d6f580c35a..00000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bc0708acbac195772b67b5ad2e9c4683d27ff450 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..0e2c4d34ef0 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +485a0c3be58a5942b4a28639f1019181ef4cd0e3 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 6f177f7b7f5..3dae3d3642c 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -128,7 +128,7 @@ thirdPartyAudit.excludes = [ ] // jarhell with jdk (intentionally, because jaxb was removed from default modules in java 9) -if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += [ 'javax.xml.bind.Binder', 'javax.xml.bind.ContextFinder$1', diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index b1c3b62fd6e..e32ba6948d6 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -87,7 +87,7 @@ thirdPartyAudit.excludes = [ 'org.apache.log.Logger', ] -if (JavaVersion.current() > JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += [ 
'javax.xml.bind.DatatypeConverter', 'javax.xml.bind.JAXBContext' diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java index 4d264470785..48fa49b9a8a 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java @@ -19,39 +19,33 @@ package org.elasticsearch.discovery.file; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.transport.TransportService; -import java.nio.file.Path; import java.util.Collections; import java.util.Map; import java.util.function.Supplier; -/** - * Plugin for providing file-based unicast hosts discovery. The list of unicast hosts - * is obtained by reading the {@link FileBasedUnicastHostsProvider#UNICAST_HOSTS_FILE} in - * the {@link Environment#configFile()}/discovery-file directory. - */ public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin { - private final Settings settings; - private final Path configPath; + private final DeprecationLogger deprecationLogger; + static final String DEPRECATION_MESSAGE + = "File-based discovery is now built into Elasticsearch and does not require the discovery-file plugin"; - public FileBasedDiscoveryPlugin(Settings settings, Path configPath) { - this.settings = settings; - this.configPath = configPath; + public FileBasedDiscoveryPlugin(Settings settings) { + deprecationLogger = new DeprecationLogger(Loggers.getLogger(this.getClass(), settings)); } @Override public Map> getZenHostsProviders(TransportService transportService, NetworkService networkService) { - return Collections.singletonMap( - "file", - () -> new FileBasedUnicastHostsProvider(new Environment(settings, configPath))); + deprecationLogger.deprecated(DEPRECATION_MESSAGE); + return Collections.emptyMap(); } } diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java deleted file mode 100644 index 584ae4de5a2..00000000000 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.discovery.file; - -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.env.Environment; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; -import java.util.Collections; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * An implementation of {@link UnicastHostsProvider} that reads hosts/ports - * from {@link #UNICAST_HOSTS_FILE}. - * - * Each unicast host/port that is part of the discovery process must be listed on - * a separate line. If the port is left off an entry, a default port of 9300 is - * assumed. An example unicast hosts file could read: - * - * 67.81.244.10 - * 67.81.244.11:9305 - * 67.81.244.15:9400 - */ -class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { - - static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; - - private final Path unicastHostsFilePath; - - FileBasedUnicastHostsProvider(Environment environment) { - super(environment.settings()); - this.unicastHostsFilePath = environment.configFile().resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); - } - - @Override - public List buildDynamicHosts(HostsResolver hostsResolver) { - List hostsList; - try (Stream lines = Files.lines(unicastHostsFilePath)) { - hostsList = lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments - .collect(Collectors.toList()); - } catch (FileNotFoundException | NoSuchFileException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[discovery-file] Failed to find unicast hosts file [{}]", - unicastHostsFilePath), e); - hostsList = Collections.emptyList(); - } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[discovery-file] Error reading unicast hosts file [{}]", - unicastHostsFilePath), e); - hostsList = Collections.emptyList(); - } - - final List dynamicHosts = hostsResolver.resolveHosts(hostsList, 1); - logger.debug("[discovery-file] Using dynamic discovery nodes {}", dynamicHosts); - return dynamicHosts; - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java similarity index 58% rename from server/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java rename to plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java index 2339815b558..643c7b2c95c 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java @@ -17,23 +17,16 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.discovery.file; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.standard.StandardFilter; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.test.ESTestCase; +import static org.elasticsearch.discovery.file.FileBasedDiscoveryPlugin.DEPRECATION_MESSAGE; -public class StandardTokenFilterFactory extends AbstractTokenFilterFactory { - - public StandardTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, name, settings); +public class FileBasedDiscoveryPluginDeprecationTests extends ESTestCase { + public void testDeprecationWarning() { + new FileBasedDiscoveryPlugin(Settings.EMPTY).getZenHostsProviders(null, null); + assertWarnings(DEPRECATION_MESSAGE); } - - @Override - public TokenStream create(TokenStream tokenStream) { - return new StandardFilter(tokenStream); - } -} \ No newline at end of file +} diff --git a/plugins/examples/build.gradle b/plugins/examples/build.gradle index 47db55b3b33..2b9f3c6433d 100644 --- a/plugins/examples/build.gradle +++ b/plugins/examples/build.gradle @@ -3,8 +3,7 @@ gradle.projectsEvaluated { subprojects { Task assemble = project.tasks.findByName('assemble') if (assemble) { - project.tasks.remove(assemble) - project.build.dependsOn.remove('assemble') + assemble.enabled = false } } } diff --git a/plugins/examples/rescore/src/test/java/org/elasticsearch/example/rescore/ExampleRescoreBuilderTests.java b/plugins/examples/rescore/src/test/java/org/elasticsearch/example/rescore/ExampleRescoreBuilderTests.java index d9fc4521a35..36b5bea411a 100644 --- a/plugins/examples/rescore/src/test/java/org/elasticsearch/example/rescore/ExampleRescoreBuilderTests.java +++ b/plugins/examples/rescore/src/test/java/org/elasticsearch/example/rescore/ExampleRescoreBuilderTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.example.rescore; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -68,7 +69,7 @@ public class ExampleRescoreBuilderTests extends AbstractWireSerializingTestCase< String fieldFactor = null; ExampleRescoreBuilder builder = new ExampleRescoreBuilder(factor, fieldFactor).windowSize(2); RescoreContext context = builder.buildContext(null); - TopDocs docs = new TopDocs(10, new ScoreDoc[3], 0); + TopDocs docs = new TopDocs(new TotalHits(10, TotalHits.Relation.EQUAL_TO), new ScoreDoc[3]); docs.scoreDocs[0] = new ScoreDoc(0, 1.0f); docs.scoreDocs[1] = new ScoreDoc(1, 1.0f); docs.scoreDocs[2] = new ScoreDoc(2, 1.0f); diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 1a6aa809de0..f55104f2a96 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -2106,7 +2106,27 @@ thirdPartyAudit.excludes = [ 'ucar.nc2.dataset.NetcdfDataset' ] -if (JavaVersion.current() > JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion == JavaVersion.VERSION_1_8) { + thirdPartyAudit.excludes += [ + // TODO: Why is this needed ? 
+ 'com.sun.javadoc.ClassDoc', + 'com.sun.javadoc.Doc', + 'com.sun.javadoc.Doclet', + 'com.sun.javadoc.ExecutableMemberDoc', + 'com.sun.javadoc.FieldDoc', + 'com.sun.javadoc.MethodDoc', + 'com.sun.javadoc.PackageDoc', + 'com.sun.javadoc.Parameter', + 'com.sun.javadoc.ProgramElementDoc', + 'com.sun.javadoc.RootDoc', + 'com.sun.javadoc.SourcePosition', + 'com.sun.javadoc.Tag', + 'com.sun.javadoc.Type', + 'com.sun.tools.javadoc.Main' + ] +} + +if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += [ 'javax.activation.ActivationDataFlavor', 'javax.activation.CommandMap', @@ -2121,3 +2141,9 @@ if (JavaVersion.current() > JavaVersion.VERSION_1_8) { 'javax.xml.bind.Unmarshaller' ] } + +if (project.inFipsJvm) { + // The FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit; + // rather than provide a long list of exclusions, disable the check on FIPS. + thirdPartyAudit.enabled = false +} diff --git a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java index 9fb2debcb54..c8a24ad3c87 100644 --- a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java +++ b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java @@ -73,13 +73,13 @@ public final class AttachmentProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument ingestDocument) { + public IngestDocument execute(IngestDocument ingestDocument) { Map additionalFields = new HashMap<>(); byte[] input = ingestDocument.getFieldValueAsBytes(field, ignoreMissing); if (input == null && ignoreMissing) { - return; + return ingestDocument; } else if (input == null) { throw new IllegalArgumentException("field [" + field + "] is null, cannot parse."); } @@ -164,6 +164,7 @@ public final class AttachmentProcessor extends AbstractProcessor { } ingestDocument.setFieldValue(targetField, additionalFields); + return ingestDocument; } @Override diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java deleted file mode 100644 index 83a3374b504..00000000000 --- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ -package org.elasticsearch.ingest.geoip; - -import com.fasterxml.jackson.databind.JsonNode; -import com.maxmind.db.NodeCache; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.cache.Cache; -import org.elasticsearch.common.cache.CacheBuilder; - -import java.io.IOException; -import java.util.concurrent.ExecutionException; - -final class GeoIpCache implements NodeCache { - private final Cache cache; - - GeoIpCache(long maxSize) { - this.cache = CacheBuilder.builder().setMaximumWeight(maxSize).build(); - } - - @Override - public JsonNode get(int key, Loader loader) throws IOException { - try { - return cache.computeIfAbsent(key, loader::load); - } catch (ExecutionException e) { - Throwable cause = e.getCause() != null ? e.getCause() : e; - throw new ElasticsearchException(cause); - } - } -} diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index 366b6ffc1d2..a0be7557a5a 100644 --- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache; import java.net.InetAddress; import java.security.AccessController; @@ -66,14 +67,18 @@ public final class GeoIpProcessor extends AbstractProcessor { private final DatabaseReader dbReader; private final Set properties; private final boolean ignoreMissing; + private final GeoIpCache cache; - GeoIpProcessor(String tag, String field, DatabaseReader dbReader, String targetField, Set properties, boolean ignoreMissing) { + + GeoIpProcessor(String tag, String field, DatabaseReader dbReader, String targetField, Set properties, boolean ignoreMissing, + GeoIpCache cache) { super(tag); this.field = field; this.targetField = targetField; this.dbReader = dbReader; this.properties = properties; this.ignoreMissing = ignoreMissing; + this.cache = cache; } boolean isIgnoreMissing() { @@ -81,11 +86,11 @@ public final class GeoIpProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument ingestDocument) { + public IngestDocument execute(IngestDocument ingestDocument) { String ip = ingestDocument.getFieldValue(field, String.class, ignoreMissing); if (ip == null && ignoreMissing) { - return; + return ingestDocument; } else if (ip == null) { throw new IllegalArgumentException("field [" + field + "] is null, cannot extract geoip information."); } @@ -120,6 +125,7 @@ public final class GeoIpProcessor extends AbstractProcessor { if (geoData.isEmpty() == false) { ingestDocument.setFieldValue(targetField, geoData); } + return ingestDocument; } @Override @@ -145,15 +151,16 @@ public final class GeoIpProcessor extends AbstractProcessor { private Map retrieveCityGeoData(InetAddress ipAddress) { SpecialPermission.check(); - CityResponse response = AccessController.doPrivileged((PrivilegedAction) () -> { - try { - return dbReader.city(ipAddress); - } catch (AddressNotFoundException e) { - throw new AddressNotFoundRuntimeException(e); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); + CityResponse response = AccessController.doPrivileged((PrivilegedAction) () -> + 
cache.putIfAbsent(ipAddress, CityResponse.class, ip -> { + try { + return dbReader.city(ip); + } catch (AddressNotFoundException e) { + throw new AddressNotFoundRuntimeException(e); + } catch (Exception e) { + throw new RuntimeException(e); + } + })); Country country = response.getCountry(); City city = response.getCity(); @@ -230,15 +237,16 @@ public final class GeoIpProcessor extends AbstractProcessor { private Map retrieveCountryGeoData(InetAddress ipAddress) { SpecialPermission.check(); - CountryResponse response = AccessController.doPrivileged((PrivilegedAction) () -> { - try { - return dbReader.country(ipAddress); - } catch (AddressNotFoundException e) { - throw new AddressNotFoundRuntimeException(e); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); + CountryResponse response = AccessController.doPrivileged((PrivilegedAction) () -> + cache.putIfAbsent(ipAddress, CountryResponse.class, ip -> { + try { + return dbReader.country(ip); + } catch (AddressNotFoundException e) { + throw new AddressNotFoundRuntimeException(e); + } catch (Exception e) { + throw new RuntimeException(e); + } + })); Country country = response.getCountry(); Continent continent = response.getContinent(); @@ -274,15 +282,16 @@ public final class GeoIpProcessor extends AbstractProcessor { private Map retrieveAsnGeoData(InetAddress ipAddress) { SpecialPermission.check(); - AsnResponse response = AccessController.doPrivileged((PrivilegedAction) () -> { - try { - return dbReader.asn(ipAddress); - } catch (AddressNotFoundException e) { - throw new AddressNotFoundRuntimeException(e); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); + AsnResponse response = AccessController.doPrivileged((PrivilegedAction) () -> + cache.putIfAbsent(ipAddress, AsnResponse.class, ip -> { + try { + return dbReader.asn(ip); + } catch (AddressNotFoundException e) { + throw new AddressNotFoundRuntimeException(e); + } catch (Exception e) { + throw new RuntimeException(e); + } + })); Integer asn = response.getAutonomousSystemNumber(); String organization_name = response.getAutonomousSystemOrganization(); @@ -321,9 +330,11 @@ public final class GeoIpProcessor extends AbstractProcessor { ); private final Map databaseReaders; + private final GeoIpCache cache; - public Factory(Map databaseReaders) { + public Factory(Map databaseReaders, GeoIpCache cache) { this.databaseReaders = databaseReaders; + this.cache = cache; } @Override @@ -367,14 +378,15 @@ public final class GeoIpProcessor extends AbstractProcessor { } } - return new GeoIpProcessor(processorTag, ipField, databaseReader, targetField, properties, ignoreMissing); + return new GeoIpProcessor(processorTag, ipField, databaseReader, targetField, properties, ignoreMissing, cache); } } // Geoip2's AddressNotFoundException is checked and due to the fact that we need run their code // inside a PrivilegedAction code block, we are forced to catch any checked exception and rethrow // it with an unchecked exception. 
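A self-contained sketch of the wrapping pattern described in the comment above; the names and the file-reading body are illustrative, not part of this change. A PrivilegedAction's run() declares no checked exceptions, so any checked exception thrown by the privileged code must be caught and rethrown wrapped in an unchecked type.

    // Standalone sketch: checked exceptions cannot escape a PrivilegedAction
    // lambda directly, so they are rethrown wrapped in an unchecked carrier.
    import java.io.IOException;
    import java.io.UncheckedIOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.AccessController;
    import java.security.PrivilegedAction;

    final class PrivilegedReadSketch {
        static String read(final Path path) {
            return AccessController.doPrivileged((PrivilegedAction<String>) () -> {
                try {
                    return new String(Files.readAllBytes(path), StandardCharsets.UTF_8);
                } catch (IOException e) {
                    throw new UncheckedIOException(e); // carry the checked cause out unchecked
                }
            });
        }
    }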
- private static final class AddressNotFoundRuntimeException extends RuntimeException { + //package private for testing + static final class AddressNotFoundRuntimeException extends RuntimeException { AddressNotFoundRuntimeException(Throwable cause) { super(cause); diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index c9c742d1789..95e20f340b5 100644 --- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -23,16 +23,20 @@ import com.maxmind.db.NoCache; import com.maxmind.db.NodeCache; import com.maxmind.db.Reader; import com.maxmind.geoip2.DatabaseReader; -import org.elasticsearch.core.internal.io.IOUtils; +import com.maxmind.geoip2.model.AbstractResponse; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ingest.Processor; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; import java.io.Closeable; import java.io.IOException; +import java.net.InetAddress; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.PathMatcher; @@ -42,6 +46,8 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.function.Function; import java.util.stream.Stream; public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable { @@ -61,24 +67,18 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable throw new IllegalStateException("getProcessors called twice for geoip plugin!!"); } Path geoIpConfigDirectory = parameters.env.configFile().resolve("ingest-geoip"); - NodeCache cache; long cacheSize = CACHE_SIZE.get(parameters.env.settings()); - if (cacheSize > 0) { - cache = new GeoIpCache(cacheSize); - } else { - cache = NoCache.getInstance(); - } try { - databaseReaders = loadDatabaseReaders(geoIpConfigDirectory, cache); + databaseReaders = loadDatabaseReaders(geoIpConfigDirectory); } catch (IOException e) { throw new RuntimeException(e); } - return Collections.singletonMap(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders)); + return Collections.singletonMap(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(cacheSize))); } - static Map loadDatabaseReaders(Path geoIpConfigDirectory, NodeCache cache) throws IOException { + static Map loadDatabaseReaders(Path geoIpConfigDirectory) throws IOException { if (Files.exists(geoIpConfigDirectory) == false && Files.isDirectory(geoIpConfigDirectory)) { - throw new IllegalStateException("the geoip directory [" + geoIpConfigDirectory + "] containing databases doesn't exist"); + throw new IllegalStateException("the geoip directory [" + geoIpConfigDirectory + "] containing databases doesn't exist"); } boolean loadDatabaseOnHeap = Booleans.parseBoolean(System.getProperty("es.geoip.load_db_on_heap", "false")); Map databaseReaders = new HashMap<>(); @@ -92,7 +92,7 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable String databaseFileName = databasePath.getFileName().toString(); 
DatabaseReaderLazyLoader holder = new DatabaseReaderLazyLoader(databaseFileName, () -> { - DatabaseReader.Builder builder = createDatabaseBuilder(databasePath).withCache(cache); + DatabaseReader.Builder builder = createDatabaseBuilder(databasePath).withCache(NoCache.getInstance()); if (loadDatabaseOnHeap) { builder.fileMode(Reader.FileMode.MEMORY); } else { @@ -119,4 +119,75 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable } } + /** + * The in-memory cache for the geoip data. There should be only one instance of this class. + * This cache differs from maxmind's {@link NodeCache} in that it stores the deserialized JSON objects, avoiding the + * cost of deserialization for each lookup (cached or not). This comes at the slight expense of higher memory usage, but yields a + * significant reduction in CPU usage. + */ + static class GeoIpCache { + private final Cache<CacheKey, AbstractResponse> cache; + + //package private for testing + GeoIpCache(long maxSize) { + if (maxSize < 0) { + throw new IllegalArgumentException("geoip max cache size must be 0 or greater"); + } + this.cache = CacheBuilder.builder().setMaximumWeight(maxSize).build(); + } + + <T extends AbstractResponse> T putIfAbsent(InetAddress ip, Class<T> responseType, + Function<InetAddress, AbstractResponse> retrieveFunction) { + + //can't use cache.computeIfAbsent due to the elevated permissions for the jackson (run via the cache loader) + CacheKey<T> cacheKey = new CacheKey<>(ip, responseType); + //intentionally non-locking for simplicity...it's OK if we re-put the same key/value in the cache during a race condition. + AbstractResponse response = cache.get(cacheKey); + if (response == null) { + response = retrieveFunction.apply(ip); + cache.put(cacheKey, response); + } + return responseType.cast(response); + } + + //only useful for testing + <T extends AbstractResponse> T get(InetAddress ip, Class<T> responseType) { + CacheKey<T> cacheKey = new CacheKey<>(ip, responseType); + return responseType.cast(cache.get(cacheKey)); + } + + /** + * The key to use for the cache. Since this cache can span multiple geoip processors that all use different databases, the response + * type needs to be included in the cache key. For example, if we used only the IP address as the key, the City and ASN databases + * could each hold a different value for the same IP, and we need to cache both. The response type scopes the IP to the correct + * database and provides a means to safely cast the returned objects. + * @param <T> the AbstractResponse type used to scope the key and cast the result. + */ + private static class CacheKey<T extends AbstractResponse> { + + private final InetAddress ip; + private final Class<T> responseType; + + private CacheKey(InetAddress ip, Class<T> responseType) { + this.ip = ip; + this.responseType = responseType; + } + + //generated + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CacheKey cacheKey = (CacheKey) o; + return Objects.equals(ip, cacheKey.ip) && + Objects.equals(responseType, cacheKey.responseType); + } + + //generated + @Override + public int hashCode() { + return Objects.hash(ip, responseType); + } + } + } }
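A standalone illustration of the CacheKey rationale documented above, using hypothetical stand-in value types rather than the real maxmind response classes: keyed on the IP alone, the City and ASN entries for the same address would collide, so the key pairs the IP with a type token.

    // Illustration only, not the GeoIpCache class above: a map keyed on
    // (address, type) keeps per-database entries for the same IP side by side.
    import java.net.InetAddress;
    import java.util.AbstractMap.SimpleImmutableEntry;
    import java.util.HashMap;
    import java.util.Map;

    final class GeoCacheKeySketch {
        public static void main(final String[] args) throws Exception {
            final InetAddress ip = InetAddress.getByName("8.8.8.8");
            final Map<SimpleImmutableEntry<InetAddress, Class<?>>, Object> cache = new HashMap<>();

            cache.put(new SimpleImmutableEntry<>(ip, String.class), "Mountain View"); // stand-in for a CityResponse
            cache.put(new SimpleImmutableEntry<>(ip, Integer.class), 15169);          // stand-in for an AsnResponse

            // Both entries survive side by side; an IP-only key would keep just one.
            System.out.println(cache.get(new SimpleImmutableEntry<>(ip, String.class)));
            System.out.println(cache.get(new SimpleImmutableEntry<>(ip, Integer.class)));
        }
    }

diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpCacheTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpCacheTests.java deleted file mode 100644 index 71cab99115f..00000000000 --- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpCacheTests.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements.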
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.ingest.geoip; - -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.node.IntNode; -import com.maxmind.db.NodeCache; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.test.ESTestCase; - -public class GeoIpCacheTests extends ESTestCase { - public void testCachesAndEvictsResults() throws Exception { - GeoIpCache cache = new GeoIpCache(1); - final NodeCache.Loader loader = key -> new IntNode(key); - - JsonNode jsonNode1 = cache.get(1, loader); - assertSame(jsonNode1, cache.get(1, loader)); - - // evict old key by adding another value - cache.get(2, loader); - - assertNotSame(jsonNode1, cache.get(1, loader)); - } - - public void testThrowsElasticsearchException() throws Exception { - GeoIpCache cache = new GeoIpCache(1); - NodeCache.Loader loader = (int key) -> { - throw new IllegalArgumentException("Illegal key"); - }; - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> cache.get(1, loader)); - assertTrue("Expected cause to be of type IllegalArgumentException but was [" + ex.getCause().getClass() + "]", - ex.getCause() instanceof IllegalArgumentException); - assertEquals("Illegal key", ex.getCause().getMessage()); - } -} diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 7a5d6f5808f..316cfbc152c 100644 --- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -20,11 +20,10 @@ package org.elasticsearch.ingest.geoip; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import com.maxmind.db.NoCache; -import com.maxmind.db.NodeCache; import org.apache.lucene.util.Constants; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Randomness; +import org.elasticsearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.StreamsUtils; import org.junit.AfterClass; @@ -69,8 +68,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { Files.copy(new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-ASN.mmdb")), geoIpConfigDir.resolve("GeoLite2-ASN.mmdb")); - NodeCache cache = randomFrom(NoCache.getInstance(), new GeoIpCache(randomNonNegativeLong())); - databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpConfigDir, cache); + databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpConfigDir); } @AfterClass @@ -92,7 +90,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { // This test uses a MappedByteBuffer which will 
keep the file mappings active until it is garbage-collected. // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it. assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); Map config = new HashMap<>(); config.put("field", "_field"); @@ -111,7 +109,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected. // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it. assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); Map config = new HashMap<>(); config.put("field", "_field"); @@ -131,7 +129,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected. // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it. assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); Map config = new HashMap<>(); config.put("field", "_field"); @@ -152,7 +150,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected. // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it. assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); Map config = new HashMap<>(); config.put("field", "_field"); @@ -173,7 +171,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected. // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it. assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); Map config = new HashMap<>(); config.put("field", "_field"); config.put("target_field", "_field"); @@ -187,7 +185,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected. // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it. 
assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); Map config = new HashMap<>(); config.put("field", "_field"); config.put("database_file", "GeoLite2-Country.mmdb"); @@ -203,7 +201,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected. // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it. assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); Map config = new HashMap<>(); config.put("field", "_field"); config.put("database_file", "GeoLite2-Country.mmdb"); @@ -220,7 +218,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected. // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it. assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); Map config = new HashMap<>(); config.put("field", "_field"); config.put("database_file", "GeoLite2-ASN.mmdb"); @@ -237,7 +235,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected. // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it. assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); Map config = new HashMap<>(); config.put("field", "_field"); @@ -250,7 +248,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected. // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it. assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); Set properties = EnumSet.noneOf(GeoIpProcessor.Property.class); List fieldNames = new ArrayList<>(); @@ -277,7 +275,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected. // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it. 
assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); Map config1 = new HashMap<>(); config1.put("field", "_field"); @@ -311,8 +309,8 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { // database readers used at class level are reused between tests. (we want to keep that otherwise running this // test will take roughly 4 times more time) Map databaseReaders = - IngestGeoIpPlugin.loadDatabaseReaders(geoIpConfigDir, NoCache.getInstance()); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders); + IngestGeoIpPlugin.loadDatabaseReaders(geoIpConfigDir); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); for (DatabaseReaderLazyLoader lazyLoader : databaseReaders.values()) { assertNull(lazyLoader.databaseReader.get()); } diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java index 4c04d4e340a..4da680f186e 100644 --- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java @@ -20,8 +20,9 @@ package org.elasticsearch.ingest.geoip; import com.maxmind.geoip2.DatabaseReader; -import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.RandomDocumentPicks; +import org.elasticsearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache; import org.elasticsearch.test.ESTestCase; import java.io.InputStream; @@ -40,7 +41,8 @@ public class GeoIpProcessorTests extends ESTestCase { public void testCity() throws Exception { InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb"); GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field", - new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false); + new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false, + new GeoIpCache(1000)); Map document = new HashMap<>(); document.put("source_field", "8.8.8.8"); @@ -64,7 +66,8 @@ public class GeoIpProcessorTests extends ESTestCase { public void testNullValueWithIgnoreMissing() throws Exception { InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb"); GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field", - new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), true); + new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), true, + new GeoIpCache(1000)); IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("source_field", null)); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); @@ -75,7 +78,8 @@ public class GeoIpProcessorTests extends ESTestCase { public void testNonExistentWithIgnoreMissing() throws Exception { InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb"); GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field", - new 
DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), true); + new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), true, + new GeoIpCache(1000)); IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); processor.execute(ingestDocument); @@ -85,7 +89,8 @@ public class GeoIpProcessorTests extends ESTestCase { public void testNullWithoutIgnoreMissing() throws Exception { InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb"); GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field", - new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false); + new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false, + new GeoIpCache(1000)); IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("source_field", null)); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); @@ -96,7 +101,8 @@ public class GeoIpProcessorTests extends ESTestCase { public void testNonExistentWithoutIgnoreMissing() throws Exception { InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb"); GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field", - new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false); + new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false, + new GeoIpCache(1000)); IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); Exception exception = expectThrows(Exception.class, () -> processor.execute(ingestDocument)); @@ -106,7 +112,8 @@ public class GeoIpProcessorTests extends ESTestCase { public void testCity_withIpV6() throws Exception { InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb"); GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field", - new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false); + new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false, + new GeoIpCache(1000)); String address = "2602:306:33d3:8000::3257:9652"; Map<String, Object> document = new HashMap<>(); @@ -135,7 +142,8 @@ public class GeoIpProcessorTests extends ESTestCase { public void testCityWithMissingLocation() throws Exception { InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb"); GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field", - new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false); + new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false, + new GeoIpCache(1000)); Map<String, Object> document = new HashMap<>(); document.put("source_field", "80.231.5.0"); @@ -152,7 +160,8 @@ public class GeoIpProcessorTests extends ESTestCase { public void testCountry() throws Exception { InputStream database = getDatabaseFileInputStream("/GeoLite2-Country.mmdb");
GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field", - new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false); + new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false, + new GeoIpCache(1000)); Map<String, Object> document = new HashMap<>(); document.put("source_field", "82.170.213.79"); @@ -172,7 +181,8 @@ public class GeoIpProcessorTests extends ESTestCase { public void testCountryWithMissingLocation() throws Exception { InputStream database = getDatabaseFileInputStream("/GeoLite2-Country.mmdb"); GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field", - new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false); + new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false, + new GeoIpCache(1000)); Map<String, Object> document = new HashMap<>(); document.put("source_field", "80.231.5.0"); @@ -190,7 +200,8 @@ public class GeoIpProcessorTests extends ESTestCase { String ip = "82.170.213.79"; InputStream database = getDatabaseFileInputStream("/GeoLite2-ASN.mmdb"); GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field", - new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false); + new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false, + new GeoIpCache(1000)); Map<String, Object> document = new HashMap<>(); document.put("source_field", ip); @@ -209,7 +220,8 @@ public class GeoIpProcessorTests extends ESTestCase { public void testAddressIsNotInTheDatabase() throws Exception { InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb"); GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field", - new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false); + new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false, + new GeoIpCache(1000)); Map<String, Object> document = new HashMap<>(); document.put("source_field", "127.0.0.1"); @@ -222,7 +234,8 @@ public class GeoIpProcessorTests extends ESTestCase { public void testInvalid() throws Exception { InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb"); GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field", - new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false); + new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false, + new GeoIpCache(1000)); Map<String, Object> document = new HashMap<>(); document.put("source_field", "www.google.com"); diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpPluginTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpPluginTests.java new file mode 100644 index 00000000000..884056bb0be --- /dev/null +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpPluginTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.geoip; + +import com.maxmind.geoip2.model.AbstractResponse; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache; +import org.elasticsearch.test.ESTestCase; + +import static org.mockito.Mockito.mock; + +public class IngestGeoIpPluginTests extends ESTestCase { + + public void testCachesAndEvictsResults() { + GeoIpCache cache = new GeoIpCache(1); + AbstractResponse response1 = mock(AbstractResponse.class); + AbstractResponse response2 = mock(AbstractResponse.class); + + //add a key + AbstractResponse cachedResponse = cache.putIfAbsent(InetAddresses.forString("127.0.0.1"), AbstractResponse.class, ip -> response1); + assertSame(cachedResponse, response1); + assertSame(cachedResponse, cache.putIfAbsent(InetAddresses.forString("127.0.0.1"), AbstractResponse.class, ip -> response1)); + assertSame(cachedResponse, cache.get(InetAddresses.forString("127.0.0.1"), AbstractResponse.class)); + + + // evict old key by adding another value + cachedResponse = cache.putIfAbsent(InetAddresses.forString("127.0.0.2"), AbstractResponse.class, ip -> response2); + assertSame(cachedResponse, response2); + assertSame(cachedResponse, cache.putIfAbsent(InetAddresses.forString("127.0.0.2"), AbstractResponse.class, ip -> response2)); + assertSame(cachedResponse, cache.get(InetAddresses.forString("127.0.0.2"), AbstractResponse.class)); + + assertNotSame(response1, cache.get(InetAddresses.forString("127.0.0.1"), AbstractResponse.class)); + } + + public void testThrowsFunctionsException() { + GeoIpCache cache = new GeoIpCache(1); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> cache.putIfAbsent(InetAddresses.forString("127.0.0.1"), AbstractResponse.class, + ip -> { throw new IllegalArgumentException("bad"); })); + assertEquals("bad", ex.getMessage()); + } + + public void testInvalidInit() { + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new GeoIpCache(-1)); + assertEquals("geoip max cache size must be 0 or greater", ex.getMessage()); + } +} diff --git a/plugins/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java b/plugins/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java index 93f210c427b..6e7f588f0bd 100644 --- a/plugins/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java +++ b/plugins/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java @@ -63,11 +63,11 @@ public class UserAgentProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument ingestDocument) throws Exception { + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { String userAgent = ingestDocument.getFieldValue(field, String.class, ignoreMissing); if (userAgent == null && 
ignoreMissing) { - return; + return ingestDocument; } else if (userAgent == null) { throw new IllegalArgumentException("field [" + field + "] is null, cannot parse user-agent."); } @@ -144,6 +144,7 @@ public class UserAgentProcessor extends AbstractProcessor { } ingestDocument.setFieldValue(targetField, uaDetails); + return ingestDocument; } /** To maintain compatibility with logstash-filter-useragent */ diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index a6dc27b1f8a..50af824fae9 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -93,10 +92,6 @@ public class Murmur3FieldMapper extends FieldMapper { throw new MapperParsingException("Setting [index] cannot be modified for field [" + name + "]"); } - if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha2)) { - node.remove("precision_step"); - } - TypeParsers.parseField(builder, name, node, parserContext); return builder; diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index 04ab7ecd245..393275f0c39 100644 --- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -82,10 +82,6 @@ public class SizeFieldMapper extends MetadataFieldMapper { @Override public SizeFieldMapper build(BuilderContext context) { setupFieldType(context); - if (context.indexCreatedVersion().onOrBefore(Version.V_5_0_0_alpha4)) { - // Make sure that the doc_values are disabled on indices created before V_5_0_0_alpha4 - fieldType.setHasDocValues(false); - } return new SizeFieldMapper(enabledState, fieldType, context.indexSettings()); } } @@ -149,9 +145,8 @@ public class SizeFieldMapper extends MetadataFieldMapper { } @Override - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) throws IOException { // nothing to do here, we call the parent in postParse - return null; } @Override diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 07ef4b4be5e..510c101379d 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -23,28 +23,38 @@ esplugin { } dependencies { - compile 'com.google.cloud:google-cloud-storage:1.28.0' - compile 'com.google.cloud:google-cloud-core:1.28.0' - compile 'com.google.cloud:google-cloud-core-http:1.28.0' - compile 'com.google.auth:google-auth-library-oauth2-http:0.9.1' - compile 'com.google.auth:google-auth-library-credentials:0.9.1' - compile 'com.google.oauth-client:google-oauth-client:1.23.0' - compile 'com.google.http-client:google-http-client:1.23.0' - compile 
'com.google.http-client:google-http-client-jackson:1.23.0' - compile 'com.google.http-client:google-http-client-jackson2:1.23.0' - compile 'com.google.http-client:google-http-client-appengine:1.23.0' - compile 'com.google.api-client:google-api-client:1.23.0' - compile 'com.google.api:gax:1.25.0' - compile 'com.google.api:gax-httpjson:0.40.0' - compile 'com.google.api:api-common:1.5.0' - compile 'com.google.api.grpc:proto-google-common-protos:1.8.0' + compile 'com.google.cloud:google-cloud-storage:1.40.0' + compile 'com.google.cloud:google-cloud-core:1.40.0' compile 'com.google.guava:guava:20.0' - compile 'com.google.apis:google-api-services-storage:v1-rev115-1.23.0' - compile 'org.codehaus.jackson:jackson-core-asl:1.9.13' - compile 'io.grpc:grpc-context:1.9.0' - compile 'io.opencensus:opencensus-api:0.11.1' - compile 'io.opencensus:opencensus-contrib-http-util:0.11.1' - compile 'org.threeten:threetenbp:1.3.6' + compile 'joda-time:joda-time:2.10' + compile 'com.google.http-client:google-http-client:1.24.1' + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "commons-codec:commons-codec:${versions.commonscodec}" + compile 'com.google.api:api-common:1.7.0' + compile 'com.google.api:gax:1.30.0' + compile 'org.threeten:threetenbp:1.3.3' + compile 'com.google.protobuf:protobuf-java-util:3.6.0' + compile 'com.google.protobuf:protobuf-java:3.6.0' + compile 'com.google.code.gson:gson:2.7' + compile 'com.google.api.grpc:proto-google-common-protos:1.12.0' + compile 'com.google.api.grpc:proto-google-iam-v1:0.12.0' + compile 'com.google.cloud:google-cloud-core-http:1.40.0' + compile 'com.google.auth:google-auth-library-credentials:0.10.0' + compile 'com.google.auth:google-auth-library-oauth2-http:0.10.0' + compile 'com.google.oauth-client:google-oauth-client:1.24.1' + compile 'com.google.api-client:google-api-client:1.24.1' + compile 'com.google.http-client:google-http-client-appengine:1.24.1' + compile 'com.google.http-client:google-http-client-jackson:1.24.1' + compile 'org.codehaus.jackson:jackson-core-asl:1.9.11' + compile 'com.google.http-client:google-http-client-jackson2:1.24.1' + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + compile 'com.google.api:gax-httpjson:0.47.0' + compile 'io.opencensus:opencensus-api:0.15.0' + compile 'io.grpc:grpc-context:1.12.0' + compile 'io.opencensus:opencensus-contrib-http-util:0.15.0' + compile 'com.google.apis:google-api-services-storage:v1-rev135-1.24.1' } dependencyLicenses { @@ -52,10 +62,18 @@ dependencyLicenses { mapping from: /google-auth-.*/, to: 'google-auth' mapping from: /google-http-.*/, to: 'google-http' mapping from: /opencensus.*/, to: 'opencensus' + mapping from: /jackson-.*/, to: 'jackson' + mapping from: /http.*/, to: 'httpclient' + mapping from: /protobuf.*/, to: 'protobuf' + mapping from: /proto-google.*/, to: 'proto-google' } thirdPartyAudit.excludes = [ // uses internal java api: sun.misc.Unsafe + 'com.google.protobuf.UnsafeUtil', + 'com.google.protobuf.UnsafeUtil$1', + 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', + 'com.google.protobuf.UnsafeUtil$MemoryAccessor', 'com.google.common.cache.Striped64', 'com.google.common.cache.Striped64$1', 'com.google.common.cache.Striped64$Cell', @@ -87,139 +105,13 @@ thirdPartyAudit.excludes = [ 'com.google.appengine.api.urlfetch.HTTPResponse', 'com.google.appengine.api.urlfetch.URLFetchService', 
'com.google.appengine.api.urlfetch.URLFetchServiceFactory', - 'com.google.gson.Gson', - 'com.google.gson.GsonBuilder', - 'com.google.gson.TypeAdapter', - 'com.google.gson.stream.JsonReader', - 'com.google.gson.stream.JsonWriter', - 'com.google.iam.v1.Binding$Builder', - 'com.google.iam.v1.Binding', - 'com.google.iam.v1.Policy$Builder', - 'com.google.iam.v1.Policy', - 'com.google.protobuf.AbstractMessageLite$Builder', - 'com.google.protobuf.AbstractParser', - 'com.google.protobuf.Any$Builder', - 'com.google.protobuf.Any', - 'com.google.protobuf.AnyOrBuilder', - 'com.google.protobuf.AnyProto', - 'com.google.protobuf.Api$Builder', - 'com.google.protobuf.Api', - 'com.google.protobuf.ApiOrBuilder', - 'com.google.protobuf.ApiProto', - 'com.google.protobuf.ByteString', - 'com.google.protobuf.CodedInputStream', - 'com.google.protobuf.CodedOutputStream', - 'com.google.protobuf.DescriptorProtos', - 'com.google.protobuf.Descriptors$Descriptor', - 'com.google.protobuf.Descriptors$EnumDescriptor', - 'com.google.protobuf.Descriptors$EnumValueDescriptor', - 'com.google.protobuf.Descriptors$FieldDescriptor', - 'com.google.protobuf.Descriptors$FileDescriptor$InternalDescriptorAssigner', - 'com.google.protobuf.Descriptors$FileDescriptor', - 'com.google.protobuf.Descriptors$OneofDescriptor', - 'com.google.protobuf.Duration$Builder', - 'com.google.protobuf.Duration', - 'com.google.protobuf.DurationOrBuilder', - 'com.google.protobuf.DurationProto', - 'com.google.protobuf.EmptyProto', - 'com.google.protobuf.Enum$Builder', - 'com.google.protobuf.Enum', - 'com.google.protobuf.EnumOrBuilder', - 'com.google.protobuf.ExtensionRegistry', - 'com.google.protobuf.ExtensionRegistryLite', - 'com.google.protobuf.FloatValue$Builder', - 'com.google.protobuf.FloatValue', - 'com.google.protobuf.FloatValueOrBuilder', - 'com.google.protobuf.GeneratedMessage$GeneratedExtension', - 'com.google.protobuf.GeneratedMessage', - 'com.google.protobuf.GeneratedMessageV3$Builder', - 'com.google.protobuf.GeneratedMessageV3$BuilderParent', - 'com.google.protobuf.GeneratedMessageV3$FieldAccessorTable', - 'com.google.protobuf.GeneratedMessageV3', - 'com.google.protobuf.Internal$EnumLite', - 'com.google.protobuf.Internal$EnumLiteMap', - 'com.google.protobuf.Internal', - 'com.google.protobuf.InvalidProtocolBufferException', - 'com.google.protobuf.LazyStringArrayList', - 'com.google.protobuf.LazyStringList', - 'com.google.protobuf.MapEntry$Builder', - 'com.google.protobuf.MapEntry', - 'com.google.protobuf.MapField', - 'com.google.protobuf.Message', - 'com.google.protobuf.MessageOrBuilder', - 'com.google.protobuf.Parser', - 'com.google.protobuf.ProtocolMessageEnum', - 'com.google.protobuf.ProtocolStringList', - 'com.google.protobuf.RepeatedFieldBuilderV3', - 'com.google.protobuf.SingleFieldBuilderV3', - 'com.google.protobuf.Struct$Builder', - 'com.google.protobuf.Struct', - 'com.google.protobuf.StructOrBuilder', - 'com.google.protobuf.StructProto', - 'com.google.protobuf.Timestamp$Builder', - 'com.google.protobuf.Timestamp', - 'com.google.protobuf.TimestampProto', - 'com.google.protobuf.Type$Builder', - 'com.google.protobuf.Type', - 'com.google.protobuf.TypeOrBuilder', - 'com.google.protobuf.TypeProto', - 'com.google.protobuf.UInt32Value$Builder', - 'com.google.protobuf.UInt32Value', - 'com.google.protobuf.UInt32ValueOrBuilder', - 'com.google.protobuf.UnknownFieldSet$Builder', - 'com.google.protobuf.UnknownFieldSet', - 'com.google.protobuf.WireFormat$FieldType', - 'com.google.protobuf.WrappersProto', - 'com.google.protobuf.util.Timestamps', - 
'org.apache.http.ConnectionReuseStrategy', - 'org.apache.http.Header', - 'org.apache.http.HttpEntity', - 'org.apache.http.HttpEntityEnclosingRequest', - 'org.apache.http.HttpHost', - 'org.apache.http.HttpRequest', - 'org.apache.http.HttpResponse', - 'org.apache.http.HttpVersion', - 'org.apache.http.RequestLine', - 'org.apache.http.StatusLine', - 'org.apache.http.client.AuthenticationHandler', - 'org.apache.http.client.HttpClient', - 'org.apache.http.client.HttpRequestRetryHandler', - 'org.apache.http.client.RedirectHandler', - 'org.apache.http.client.RequestDirector', - 'org.apache.http.client.UserTokenHandler', - 'org.apache.http.client.methods.HttpDelete', - 'org.apache.http.client.methods.HttpEntityEnclosingRequestBase', - 'org.apache.http.client.methods.HttpGet', - 'org.apache.http.client.methods.HttpHead', - 'org.apache.http.client.methods.HttpOptions', - 'org.apache.http.client.methods.HttpPost', - 'org.apache.http.client.methods.HttpPut', - 'org.apache.http.client.methods.HttpRequestBase', - 'org.apache.http.client.methods.HttpTrace', - 'org.apache.http.conn.ClientConnectionManager', - 'org.apache.http.conn.ConnectionKeepAliveStrategy', - 'org.apache.http.conn.params.ConnManagerParams', - 'org.apache.http.conn.params.ConnPerRouteBean', - 'org.apache.http.conn.params.ConnRouteParams', - 'org.apache.http.conn.routing.HttpRoutePlanner', - 'org.apache.http.conn.scheme.PlainSocketFactory', - 'org.apache.http.conn.scheme.Scheme', - 'org.apache.http.conn.scheme.SchemeRegistry', - 'org.apache.http.conn.ssl.SSLSocketFactory', - 'org.apache.http.conn.ssl.X509HostnameVerifier', - 'org.apache.http.entity.AbstractHttpEntity', - 'org.apache.http.impl.client.DefaultHttpClient', - 'org.apache.http.impl.client.DefaultHttpRequestRetryHandler', - 'org.apache.http.impl.conn.ProxySelectorRoutePlanner', - 'org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager', - 'org.apache.http.message.BasicHttpResponse', - 'org.apache.http.params.BasicHttpParams', - 'org.apache.http.params.HttpConnectionParams', - 'org.apache.http.params.HttpParams', - 'org.apache.http.params.HttpProtocolParams', - 'org.apache.http.protocol.HttpContext', - 'org.apache.http.protocol.HttpProcessor', - 'org.apache.http.protocol.HttpRequestExecutor' + // commons-logging optional dependencies + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + // commons-logging provided dependencies + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener' ] check { diff --git a/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 deleted file mode 100644 index 64435356e5e..00000000000 --- a/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e537338d40a57ad469239acb6d828fa544fb52b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/api-common-1.7.0.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.7.0.jar.sha1 new file mode 100644 index 00000000000..67291b658e5 --- /dev/null +++ b/plugins/repository-gcs/licenses/api-common-1.7.0.jar.sha1 @@ -0,0 +1 @@ +ea59fb8b2450999345035dec8a6f472543391766 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 new file mode 100644 index 00000000000..3fe8682a1b0 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 @@ -0,0 +1 @@ +4b95f4897fa13f2cd904aee711aeafc0c5295cd8 \ No 
newline at end of file diff --git a/plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt b/plugins/repository-gcs/licenses/commons-codec-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt rename to plugins/repository-gcs/licenses/commons-codec-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/old/commons-codec-NOTICE.txt b/plugins/repository-gcs/licenses/commons-codec-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/commons-codec-NOTICE.txt rename to plugins/repository-gcs/licenses/commons-codec-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 new file mode 100644 index 00000000000..5b8f029e582 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 @@ -0,0 +1 @@ +f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/old/commons-logging-LICENSE.txt b/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/commons-logging-LICENSE.txt rename to plugins/repository-gcs/licenses/commons-logging-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/old/commons-logging-NOTICE.txt b/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/commons-logging-NOTICE.txt rename to plugins/repository-gcs/licenses/commons-logging-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 deleted file mode 100644 index 594177047c1..00000000000 --- a/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -36ab73c0b5d4a67447eb89a3174cc76ced150bd1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-1.30.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-1.30.0.jar.sha1 new file mode 100644 index 00000000000..d6d2bb20ed8 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-1.30.0.jar.sha1 @@ -0,0 +1 @@ +58fa2feb11b092be0a6ebe705a28736f12374230 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 deleted file mode 100644 index c251ea1dd95..00000000000 --- a/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb4bafbfd45b9d24efbb6138a31e37918fac015f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.47.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.47.0.jar.sha1 new file mode 100644 index 00000000000..fdc722d1520 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-0.47.0.jar.sha1 @@ -0,0 +1 @@ +d096f3142eb3adbf877588d1044895d148d9efcb \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-client-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.23.0.jar.sha1 deleted file mode 100644 index 0c35d8e08b9..00000000000 --- a/plugins/repository-gcs/licenses/google-api-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -522ea860eb48dee71dfe2c61a1fd09663539f556 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-client-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.24.1.jar.sha1 new file mode 100644 index 00000000000..27dafe58a01 --- /dev/null +++ 
b/plugins/repository-gcs/licenses/google-api-client-1.24.1.jar.sha1 @@ -0,0 +1 @@ +37de23fb9b8b077de4ecec3192d98e752b0e5d72 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-1.23.0.jar.sha1 deleted file mode 100644 index 9f6f77ada3a..00000000000 --- a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba4fb6c5dc8d5ad94dedd9927ceee10a31a59abd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev135-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev135-1.24.1.jar.sha1 new file mode 100644 index 00000000000..e3042ee6ea0 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev135-1.24.1.jar.sha1 @@ -0,0 +1 @@ +28d3d391dfc7e7e7951760708ad2f48cecacf38f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.10.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.10.0.jar.sha1 new file mode 100644 index 00000000000..c8258d69326 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.10.0.jar.sha1 @@ -0,0 +1 @@ +f981288bd84fe6d140ed70d1d8dbe994a64fa3cc \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 deleted file mode 100644 index 0922a53d2e3..00000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25e0f45f3b3d1b4fccc8944845e51a7a4f359652 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.10.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.10.0.jar.sha1 new file mode 100644 index 00000000000..f55ef7c9c21 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.10.0.jar.sha1 @@ -0,0 +1 @@ +c079a62086121973a23d90f54e2b8c13050fa39d \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 deleted file mode 100644 index 100a44c1872..00000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0fe3a39b0f28d59de1986b3c50f018cd7cb9ec2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 deleted file mode 100644 index 071533f2278..00000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0e88c78ce17c92d76bf46345faf3fa68833b216 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.40.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.40.0.jar.sha1 new file mode 100644 index 00000000000..7562ead12e9 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-1.40.0.jar.sha1 @@ -0,0 +1 @@ +4985701f989030e262cf8f4e38cc954115f5b082 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 
b/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 deleted file mode 100644 index fed3fc257c3..00000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7b4559a9513abd98da50958c56a10f8ae00cb0f7 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.40.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.40.0.jar.sha1 new file mode 100644 index 00000000000..2761bfdc745 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-http-1.40.0.jar.sha1 @@ -0,0 +1 @@ +67f5806beda32894f1e6c9527925b64199fd2e4f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 deleted file mode 100644 index f49152ea056..00000000000 --- a/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -226019ae816b42c59f1b06999aeeb73722b87200 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.40.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.40.0.jar.sha1 new file mode 100644 index 00000000000..33e83b73712 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-storage-1.40.0.jar.sha1 @@ -0,0 +1 @@ +fabefef46f07d1e334123f0de17702708b4dfbd1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.23.0.jar.sha1 deleted file mode 100644 index 5526275d5a1..00000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e86c84ff3c98eca6423e97780325b299133d858 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.24.1.jar.sha1 new file mode 100644 index 00000000000..46b99f23e47 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.24.1.jar.sha1 @@ -0,0 +1 @@ +396eac8d3fb1332675f82b208f48a469d64f3b4a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 deleted file mode 100644 index 823c3a85089..00000000000 --- a/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0eda0d0f758c1cc525866e52e1226c4eb579d130 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.24.1.jar.sha1 new file mode 100644 index 00000000000..e39f63fe33a --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.24.1.jar.sha1 @@ -0,0 +1 @@ +8535031ae10bf6a196e68f25e10c0d6382699cb6 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 deleted file mode 100644 index 85ba0ab798d..00000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a72ea3a197937ef63a893e73df312dac0d813663 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson-1.24.1.jar.sha1 
b/plugins/repository-gcs/licenses/google-http-client-jackson-1.24.1.jar.sha1 new file mode 100644 index 00000000000..f6b9694abaa --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson-1.24.1.jar.sha1 @@ -0,0 +1 @@ +02c88e77c14effdda76f02a0eac968de74e0bd4e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.23.0.jar.sha1 deleted file mode 100644 index 510856a517f..00000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd6761f4046a8cb0455e6fa5f58e12b061e9826e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.24.1.jar.sha1 new file mode 100644 index 00000000000..634b7d9198c --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.24.1.jar.sha1 @@ -0,0 +1 @@ +2ad1dffd8a450055e68d8004fe003033b751d761 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.23.0.jar.sha1 deleted file mode 100644 index 036812b88b5..00000000000 --- a/plugins/repository-gcs/licenses/google-oauth-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e57ea1e2220bda5a2bd24ff17860212861f3c5cf \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.24.1.jar.sha1 new file mode 100644 index 00000000000..2d89939674a --- /dev/null +++ b/plugins/repository-gcs/licenses/google-oauth-client-1.24.1.jar.sha1 @@ -0,0 +1 @@ +7b0e0218b96808868c23a7d0b40566a713931d9f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-context-1.12.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.12.0.jar.sha1 new file mode 100644 index 00000000000..57f37a81c96 --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-context-1.12.0.jar.sha1 @@ -0,0 +1 @@ +5b63a170b786051a42cce08118d5ea3c8f60f749 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 deleted file mode 100644 index 02bac0e4920..00000000000 --- a/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -28b0836f48c9705abf73829bbc536dba29a1329a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gson-2.7.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.7.jar.sha1 new file mode 100644 index 00000000000..b3433f306eb --- /dev/null +++ b/plugins/repository-gcs/licenses/gson-2.7.jar.sha1 @@ -0,0 +1 @@ +751f548c85fa49f330cecbb1875893f971b33c4e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt b/plugins/repository-gcs/licenses/gson-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt rename to plugins/repository-gcs/licenses/gson-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/gson-NOTICE.txt b/plugins/repository-gcs/licenses/gson-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 b/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 new file mode 100644 index 00000000000..6937112a09f --- /dev/null +++ 
b/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 @@ -0,0 +1 @@ +733db77aa8d9b2d68015189df76ab06304406e50 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/old/httpclient-LICENSE.txt b/plugins/repository-gcs/licenses/httpclient-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/httpclient-LICENSE.txt rename to plugins/repository-gcs/licenses/httpclient-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/old/httpclient-NOTICE.txt b/plugins/repository-gcs/licenses/httpclient-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/httpclient-NOTICE.txt rename to plugins/repository-gcs/licenses/httpclient-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 b/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 new file mode 100644 index 00000000000..58172660174 --- /dev/null +++ b/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 @@ -0,0 +1 @@ +e7501a1b34325abb00d17dde96150604a0658b54 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-LICENSE b/plugins/repository-gcs/licenses/jackson-LICENSE new file mode 100644 index 00000000000..f5f45d26a49 --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/plugins/repository-gcs/licenses/jackson-NOTICE b/plugins/repository-gcs/licenses/jackson-NOTICE new file mode 100644 index 00000000000..4c976b7b4cc --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. 
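The IngestGeoIpPluginTests file added earlier in this patch pins down the observable contract of the new GeoIpCache: putIfAbsent is keyed by both the address and the expected response type, a repeated putIfAbsent or get for the same key returns the cached instance, inserting past the configured size evicts an older entry, exceptions thrown by the loader function propagate to the caller, and a negative size is rejected. The sketch below is illustrative only, not the PR's implementation (which lives in IngestGeoIpPlugin); it assumes an access-ordered LinkedHashMap for eviction, and the name GeoIpCacheSketch is made up for this note.

```java
import java.net.InetAddress;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;

// Minimal sketch of a cache satisfying the contract exercised by IngestGeoIpPluginTests.
final class GeoIpCacheSketch {

    // Entries are keyed by (address, response type), mirroring
    // cache.putIfAbsent(ip, AbstractResponse.class, ...) in the tests.
    private static final class Key {
        final InetAddress ip;
        final Class<?> type;
        Key(InetAddress ip, Class<?> type) { this.ip = ip; this.type = type; }
        @Override public boolean equals(Object o) {
            return o instanceof Key && ip.equals(((Key) o).ip) && type.equals(((Key) o).type);
        }
        @Override public int hashCode() { return 31 * ip.hashCode() + type.hashCode(); }
    }

    private final Map<Key, Object> map;

    GeoIpCacheSketch(int maxSize) {
        if (maxSize < 0) {
            // message taken from the assertion in testInvalidInit
            throw new IllegalArgumentException("geoip max cache size must be 0 or greater");
        }
        // Access-ordered map that drops the least recently used entry once maxSize is exceeded.
        this.map = new LinkedHashMap<Key, Object>(16, 0.75f, true) {
            @Override protected boolean removeEldestEntry(Map.Entry<Key, Object> eldest) {
                return size() > maxSize;
            }
        };
    }

    <T> T putIfAbsent(InetAddress ip, Class<T> responseType, Function<InetAddress, T> retrieveFunction) {
        Key key = new Key(ip, responseType);
        Object cached = map.get(key);
        if (cached == null) {
            // loader exceptions propagate unwrapped, as testThrowsFunctionsException expects
            cached = retrieveFunction.apply(ip);
            map.put(key, cached);
        }
        return responseType.cast(cached);
    }

    <T> T get(InetAddress ip, Class<T> responseType) {
        return responseType.cast(map.get(new Key(ip, responseType)));
    }
}
```

With a size of 1, caching a second address evicts the first, so a later get for the first address no longer returns the originally cached response, which is consistent with the assertNotSame check in testCachesAndEvictsResults.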
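Similarly, the UserAgentProcessor hunk earlier in this section tracks an ingest API change in this patch: execute now returns the IngestDocument instead of void, so even an early exit (such as a null field with ignoreMissing set) must hand the document back. A minimal hedged sketch of that shape, with all type names other than execute invented for illustration:

```java
// Stand-in types; the real interfaces are org.elasticsearch.ingest.Processor and IngestDocument.
final class Document { /* fields elided */ }

interface DocProcessor {
    // The updated contract: return the (possibly modified) document rather than void.
    Document execute(Document document) throws Exception;
}

final class PassThroughProcessor implements DocProcessor {
    @Override
    public Document execute(Document document) {
        // Nothing to enrich (e.g. a missing field with ignore_missing): still return the
        // document so the pipeline can keep processing it.
        return document;
    }
}
```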
diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.11.jar.sha1 b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.11.jar.sha1 new file mode 100644 index 00000000000..ed70030899a --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.11.jar.sha1 @@ -0,0 +1 @@ +e32303ef8bd18a5c9272780d49b81c95e05ddf43 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 deleted file mode 100644 index c5016bf828d..00000000000 --- a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3c304d70f42f832e0a86d45bd437f692129299a4 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/old/google-LICENSE.txt b/plugins/repository-gcs/licenses/old/google-LICENSE.txt deleted file mode 100644 index 980a15ac24e..00000000000 --- a/plugins/repository-gcs/licenses/old/google-LICENSE.txt +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/plugins/repository-gcs/licenses/old/google-NOTICE.txt b/plugins/repository-gcs/licenses/old/google-NOTICE.txt deleted file mode 100644 index 8d1c8b69c3f..00000000000 --- a/plugins/repository-gcs/licenses/old/google-NOTICE.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt b/plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt deleted file mode 100644 index 72819a9f06f..00000000000 --- a/plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt +++ /dev/null @@ -1,241 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - -========================================================================= - -This project contains annotations in the package org.apache.http.annotation -which are derived from JCIP-ANNOTATIONS -Copyright (c) 2005 Brian Goetz and Tim Peierls. -See http://www.jcip.net and the Creative Commons Attribution License -(http://creativecommons.org/licenses/by/2.5) -Full text: http://creativecommons.org/licenses/by/2.5/legalcode - -License - -THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. - -BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. - -1. Definitions - - "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. - "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. 
- "Licensor" means the individual or entity that offers the Work under the terms of this License. - "Original Author" means the individual or entity who created the Work. - "Work" means the copyrightable work of authorship offered under the terms of this License. - "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. - -2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. - -3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: - - to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; - to create and reproduce Derivative Works; - to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; - to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works. - - For the avoidance of doubt, where the work is a musical composition: - Performance Royalties Under Blanket Licenses. Licensor waives the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work. - Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions). - Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor waives the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions). - -The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved. - -4. 
Restrictions.The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: - - You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by clause 4(b), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by clause 4(b), as requested. - If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. a sponsor institute, publishing entity, journal) for attribution in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit. - -5. Representations, Warranties and Disclaimer - -UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. 
SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. - -6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -7. Termination - - This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. - Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. - -8. Miscellaneous - - Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. - Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. - If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. - No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. - This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. diff --git a/plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt b/plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt deleted file mode 100644 index c0be50a505e..00000000000 --- a/plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt +++ /dev/null @@ -1,8 +0,0 @@ -Apache HttpComponents Core -Copyright 2005-2014 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - -This project contains annotations derived from JCIP-ANNOTATIONS -Copyright (c) 2005 Brian Goetz and Tim Peierls. 
See http://www.jcip.net diff --git a/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 deleted file mode 100644 index 61d8e3b1481..00000000000 --- a/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -54689fbf750a7f26e34fa1f1f96b883c53f51486 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-api-0.15.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-api-0.15.0.jar.sha1 new file mode 100644 index 00000000000..e200e2e24a7 --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-api-0.15.0.jar.sha1 @@ -0,0 +1 @@ +9a098392b287d7924660837f4eba0ce252013683 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 deleted file mode 100644 index c0b04f0f8cc..00000000000 --- a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -82e572b41e81ecf58d0d1e9a3953a05aa8f9c84b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.15.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.15.0.jar.sha1 new file mode 100644 index 00000000000..b642e1ebebd --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.15.0.jar.sha1 @@ -0,0 +1 @@ +d88690591669d9b5ba6d91d9eac7736e58ccf3da \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt b/plugins/repository-gcs/licenses/proto-google-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt rename to plugins/repository-gcs/licenses/proto-google-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/proto-google-NOTICE.txt b/plugins/repository-gcs/licenses/proto-google-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-1.12.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-1.12.0.jar.sha1 new file mode 100644 index 00000000000..47f3c178a68 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-1.12.0.jar.sha1 @@ -0,0 +1 @@ +1140cc74df039deb044ed0e320035e674dc13062 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 deleted file mode 100644 index 0a2dee4447e..00000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b3282312ba82536fc9a7778cabfde149a875e877 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 new file mode 100644 index 00000000000..2bfae3456d4 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 @@ -0,0 +1 @@ +ea312c0250a5d0a7cdd1b20bc2c3259938b79855 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-LICENSE.txt b/plugins/repository-gcs/licenses/protobuf-LICENSE.txt new file mode 100644 index 00000000000..19b305b0006 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-LICENSE.txt @@ -0,0 +1,32 @@ +Copyright 2008 Google Inc. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. diff --git a/plugins/repository-gcs/licenses/protobuf-NOTICE.txt b/plugins/repository-gcs/licenses/protobuf-NOTICE.txt new file mode 100644 index 00000000000..19b305b0006 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-NOTICE.txt @@ -0,0 +1,32 @@ +Copyright 2008 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.6.0.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.6.0.jar.sha1 new file mode 100644 index 00000000000..050ebd44c92 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-3.6.0.jar.sha1 @@ -0,0 +1 @@ +5333f7e422744d76840c08a106e28e519fbe3acd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.6.0.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-util-3.6.0.jar.sha1 new file mode 100644 index 00000000000..cc85974499a --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-util-3.6.0.jar.sha1 @@ -0,0 +1 @@ +3680d0042d4fe0b95ada844ff24da0698a7f0773 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/threetenbp-1.3.3.jar.sha1 b/plugins/repository-gcs/licenses/threetenbp-1.3.3.jar.sha1 new file mode 100644 index 00000000000..9273043e145 --- /dev/null +++ b/plugins/repository-gcs/licenses/threetenbp-1.3.3.jar.sha1 @@ -0,0 +1 @@ +3ea31c96676ff12ab56be0b1af6fff61d1a4f1f2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 b/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 deleted file mode 100644 index 65c16fed4a0..00000000000 --- a/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89dcc04a7e028c3c963413a71f950703cf51f057 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 6debaf5282f..557dcaa5fae 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -582,6 +582,25 @@ thirdPartyAudit.excludes = [ 'com.squareup.okhttp.ResponseBody' ] -if (JavaVersion.current() > JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += ['javax.xml.bind.annotation.adapters.HexBinaryAdapter'] } + +if (project.runtimeJavaVersion == JavaVersion.VERSION_1_8) { + thirdPartyAudit.excludes += [ + // TODO: Why is this needed ? 
+ 'com.sun.javadoc.AnnotationDesc', + 'com.sun.javadoc.AnnotationTypeDoc', + 'com.sun.javadoc.ClassDoc', + 'com.sun.javadoc.ConstructorDoc', + 'com.sun.javadoc.Doc', + 'com.sun.javadoc.DocErrorReporter', + 'com.sun.javadoc.FieldDoc', + 'com.sun.javadoc.LanguageVersion', + 'com.sun.javadoc.MethodDoc', + 'com.sun.javadoc.PackageDoc', + 'com.sun.javadoc.ProgramElementDoc', + 'com.sun.javadoc.RootDoc', + 'com.sun.tools.doclets.standard.Standard' + ] +} diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 7f0ca209db7..5d248b22caf 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -447,7 +447,7 @@ thirdPartyAudit.excludes = [ ] // jarhell with jdk (intentionally, because jaxb was removed from default modules in java 9) -if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += [ 'javax.xml.bind.Binder', 'javax.xml.bind.ContextFinder$1', diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 07605bfee29..cb8916b857c 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -62,7 +62,6 @@ thirdPartyAudit.excludes = [ 'io.netty.internal.tcnative.SSLContext', // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) - 'org.bouncycastle.asn1.x500.X500Name', 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', 'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', @@ -141,4 +140,11 @@ thirdPartyAudit.excludes = [ 'org.conscrypt.BufferAllocator', 'org.conscrypt.Conscrypt', 'org.conscrypt.HandshakeListener' -] \ No newline at end of file +] +if (project.inFipsJvm == false) { + // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in + // a FIPS JVM with BouncyCastleFIPS Provider + thirdPartyAudit.excludes += [ + 'org.bouncycastle.asn1.x500.X500Name' + ] +} \ No newline at end of file diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java index 3dcd59cf8e2..17a5c1fb97e 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java @@ -139,7 +139,7 @@ public class HttpReadWriteHandler implements ReadWriteHandler { if (request.decoderResult().isFailure()) { Throwable cause = request.decoderResult().cause(); if (cause instanceof Error) { - ExceptionsHelper.dieOnError(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); transport.incomingRequestError(httpRequest, nioHttpChannel, new Exception(cause)); } else { transport.incomingRequestError(httpRequest, nioHttpChannel, (Exception) cause); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java index 41cb72aa322..133206e1322 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java @@ -73,7 +73,7 @@ public class NettyAdaptor implements AutoCloseable { closeFuture.await(); if (closeFuture.isSuccess() == false) { Throwable cause = closeFuture.cause(); - ExceptionsHelper.dieOnError(cause); + 
ExceptionsHelper.maybeDieOnAnotherThread(cause); throw (Exception) cause; } } @@ -84,7 +84,7 @@ public class NettyAdaptor implements AutoCloseable { listener.accept(null, null); } else { final Throwable cause = f.cause(); - ExceptionsHelper.dieOnError(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); assert cause instanceof Exception; listener.accept(null, (Exception) cause); } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java index 2cdaa4708d1..637bbafff8e 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java @@ -223,7 +223,7 @@ public class NettyListener implements BiConsumer, ChannelPromis biConsumer.accept(null, null); } else { if (cause instanceof Error) { - ExceptionsHelper.dieOnError(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); biConsumer.accept(null, new Exception(cause)); } else { biConsumer.accept(null, (Exception) cause); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 47229a0df2f..129f0ada77d 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -21,6 +21,7 @@ package org.elasticsearch.transport.nio; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; @@ -82,7 +83,8 @@ public class NioTransport extends TcpTransport { } @Override - protected NioTcpChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + protected NioTcpChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException { + InetSocketAddress address = node.getAddress().address(); NioTcpChannel channel = nioGroup.openChannel(address, clientChannelFactory); channel.addConnectListener(ActionListener.toBiConsumer(connectListener)); return channel; diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java new file mode 100644 index 00000000000..b29df77cae1 --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.apache.logging.log4j.Level; +import org.elasticsearch.NioIntegTestCase; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.transport.TransportLogger; + +@ESIntegTestCase.ClusterScope(numDataNodes = 2) +@TestLogging(value = "org.elasticsearch.transport.TransportLogger:trace") +public class NioTransportLoggingIT extends NioIntegTestCase { + + private MockLogAppender appender; + + public void setUp() throws Exception { + super.setUp(); + appender = new MockLogAppender(); + Loggers.addAppender(Loggers.getLogger(TransportLogger.class), appender); + appender.start(); + } + + public void tearDown() throws Exception { + Loggers.removeAppender(Loggers.getLogger(TransportLogger.class), appender); + appender.stop(); + super.tearDown(); + } + + public void testLoggingHandler() throws IllegalAccessException { + final String writePattern = + ".*\\[length: \\d+" + + ", request id: \\d+" + + ", type: request" + + ", version: .*" + + ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" + + " WRITE: \\d+B"; + final MockLogAppender.LoggingExpectation writeExpectation = + new MockLogAppender.PatternSeenEventExcpectation( + "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern); + + final String readPattern = + ".*\\[length: \\d+" + + ", request id: \\d+" + + ", type: request" + + ", version: .*" + + ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" + + " READ: \\d+B"; + + final MockLogAppender.LoggingExpectation readExpectation = + new MockLogAppender.PatternSeenEventExcpectation( + "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern); + + appender.addExpectation(writeExpectation); + appender.addExpectation(readExpectation); + client().admin().cluster().nodesHotThreads(new NodesHotThreadsRequest()).actionGet(); + appender.assertAllExpectationsMatched(); + } +} diff --git a/qa/ccs-unavailable-clusters/build.gradle b/qa/ccs-unavailable-clusters/build.gradle index d9de422bb43..c1f2bc96271 100644 --- a/qa/ccs-unavailable-clusters/build.gradle +++ b/qa/ccs-unavailable-clusters/build.gradle @@ -21,5 +21,5 @@ apply plugin: 'elasticsearch.rest-test' apply plugin: 'elasticsearch.test-with-dependencies' dependencies { - testCompile project(path: ':client:rest-high-level', configuration: 'shadow') + testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}" } diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index 6bfa4de8d4a..0c42e4be89a 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -235,8 +235,8 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { () -> client().performRequest(request)); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); 
assertThat(responseException.getMessage(), - containsString("Missing required setting [search.remote.remote1.seeds] " + - "for setting [search.remote.remote1.skip_unavailable]")); + containsString("missing required setting [cluster.remote.remote1.seeds] " + + "for setting [cluster.remote.remote1.skip_unavailable]")); } Map settingsMap = new HashMap<>(); @@ -251,8 +251,8 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest(request)); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); - assertThat(responseException.getMessage(), containsString("Missing required setting [search.remote.remote1.seeds] " + - "for setting [search.remote.remote1.skip_unavailable]")); + assertThat(responseException.getMessage(), containsString("missing required setting [cluster.remote.remote1.seeds] " + + "for setting [cluster.remote.remote1.skip_unavailable]")); } if (randomBoolean()) { @@ -304,7 +304,7 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { { builder.startObject("persistent"); { - builder.startObject("search.remote.remote1"); + builder.startObject("cluster.remote.remote1"); { for (Map.Entry entry : settings.entrySet()) { builder.field(entry.getKey(), entry.getValue()); diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 992d3ce71f6..9250122025c 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -90,14 +90,14 @@ public class DieWithDignityIT extends ESRestTestCase { final Iterator it = lines.iterator(); - boolean fatalErrorOnTheNetworkLayer = false; + boolean fatalError = false; boolean fatalErrorInThreadExiting = false; - while (it.hasNext() && (fatalErrorOnTheNetworkLayer == false || fatalErrorInThreadExiting == false)) { + while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { final String line = it.next(); - if (line.contains("fatal error on the network layer")) { - fatalErrorOnTheNetworkLayer = true; - } else if (line.matches(".*\\[ERROR\\]\\[o.e.b.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]" + if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.ExceptionsHelper\\s*\\] \\[node-0\\] fatal error")) { + fatalError = true; + } else if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]" + " fatal error in thread \\[Thread-\\d+\\], exiting$")) { fatalErrorInThreadExiting = true; assertTrue(it.hasNext()); @@ -105,7 +105,7 @@ public class DieWithDignityIT extends ESRestTestCase { } } - assertTrue(fatalErrorOnTheNetworkLayer); + assertTrue(fatalError); assertTrue(fatalErrorInThreadExiting); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index ede61da1369..a06d7ad5445 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -357,7 +357,7 @@ public class EvilLoggerTests extends ESTestCase { } } - public void testNoNodeNameWarning() throws IOException, UserException { + 
public void testNoNodeNameInPatternWarning() throws IOException, UserException { setupLogging("no_node_name"); final String path = @@ -368,7 +368,7 @@ public class EvilLoggerTests extends ESTestCase { assertThat(events.size(), equalTo(2)); final String location = "org.elasticsearch.common.logging.LogConfigurator"; // the first message is a warning for unsupported configuration files - assertLogLine(events.get(0), Level.WARN, location, "\\[null\\] Some logging configurations have %marker but don't " + assertLogLine(events.get(0), Level.WARN, location, "\\[unknown\\] Some logging configurations have %marker but don't " + "have %node_name. We will automatically add %node_name to the pattern to ease the migration for users " + "who customize log4j2.properties but will stop this behavior in 7.0. You should manually replace " + "`%node_name` with `\\[%node_name\\]%marker ` in these locations:"); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java index 57d4a363cc8..642694856a6 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java @@ -52,7 +52,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build(); IOException ioException = expectThrows(IOException.class, () -> { - new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); + new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {}); }); assertTrue(ioException.getMessage(), ioException.getMessage().startsWith(path.toString())); } @@ -72,7 +72,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build(); IOException ioException = expectThrows(IOException.class, () -> { - new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); + new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {}); }); assertTrue(ioException.getMessage(), ioException.getMessage().startsWith("failed to test writes in data directory")); } @@ -97,7 +97,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build(); IOException ioException = expectThrows(IOException.class, () -> { - new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); + new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {}); }); assertTrue(ioException.getMessage(), ioException.getMessage().startsWith("failed to test writes in data directory")); } diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 0b936e44e5b..3ee5c07308f 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -68,10 +68,8 @@ import static org.hamcrest.Matchers.notNullValue; * version is started with the same data directories and then 
this is rerun * with {@code tests.is_old_cluster} set to {@code false}. */ -public class FullClusterRestartIT extends ESRestTestCase { - private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); - private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); - private final boolean supportsLenientBooleans = oldClusterVersion.before(Version.V_6_0_0_alpha1); +public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { + private final boolean supportsLenientBooleans = getOldClusterVersion().before(Version.V_6_0_0_alpha1); private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0"); private String index; @@ -81,29 +79,9 @@ public class FullClusterRestartIT extends ESRestTestCase { index = getTestName().toLowerCase(Locale.ROOT); } - @Override - protected boolean preserveIndicesUponCompletion() { - return true; - } - - @Override - protected boolean preserveSnapshotsUponCompletion() { - return true; - } - - @Override - protected boolean preserveReposUponCompletion() { - return true; - } - - @Override - protected boolean preserveTemplatesUponCompletion() { - return true; - } - public void testSearch() throws Exception { int count; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -169,7 +147,7 @@ public class FullClusterRestartIT extends ESRestTestCase { } public void testNewReplicasWork() throws Exception { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -237,10 +215,10 @@ public class FullClusterRestartIT extends ESRestTestCase { */ public void testAliasWithBadName() throws Exception { assumeTrue("Can only test bad alias name if old cluster is on 5.1.0 or before", - oldClusterVersion.before(VERSION_5_1_0_UNRELEASED)); + getOldClusterVersion().before(VERSION_5_1_0_UNRELEASED)); int count; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -291,7 +269,7 @@ public class FullClusterRestartIT extends ESRestTestCase { Map searchRsp = entityAsMap(client().performRequest(new Request("GET", "/" + aliasName + "/_search"))); int totalHits = (int) XContentMapValues.extractValue("hits.total", searchRsp); assertEquals(count, totalHits); - if (runningAgainstOldCluster == false) { + if (isRunningAgainstOldCluster() == false) { // We can remove the alias. Response response = client().performRequest(new Request("DELETE", "/" + index + "/_alias/" + aliasName)); assertEquals(200, response.getStatusLine().getStatusCode()); @@ -302,7 +280,7 @@ public class FullClusterRestartIT extends ESRestTestCase { } public void testClusterState() throws Exception { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); mappingsAndSettings.field("template", index); @@ -341,14 +319,14 @@ public class FullClusterRestartIT extends ESRestTestCase { assertEquals("0", numberOfReplicas); Version version = Version.fromId(Integer.valueOf((String) XContentMapValues.extractValue("metadata.indices." 
+ index + ".settings.index.version.created", clusterState))); - assertEquals(oldClusterVersion, version); + assertEquals(getOldClusterVersion(), version); } public void testShrink() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -413,7 +391,7 @@ public class FullClusterRestartIT extends ESRestTestCase { public void testShrinkAfterUpgrade() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -465,7 +443,7 @@ public class FullClusterRestartIT extends ESRestTestCase { int totalHits = (int) XContentMapValues.extractValue("hits.total", response); assertEquals(numDocs, totalHits); - if (runningAgainstOldCluster == false) { + if (isRunningAgainstOldCluster() == false) { response = entityAsMap(client().performRequest(new Request("GET", "/" + shrunkenIndex + "/_search"))); assertNoFailures(response); totalShards = (int) XContentMapValues.extractValue("_shards.total", response); @@ -477,6 +455,62 @@ public class FullClusterRestartIT extends ESRestTestCase { } } + /** + * Test upgrading after a rollover. Specifically: + *
+     *  1. Create an index with a write alias
+     *  2. Write some documents to the write alias
+     *  3. Roll over the index
+     *  4. Make sure the document count is correct
+     *  5. Upgrade
+     *  6. Write some more documents to the write alias
+     *  7. Make sure the document count is correct
+ */ + public void testRollover() throws IOException { + if (isRunningAgainstOldCluster()) { + Request createIndex = new Request("PUT", "/" + index + "-000001"); + createIndex.setJsonEntity("{" + + " \"aliases\": {" + + " \"" + index + "_write\": {}" + + " }" + + "}"); + client().performRequest(createIndex); + } + + int bulkCount = 10; + StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < bulkCount; i++) { + bulk.append("{\"index\":{}}\n"); + bulk.append("{\"test\":\"test\"}\n"); + } + Request bulkRequest = new Request("POST", "/" + index + "_write/doc/_bulk"); + bulkRequest.setJsonEntity(bulk.toString()); + bulkRequest.addParameter("refresh", ""); + assertThat(EntityUtils.toString(client().performRequest(bulkRequest).getEntity()), containsString("\"errors\":false")); + + if (isRunningAgainstOldCluster()) { + Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover"); + rolloverRequest.setJsonEntity("{" + + " \"conditions\": {" + + " \"max_docs\": 5" + + " }" + + "}"); + client().performRequest(rolloverRequest); + + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices?v")).getEntity()), + containsString("testrollover-000002")); + } + + Request countRequest = new Request("POST", "/" + index + "-*/_search"); + countRequest.addParameter("size", "0"); + Map count = entityAsMap(client().performRequest(countRequest)); + assertNoFailures(count); + + int expectedCount = bulkCount + (isRunningAgainstOldCluster() ? 0 : bulkCount); + assertEquals(expectedCount, (int) XContentMapValues.extractValue("hits.total", count)); + } + void assertBasicSearchWorks(int count) throws IOException { logger.info("--> testing basic search"); { @@ -632,7 +666,7 @@ public class FullClusterRestartIT extends ESRestTestCase { String docLocation = "/" + index + "/doc/1"; String doc = "{\"test\": \"test\"}"; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Request createDoc = new Request("PUT", docLocation); createDoc.setJsonEntity(doc); client().performRequest(createDoc); @@ -647,7 +681,7 @@ public class FullClusterRestartIT extends ESRestTestCase { public void testEmptyShard() throws IOException { final String index = "test_empty_shard"; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) @@ -670,7 +704,7 @@ public class FullClusterRestartIT extends ESRestTestCase { public void testRecovery() throws Exception { int count; boolean shouldHaveTranslog; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { count = between(200, 300); /* We've had bugs in the past where we couldn't restore * an index without a translog so we randomize whether @@ -716,7 +750,7 @@ public class FullClusterRestartIT extends ESRestTestCase { String countResponse = toStr(client().performRequest(countRequest)); assertThat(countResponse, containsString("\"total\":" + count)); - if (false == runningAgainstOldCluster) { + if (false == isRunningAgainstOldCluster()) { boolean restoredFromTranslog = false; boolean foundPrimary = false; Request recoveryRequest = new Request("GET", "/_cat/recovery/" + index); @@ -744,7 +778,7 @@ public class FullClusterRestartIT extends ESRestTestCase { assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog); String currentLuceneVersion = 
Version.CURRENT.luceneVersion.toString(); - String bwcLuceneVersion = oldClusterVersion.luceneVersion.toString(); + String bwcLuceneVersion = getOldClusterVersion().luceneVersion.toString(); if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) { int numCurrentVersion = 0; int numBwcVersion = 0; @@ -784,7 +818,7 @@ public class FullClusterRestartIT extends ESRestTestCase { */ public void testSnapshotRestore() throws IOException { int count; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { // Create the index count = between(200, 300); indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); @@ -804,7 +838,7 @@ public class FullClusterRestartIT extends ESRestTestCase { // Stick a routing attribute into to cluster settings so we can see it after the restore Request addRoutingSettings = new Request("PUT", "/_cluster/settings"); addRoutingSettings.setJsonEntity( - "{\"persistent\": {\"cluster.routing.allocation.exclude.test_attr\": \"" + oldClusterVersion + "\"}}"); + "{\"persistent\": {\"cluster.routing.allocation.exclude.test_attr\": \"" + getOldClusterVersion() + "\"}}"); client().performRequest(addRoutingSettings); // Stick a template into the cluster so we can see it after the restore @@ -829,7 +863,7 @@ public class FullClusterRestartIT extends ESRestTestCase { templateBuilder.startObject("alias2"); { templateBuilder.startObject("filter"); { templateBuilder.startObject("term"); { - templateBuilder.field("version", runningAgainstOldCluster ? oldClusterVersion : Version.CURRENT); + templateBuilder.field("version", isRunningAgainstOldCluster() ? getOldClusterVersion() : Version.CURRENT); } templateBuilder.endObject(); } @@ -842,7 +876,7 @@ public class FullClusterRestartIT extends ESRestTestCase { createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); client().performRequest(createTemplateRequest); - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { // Create the repo XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); { repoConfig.field("type", "fs"); @@ -858,19 +892,19 @@ public class FullClusterRestartIT extends ESRestTestCase { client().performRequest(createRepoRequest); } - Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (runningAgainstOldCluster ? "old_snap" : "new_snap")); + Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (isRunningAgainstOldCluster() ? 
"old_snap" : "new_snap")); createSnapshot.addParameter("wait_for_completion", "true"); createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); client().performRequest(createSnapshot); - checkSnapshot("old_snap", count, oldClusterVersion); - if (false == runningAgainstOldCluster) { + checkSnapshot("old_snap", count, getOldClusterVersion()); + if (false == isRunningAgainstOldCluster()) { checkSnapshot("new_snap", count, Version.CURRENT); } } public void testHistoryUUIDIsAdded() throws Exception { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -908,9 +942,6 @@ public class FullClusterRestartIT extends ESRestTestCase { private void checkSnapshot(String snapshotName, int count, Version tookOnVersion) throws IOException { // Check the snapshot metadata, especially the version Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName); - if (false == (runningAgainstOldCluster && oldClusterVersion.before(Version.V_5_5_0))) { - listSnapshotRequest.addParameter("verbose", "true"); - } Map listSnapshotResponse = entityAsMap(client().performRequest(listSnapshotRequest)); assertEquals(singletonList(snapshotName), XContentMapValues.extractValue("snapshots.snapshot", listSnapshotResponse)); assertEquals(singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", listSnapshotResponse)); @@ -950,7 +981,7 @@ public class FullClusterRestartIT extends ESRestTestCase { Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/doc/_bulk"); writeToRestoredRequest.addParameter("refresh", "true"); writeToRestoredRequest.setJsonEntity(bulk.toString()); - client().performRequest(writeToRestoredRequest); + assertThat(EntityUtils.toString(client().performRequest(writeToRestoredRequest).getEntity()), containsString("\"errors\":false")); // And count to make sure the add worked // Make sure search finds all documents @@ -969,7 +1000,7 @@ public class FullClusterRestartIT extends ESRestTestCase { Map expectedClusterSettings = new HashMap<>(); expectedClusterSettings.put("transient", emptyMap()); expectedClusterSettings.put("persistent", - singletonMap("cluster.routing.allocation.exclude.test_attr", oldClusterVersion.toString())); + singletonMap("cluster.routing.allocation.exclude.test_attr", getOldClusterVersion().toString())); if (expectedClusterSettings.equals(clusterSettingsResponse) == false) { NotEqualMessageBuilder builder = new NotEqualMessageBuilder(); builder.compareMaps(clusterSettingsResponse, expectedClusterSettings); @@ -979,7 +1010,7 @@ public class FullClusterRestartIT extends ESRestTestCase { // Check that the template was restored successfully Map getTemplateResponse = entityAsMap(client().performRequest(new Request("GET", "/_template/test_template"))); Map expectedTemplate = new HashMap<>(); - if (runningAgainstOldCluster && oldClusterVersion.before(Version.V_6_0_0_beta1)) { + if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_0_0_beta1)) { expectedTemplate.put("template", "evil_*"); } else { expectedTemplate.put("index_patterns", singletonList("evil_*")); diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index 49a9dec870e..2b7250f86b7 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ 
b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -20,10 +20,8 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; @@ -48,7 +46,6 @@ import org.elasticsearch.index.query.SpanTermQueryBuilder; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.test.rest.ESRestTestCase; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -71,7 +68,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; * The queries to test are specified in json format, which turns out to work because we tend to break here rarely. If the * json format of a query being tested here changes, feel free to change this. */ -public class QueryBuilderBWCIT extends ESRestTestCase { +public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase { private static final List<Object[]> CANDIDATES = new ArrayList<>(); @@ -145,32 +142,9 @@ public class QueryBuilderBWCIT extends ESRestTestCase { CANDIDATES.add(new Object[]{"{\"query\": {" + querySource + "}}", expectedQb}); } - private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); - private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); - - @Override - protected boolean preserveIndicesUponCompletion() { - return true; - } - - @Override - protected boolean preserveSnapshotsUponCompletion() { - return true; - } - - @Override - protected boolean preserveReposUponCompletion() { - return true; - } - - @Override - protected boolean preserveTemplatesUponCompletion() { - return true; - } - public void testQueryBuilderBWC() throws Exception { String index = "queries"; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -230,7 +204,7 @@ public class QueryBuilderBWCIT extends ESRestTestCase { byte[] qbSource = Base64.getDecoder().decode(queryBuilderStr); try (InputStream in = new ByteArrayInputStream(qbSource, 0, qbSource.length)) { try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), registry)) { - input.setVersion(oldClusterVersion); + input.setVersion(getOldClusterVersion()); QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class); assert in.read() == -1; assertEquals(expectedQueryBuilder, queryBuilder); diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 782e83fbb34..3012be985bc 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -28,7 +28,7 @@ task remoteClusterTest(type: RestIntegTestTask) { remoteClusterTestCluster { numNodes = 2 clusterName = 'remote-cluster' - setting 'search.remote.connect', false + setting 'cluster.remote.connect', false } remoteClusterTestRunner { @@ -39,9 +39,9 @@ task mixedClusterTest(type: RestIntegTestTask) {} mixedClusterTestCluster { dependsOn remoteClusterTestRunner - setting 
'search.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\"" - setting 'search.remote.connections_per_cluster', 1 - setting 'search.remote.connect', true + setting 'cluster.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\"" + setting 'cluster.remote.connections_per_cluster', 1 + setting 'cluster.remote.connect', true } mixedClusterTestRunner { diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index 8617ecc1fe2..e2b15bc0d5d 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -99,16 +99,16 @@ cluster.get_settings: include_defaults: true - - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip } + - set: { defaults.cluster.remote.my_remote_cluster.seeds.0: remote_ip } - do: cluster.put_settings: flat_settings: true body: transient: - search.remote.test_remote_cluster.seeds: $remote_ip + cluster.remote.test_remote_cluster.seeds: $remote_ip - - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}} + - match: {transient: {cluster.remote.test_remote_cluster.seeds: $remote_ip}} - do: search: @@ -124,16 +124,16 @@ cluster.get_settings: include_defaults: true - - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip } + - set: { defaults.cluster.remote.my_remote_cluster.seeds.0: remote_ip } - do: cluster.put_settings: flat_settings: true body: transient: - search.remote.test_remote_cluster.seeds: $remote_ip + cluster.remote.test_remote_cluster.seeds: $remote_ip - - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}} + - match: {transient: {cluster.remote.test_remote_cluster.seeds: $remote_ip}} - do: search: diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml index b4487e4fefe..45cc570ecea 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml @@ -14,16 +14,16 @@ cluster.get_settings: include_defaults: true - - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip } + - set: { defaults.cluster.remote.my_remote_cluster.seeds.0: remote_ip } - do: cluster.put_settings: flat_settings: true body: transient: - search.remote.test_remote_cluster.seeds: $remote_ip + cluster.remote.test_remote_cluster.seeds: $remote_ip - - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}} + - match: {transient: {cluster.remote.test_remote_cluster.seeds: $remote_ip}} # we do another search here since this will enforce the connection to be established # otherwise the cluster might not have been connected yet. 
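# Note: after the rename above, remote clusters are configured under the
# cluster.remote.* namespace rather than search.remote.*. A minimal sketch of
# seeding a remote cluster over the REST API, mirroring the steps these tests
# exercise (the address below is a hypothetical placeholder, not part of this change):
#   cluster.put_settings:
#     body:
#       transient:
#         cluster.remote.my_remote_cluster.seeds: "127.0.0.1:9300"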
@@ -56,7 +56,7 @@ cluster.put_settings: body: transient: - search.remote.test_remote_cluster.seeds: null + cluster.remote.test_remote_cluster.seeds: null --- "skip_unavailable is returned as part of _remote/info response": @@ -68,16 +68,16 @@ cluster.get_settings: include_defaults: true - - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip } + - set: { defaults.cluster.remote.my_remote_cluster.seeds.0: remote_ip } - do: cluster.put_settings: flat_settings: true body: transient: - search.remote.remote1.seeds: $remote_ip + cluster.remote.remote1.seeds: $remote_ip - - match: {transient: {search.remote.remote1.seeds: $remote_ip}} + - match: {transient: {cluster.remote.remote1.seeds: $remote_ip}} - do: cluster.remote_info: {} @@ -87,9 +87,9 @@ cluster.put_settings: body: transient: - search.remote.remote1.skip_unavailable: true + cluster.remote.remote1.skip_unavailable: true - - is_true: transient.search.remote.remote1.skip_unavailable + - is_true: transient.cluster.remote.remote1.skip_unavailable - do: cluster.remote_info: {} @@ -100,9 +100,9 @@ cluster.put_settings: body: transient: - search.remote.remote1.skip_unavailable: false + cluster.remote.remote1.skip_unavailable: false - - is_false: transient.search.remote.remote1.skip_unavailable + - is_false: transient.cluster.remote.remote1.skip_unavailable - do: cluster.remote_info: {} @@ -113,7 +113,7 @@ cluster.put_settings: body: transient: - search.remote.remote1.skip_unavailable: null + cluster.remote.remote1.skip_unavailable: null - match: {transient: {}} @@ -126,5 +126,5 @@ cluster.put_settings: body: transient: - search.remote.remote1.seeds: null - search.remote.remote1.skip_unavailable: null + cluster.remote.remote1.seeds: null + cluster.remote.remote1.skip_unavailable: null diff --git a/qa/unconfigured-node-name/build.gradle b/qa/unconfigured-node-name/build.gradle new file mode 100644 index 00000000000..dcc3e7c6a16 --- /dev/null +++ b/qa/unconfigured-node-name/build.gradle @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +integTestCluster { + setting 'node.name', null +} + +integTestRunner { + systemProperty 'tests.logfile', + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }.log" +} diff --git a/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeNameInLogsIT.java b/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeNameInLogsIT.java new file mode 100644 index 00000000000..9f36a600b68 --- /dev/null +++ b/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeNameInLogsIT.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.unconfigured_node_name; + +import org.elasticsearch.bootstrap.BootstrapInfo; +import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase; + +import java.io.IOException; +import java.io.BufferedReader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; + +public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase { + @Override + protected BufferedReader openReader(Path logFile) throws IOException { + assumeTrue("We log a line without the node name if we can't install the seccomp filters", + BootstrapInfo.isSystemCallFilterInstalled()); + return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> { + try { + return Files.newBufferedReader(logFile, StandardCharsets.UTF_8); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + + public void testDummy() { + /* Dummy test case so that, when we run this test on a platform that + * does not support our syscall filters and we skip the test above, + * we don't fail the entire test run because we skipped all the tests. + */ + } +} diff --git a/qa/unconfigured-node-name/src/test/resources/plugin-security.policy b/qa/unconfigured-node-name/src/test/resources/plugin-security.policy new file mode 100644 index 00000000000..d0d865c4ede --- /dev/null +++ b/qa/unconfigured-node-name/src/test/resources/plugin-security.policy @@ -0,0 +1,4 @@ +grant { + // Needed to read the log file + permission java.io.FilePermission "${tests.logfile}", "read"; +}; diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index 4a0c9146962..4c3b48cbac9 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -69,9 +67,7 @@ esvagrant { } forbiddenApisMain { - signaturesURLs = [ - PrecommitTasks.getResource('/forbidden/jdk-signatures.txt') - ] + replaceSignatureFiles 'jdk-signatures' } // we don't have additional tests for the tests themselves diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json index 090c429fd82..13281a2a232 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json @@ -33,6 +33,11 @@ "type" : "number", "description" : "A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if, for instance, a shard cannot match any documents based on its rewrite method, i.e. if date filters are mandatory to match but the shard bounds and the query are disjoint.", "default" : 128 + }, + "max_concurrent_shard_requests" : { + "type" : "number", + "description" : "The number of shard requests each sub search executes concurrently. This value should be used to limit the impact of the search on the cluster by capping the number of concurrent shard requests", + "default" : "The default grows with the number of nodes in the cluster but is at most 256." } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json new file mode 100644 index 00000000000..487beaba865 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json @@ -0,0 +1,23 @@ +{ + "nodes.reload_secure_settings": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-reload-secure-settings.html", + "methods": ["POST"], + "url": { + "path": "/_nodes/reload_secure_settings", + "paths": ["/_nodes/reload_secure_settings", "/_nodes/{node_id}/reload_secure_settings"], + "parts": { + "node_id": { + "type": "list", + "description": "A comma-separated list of node IDs to target with the reload call. Usually left empty because reloading should involve all cluster nodes." 
+ } + }, + "params": { + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" + } + } + }, + "body": null + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml index 536e2bfaf94..fb884ddfca2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml @@ -61,3 +61,35 @@ setup: - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" } - match: { responses.3.error.root_cause.0.index: index_3 } - match: { responses.4.hits.total: 4 } + +--- +"Least impact smoke test": +# only passing these parameters to make sure they are consumed + - do: + max_concurrent_shard_requests: 1 + max_concurrent_searches: 1 + msearch: + body: + - index: index_* + - query: + match: {foo: foo} + - index: index_2 + - query: + match_all: {} + - index: index_1 + - query: + match: {foo: foo} + - index: index_3 + - query: + match_all: {} + - type: test + - query: + match_all: {} + + - match: { responses.0.hits.total: 2 } + - match: { responses.1.hits.total: 1 } + - match: { responses.2.hits.total: 1 } + - match: { responses.3.error.root_cause.0.type: index_not_found_exception } + - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" } + - match: { responses.3.error.root_cause.0.index: index_3 } + - match: { responses.4.hits.total: 4 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.reload_secure_settings/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.reload_secure_settings/10_basic.yml new file mode 100644 index 00000000000..0a4cf0d64a0 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.reload_secure_settings/10_basic.yml @@ -0,0 +1,8 @@ +--- +"node_reload_secure_settings test": + + - do: + nodes.reload_secure_settings: {} + + - is_true: nodes + - is_true: cluster_name diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml index 5ecc357e0e1..6ab18146bba 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml @@ -233,3 +233,51 @@ query: match_all: {} size: 0 + +--- +"Scroll max_score is null": + - skip: + version: " - 6.99.99" + reason: max_score was set to 0 rather than null before 7.0 + + - do: + indices.create: + index: test_scroll + - do: + index: + index: test_scroll + type: test + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_scroll + type: test + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + search: + index: test_scroll + size: 1 + scroll: 1m + sort: foo + body: + query: + match_all: {} + + - set: {_scroll_id: scroll_id} + - length: {hits.hits: 1 } + - match: { hits.max_score: null } + + - do: + scroll: + scroll_id: $scroll_id + scroll: 1m + + - length: {hits.hits: 1 } + - match: { hits.max_score: null } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/100_stored_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/100_stored_fields.yml index f39b4dbd3f5..92910a4f1f9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/100_stored_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/100_stored_fields.yml @@ -39,6 +39,5 @@ setup: stored_fields: "_none_" 
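# stored_fields set to "_none_" disables stored-field and metadata-field loading
# entirely, which is why the assertions below expect even _id to be absent.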
- is_false: hits.hits.0._id - - is_false: hits.hits.0._type - is_false: hits.hits.0._source diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml index 521dc4c1cac..dad05cce4eb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -244,6 +244,23 @@ setup: - match: { hits.total: 6 } - length: { hits.hits: 0 } +--- +"no hits and inner_hits max_score null": + + - skip: + version: " - 6.99.99" + reason: max_score was set to 0 rather than null before 7.0 + + - do: + search: + index: test + body: + size: 0 + collapse: { field: numeric_group, inner_hits: { name: sub_hits, size: 1} } + sort: [{ sort: desc }] + + - match: { hits.max_score: null } + --- "field collapsing and multiple inner_hits": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml index dc6b130b289..c63dee2e211 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml @@ -128,7 +128,6 @@ setup: - match: { hits.total: 2 } - match: { aggregations.some_agg.doc_count: 3 } - - do: search: pre_filter_shard_size: 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml index dfe0b6825cd..62770e2915d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml @@ -39,6 +39,7 @@ setup: df: text - match: {hits.total: 1} + - match: {hits.max_score: 1} - match: {hits.hits.0._score: 1} - do: @@ -52,6 +53,7 @@ setup: boost: 2 - match: {hits.total: 1} + - match: {hits.max_score: 2} - match: {hits.hits.0._score: 2} - do: @@ -61,6 +63,7 @@ setup: df: text - match: {hits.total: 1} + - match: {hits.max_score: 1} - match: {hits.hits.0._score: 1} --- diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml index 24920580c45..4d7ee91bef5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml @@ -29,6 +29,7 @@ query_weight: 5 rescore_query_weight: 10 + - match: {hits.max_score: 15} - match: { hits.hits.0._score: 15 } - match: { hits.hits.0._explanation.value: 15 } diff --git a/server/build.gradle b/server/build.gradle index b22a93a702c..c01fb92b050 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -58,13 +58,13 @@ if (!isEclipse && !isIdea) { sourceCompatibility = 9 targetCompatibility = 9 } - - /* Enable this when forbiddenapis was updated to 2.6. 
- * See: https://github.com/elastic/elasticsearch/issues/29292 + forbiddenApisJava9 { - targetCompatibility = 9 + if (project.runtimeJavaVersion < JavaVersion.VERSION_1_9) { + targetCompatibility = JavaVersion.VERSION_1_9 + javaHome = project.java9Home + } } - */ jar { metaInf { @@ -304,17 +304,7 @@ thirdPartyAudit.excludes = [ 'com.google.common.geometry.S2LatLng', ] -if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { - // Used by Log4J 2.11.1 - thirdPartyAudit.excludes += [ - 'java.io.ObjectInputFilter', - 'java.io.ObjectInputFilter$Config', - 'java.io.ObjectInputFilter$FilterInfo', - 'java.io.ObjectInputFilter$Status' - ] -} - -if (JavaVersion.current() > JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += ['javax.xml.bind.DatatypeConverter'] } @@ -341,16 +331,3 @@ if (isEclipse == false || project.path == ":server-tests") { check.dependsOn integTest integTest.mustRunAfter test } - -// TODO: remove these compatibility tests in 7.0 -additionalTest('testScriptedMetricAggParamsV6Compatibility') { - include '**/ScriptedMetricAggregatorAggStateV6CompatTests.class' - include '**/InternalScriptedMetricAggStateV6CompatTests.class' - systemProperty 'es.aggregations.enable_scripted_metric_agg_param', 'true' -} - -test { - // these are tested explicitly in separate test tasks - exclude '**/ScriptedMetricAggregatorAggStateV6CompatTests.class' - exclude '**/InternalScriptedMetricAggStateV6CompatTests.class' -} diff --git a/server/licenses/lucene-analyzers-common-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-analyzers-common-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 2cbf3968762..00000000000 --- a/server/licenses/lucene-analyzers-common-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c547b30525ad80d0ceeaa40c2d3a901c7e76fd46 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..72f7319e6af --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +a22f1c6749ca4a3fbc9b330161a8ea3301cac8de \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-backward-codecs-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 9e2473361f0..00000000000 --- a/server/licenses/lucene-backward-codecs-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c327295d54d5abd2684e00c3aefe58aa1caace7 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..f4bf99b4a03 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +41ce415b93d75662cc2e790d09120bc0234d6b1b \ No newline at end of file diff --git a/server/licenses/lucene-core-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-core-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index fdedaf3fc57..00000000000 --- a/server/licenses/lucene-core-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -73dd7703a94ec2357581f65ee7c1c4d618ff310f \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 
b/server/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..50a21f5c504 --- /dev/null +++ b/server/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +06c1e4fa838807059d27aaf5405cfdfe7303369c \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-grouping-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 4e555692b0f..00000000000 --- a/server/licenses/lucene-grouping-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c3802fa30990a1758f2df19d17fe2c95fc45870 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..76bdfa1c6c4 --- /dev/null +++ b/server/licenses/lucene-grouping-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +5b0a019a938deb58160647e7640b348bb99c10a8 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-highlighter-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 73b6c15f332..00000000000 --- a/server/licenses/lucene-highlighter-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d7abdbb7900d7e6a76c391d8be07217c0d882ca \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..017225c0e46 --- /dev/null +++ b/server/licenses/lucene-highlighter-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +4d813f3ba0ddd56bac728edb88ed8875e6acfd18 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-join-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 23414b8e8e1..00000000000 --- a/server/licenses/lucene-join-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -011f78ae9d9a386fcf20ceea29ba30e75fb512e8 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..29cdbbfe69f --- /dev/null +++ b/server/licenses/lucene-join-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +00c7e20b6a35ebecc875dd52bfb324967c5555d6 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-memory-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index d227ebaf463..00000000000 --- a/server/licenses/lucene-memory-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c3dd461a7cebdcacc77304660218513e10f89adb \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..49087293afa --- /dev/null +++ b/server/licenses/lucene-memory-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +e4dbff54a0befdc7d67c0f39890586c220df718e \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-misc-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index a892f3a2272..00000000000 --- a/server/licenses/lucene-misc-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-d63101181708d78eccc441b0d1193dd91d1a0bf1 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..3c12235dff6 --- /dev/null +++ b/server/licenses/lucene-misc-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +74d17f6bdf1fa4d499f02904432aa3b1024bde88 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-queries-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 5d0fead48cb..00000000000 --- a/server/licenses/lucene-queries-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -22e56fbd44d6a47d7dddbdda3c17ce22ad0a6680 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..a423deb397d --- /dev/null +++ b/server/licenses/lucene-queries-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +bec78be38f777765146c35f65e247909563d6814 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-queryparser-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 8be3d6447b0..00000000000 --- a/server/licenses/lucene-queryparser-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -36b38a1d71045f5bee5dc40526f8d57084dbdc00 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..79195ed1d5e --- /dev/null +++ b/server/licenses/lucene-queryparser-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +74b76f8fed44400bc2a5d938ca2611a97b4d7a7c \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-sandbox-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 6d968f5400c..00000000000 --- a/server/licenses/lucene-sandbox-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21eb8b111bcb94f4abb8c6402dfd10f51ecc0b38 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..d5cd94b7fe5 --- /dev/null +++ b/server/licenses/lucene-sandbox-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +2f65fa728b3bc924db6538f4c3caf2fcd25451cf \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-spatial-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index b6aec2eae1d..00000000000 --- a/server/licenses/lucene-spatial-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d60081c5641ed21aea82d5d0976b40e1f184c8e5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..76857b72f01 --- /dev/null +++ b/server/licenses/lucene-spatial-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +916a91f0cab2d3684707c59e9adca7b3030b2c66 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.5.0-snapshot-13b9e28f9d.jar.sha1 
b/server/licenses/lucene-spatial-extras-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 6999baccc89..00000000000 --- a/server/licenses/lucene-spatial-extras-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2d42b373546aa8923d25e4e9a673dd186064f9bd \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..7ab84df992b --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +eb3e630d6013e41838fb277943ce921f256f1c61 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-spatial3d-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index b866b198556..00000000000 --- a/server/licenses/lucene-spatial3d-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f31607959e5a2ed84ab2d9a007a3f76e9a2d38c \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..d793f4c54d9 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +fa10ff14eab2f579cff2f0fa33c9c7f3b24daf12 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-suggest-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 55e1c5990de..00000000000 --- a/server/licenses/lucene-suggest-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f7619348f0619867c52f4801531c70358f49873a \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..0ea0c2fb573 --- /dev/null +++ b/server/licenses/lucene-suggest-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +3dd65ca6612b4f98530847b99ab348fd83055fdf \ No newline at end of file diff --git a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java b/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java index 3cc16ce9320..63db15b2ee1 100644 --- a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.ConstantScoreWeight; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; @@ -58,7 +59,7 @@ public final class BinaryDocValuesRangeQuery extends Query { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, boost) { @Override diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index cd5da674b8e..dd3ac992475 100644 --- 
a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -22,7 +22,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermContext; +import org.apache.lucene.index.TermStates; import org.apache.lucene.index.TermState; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; @@ -84,10 +84,10 @@ public abstract class BlendedTermQuery extends Query { return rewritten; } IndexReaderContext context = reader.getContext(); - TermContext[] ctx = new TermContext[terms.length]; + TermStates[] ctx = new TermStates[terms.length]; int[] docFreqs = new int[ctx.length]; for (int i = 0; i < terms.length; i++) { - ctx[i] = TermContext.build(context, terms[i]); + ctx[i] = TermStates.build(context, terms[i], true); docFreqs[i] = ctx[i].docFreq(); } @@ -96,16 +96,16 @@ public abstract class BlendedTermQuery extends Query { return topLevelQuery(terms, ctx, docFreqs, maxDoc); } - protected abstract Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc); + protected abstract Query topLevelQuery(Term[] terms, TermStates[] ctx, int[] docFreqs, int maxDoc); - protected void blend(final TermContext[] contexts, int maxDoc, IndexReader reader) throws IOException { + protected void blend(final TermStates[] contexts, int maxDoc, IndexReader reader) throws IOException { if (contexts.length <= 1) { return; } int max = 0; long minSumTTF = Long.MAX_VALUE; for (int i = 0; i < contexts.length; i++) { - TermContext ctx = contexts[i]; + TermStates ctx = contexts[i]; int df = ctx.docFreq(); // we use the max here since it's the only "true" estimation we can make here // at least max(df) documents have that term. 
Sum or Averages don't seem @@ -155,7 +155,7 @@ public abstract class BlendedTermQuery extends Query { // the more popular (more frequent) fields // that acts as a tie breaker for (int i : tieBreak) { - TermContext ctx = contexts[i]; + TermStates ctx = contexts[i]; if (ctx.docFreq() == 0) { break; } @@ -183,12 +183,12 @@ public abstract class BlendedTermQuery extends Query { } } - private TermContext adjustTTF(IndexReaderContext readerContext, TermContext termContext, long sumTTF) { + private TermStates adjustTTF(IndexReaderContext readerContext, TermStates termContext, long sumTTF) throws IOException { assert termContext.wasBuiltFor(readerContext); if (sumTTF == -1 && termContext.totalTermFreq() == -1) { return termContext; } - TermContext newTermContext = new TermContext(readerContext); + TermStates newTermContext = new TermStates(readerContext); List leaves = readerContext.leaves(); final int len; if (leaves == null) { @@ -199,7 +199,7 @@ public abstract class BlendedTermQuery extends Query { int df = termContext.docFreq(); long ttf = sumTTF; for (int i = 0; i < len; i++) { - TermState termState = termContext.get(i); + TermState termState = termContext.get(leaves.get(i)); if (termState == null) { continue; } @@ -210,7 +210,7 @@ public abstract class BlendedTermQuery extends Query { return newTermContext; } - private static TermContext adjustDF(IndexReaderContext readerContext, TermContext ctx, int newDocFreq) { + private static TermStates adjustDF(IndexReaderContext readerContext, TermStates ctx, int newDocFreq) throws IOException { assert ctx.wasBuiltFor(readerContext); // Use a value of ttf that is consistent with the doc freq (ie. gte) long newTTF; @@ -226,9 +226,9 @@ public abstract class BlendedTermQuery extends Query { } else { len = leaves.size(); } - TermContext newCtx = new TermContext(readerContext); + TermStates newCtx = new TermStates(readerContext); for (int i = 0; i < len; ++i) { - TermState termState = ctx.get(i); + TermState termState = ctx.get(leaves.get(i)); if (termState == null) { continue; } @@ -299,7 +299,7 @@ public abstract class BlendedTermQuery extends Query { public static BlendedTermQuery commonTermsBlendedQuery(Term[] terms, final float[] boosts, final float maxTermFrequency) { return new BlendedTermQuery(terms, boosts) { @Override - protected Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc) { + protected Query topLevelQuery(Term[] terms, TermStates[] ctx, int[] docFreqs, int maxDoc) { BooleanQuery.Builder highBuilder = new BooleanQuery.Builder(); BooleanQuery.Builder lowBuilder = new BooleanQuery.Builder(); for (int i = 0; i < terms.length; i++) { @@ -342,7 +342,7 @@ public abstract class BlendedTermQuery extends Query { public static BlendedTermQuery dismaxBlendedQuery(Term[] terms, final float[] boosts, final float tieBreakerMultiplier) { return new BlendedTermQuery(terms, boosts) { @Override - protected Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc) { + protected Query topLevelQuery(Term[] terms, TermStates[] ctx, int[] docFreqs, int maxDoc) { List queries = new ArrayList<>(ctx.length); for (int i = 0; i < terms.length; i++) { Query query = new TermQuery(terms[i], ctx[i]); diff --git a/server/src/main/java/org/apache/lucene/queries/MinDocQuery.java b/server/src/main/java/org/apache/lucene/queries/MinDocQuery.java index 0fed8316a05..b9a001b6e73 100644 --- a/server/src/main/java/org/apache/lucene/queries/MinDocQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/MinDocQuery.java @@ 
-26,6 +26,7 @@ import org.apache.lucene.search.ConstantScoreWeight; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; @@ -76,7 +77,7 @@ public final class MinDocQuery extends Query { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { if (readerId == null) { throw new IllegalStateException("Rewrite first"); } else if (Objects.equals(searcher.getIndexReader().getContext().id(), readerId) == false) { diff --git a/server/src/main/java/org/apache/lucene/queries/SearchAfterSortedDocQuery.java b/server/src/main/java/org/apache/lucene/queries/SearchAfterSortedDocQuery.java index 5da0e618752..2c436f02272 100644 --- a/server/src/main/java/org/apache/lucene/queries/SearchAfterSortedDocQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/SearchAfterSortedDocQuery.java @@ -23,16 +23,17 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.ConstantScoreWeight; import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.EarlyTerminatingSortingCollector; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafFieldComparator; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.Weight; +import org.elasticsearch.common.lucene.Lucene; import java.io.IOException; import java.util.Arrays; @@ -53,7 +54,7 @@ public class SearchAfterSortedDocQuery extends Query { throw new IllegalArgumentException("after doc has " + after.fields.length + " value(s) but sort has " + sort.getSort().length + "."); } - this.sort = sort; + this.sort = Objects.requireNonNull(sort); this.after = after; int numFields = sort.getSort().length; this.fieldComparators = new FieldComparator[numFields]; @@ -70,12 +71,12 @@ public class SearchAfterSortedDocQuery extends Query { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, 1.0f) { @Override public Scorer scorer(LeafReaderContext context) throws IOException { Sort segmentSort = context.reader().getMetaData().getSort(); - if (EarlyTerminatingSortingCollector.canEarlyTerminate(sort, segmentSort) == false) { + if (segmentSort == null || Lucene.canEarlyTerminate(sort, segmentSort) == false) { throw new IOException("search sort :[" + sort.getSort() + "] does not match the index sort:[" + segmentSort + "]"); } final int afterDoc = after.doc - context.docBase; diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java index c5362cbf858..4dba67abdeb 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java +++ 
b/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.PriorityQueue; import java.util.ArrayList; @@ -40,9 +41,9 @@ public final class CollapseTopFieldDocs extends TopFieldDocs { /** The collapse value for each top doc */ public final Object[] collapseValues; - public CollapseTopFieldDocs(String field, long totalHits, ScoreDoc[] scoreDocs, - SortField[] sortFields, Object[] values, float maxScore) { - super(totalHits, scoreDocs, sortFields, maxScore); + public CollapseTopFieldDocs(String field, TotalHits totalHits, ScoreDoc[] scoreDocs, - SortField[] sortFields, Object[] values) { + super(totalHits, scoreDocs, sortFields); this.field = field; this.collapseValues = values; } @@ -172,23 +173,23 @@ public final class CollapseTopFieldDocs extends TopFieldDocs { long totalHitCount = 0; int availHitCount = 0; - float maxScore = Float.MIN_VALUE; + TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO; for(int shardIDX=0;shardIDX 0) { availHitCount += shard.scoreDocs.length; queue.add(new ShardRef(shardIDX, setShardIndex == false)); - maxScore = Math.max(maxScore, shard.getMaxScore()); } } - if (availHitCount == 0) { - maxScore = Float.NaN; - } - final ScoreDoc[] hits; final Object[] values; if (availHitCount <= start) { @@ -237,6 +238,7 @@ public final class CollapseTopFieldDocs extends TopFieldDocs { hits = hitList.toArray(new ScoreDoc[0]); values = collapseList.toArray(new Object[0]); } - return new CollapseTopFieldDocs(collapseField, totalHitCount, hits, sort.getSort(), values, maxScore); + TotalHits totalHits = new TotalHits(totalHitCount, totalHitsRelation); + return new CollapseTopFieldDocs(collapseField, totalHits, hits, sort.getSort(), values); } } diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java index fedda3ead59..e28d8990c91 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java +++ b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java @@ -19,10 +19,12 @@ package org.apache.lucene.search.grouping; import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TotalHits; import java.io.IOException; import java.util.Collection; @@ -34,40 +36,34 @@ import static org.apache.lucene.search.SortField.Type.SCORE; * A collector that groups documents based on field values and returns {@link CollapseTopFieldDocs} * output. The collapsing is done in a single pass by selecting only the top sorted document per collapse key. * The value used for the collapse key of each group can be found in {@link CollapseTopFieldDocs#collapseValues}. + * + * TODO: If the sort is based on score we should propagate the minimum competitive score when orderedGroups is full. + * This is safe for collapsing since the group sort is the same as the query sort. 
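+ * (A hypothetical sketch of that TODO: once topN groups have been collected, the collector
+ * could call Scorable#setMinCompetitiveScore with the weakest retained score so that
+ * clearly non-competitive documents are skipped early.)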
*/ public final class CollapsingTopDocsCollector extends FirstPassGroupingCollector { protected final String collapseField; protected final Sort sort; - protected Scorer scorer; + protected Scorable scorer; private int totalHitCount; - private float maxScore; - private final boolean trackMaxScore; - CollapsingTopDocsCollector(GroupSelector groupSelector, String collapseField, Sort sort, - int topN, boolean trackMaxScore) { + CollapsingTopDocsCollector(GroupSelector groupSelector, String collapseField, Sort sort, int topN) { super(groupSelector, sort, topN); this.collapseField = collapseField; - this.trackMaxScore = trackMaxScore; - if (trackMaxScore) { - maxScore = Float.NEGATIVE_INFINITY; - } else { - maxScore = Float.NaN; - } this.sort = sort; } /** - * Transform {@link FirstPassGroupingCollector#getTopGroups(int, boolean)} output in + * Transform {@link FirstPassGroupingCollector#getTopGroups(int)} output in * {@link CollapseTopFieldDocs}. The collapsing needs only one pass so we can get the final top docs at the end * of the first pass. */ public CollapseTopFieldDocs getTopDocs() throws IOException { - Collection> groups = super.getTopGroups(0, true); + Collection> groups = super.getTopGroups(0); if (groups == null) { - return new CollapseTopFieldDocs(collapseField, totalHitCount, new ScoreDoc[0], - sort.getSort(), new Object[0], Float.NaN); + TotalHits totalHits = new TotalHits(0, TotalHits.Relation.EQUAL_TO); + return new CollapseTopFieldDocs(collapseField, totalHits, new ScoreDoc[0], sort.getSort(), new Object[0]); } FieldDoc[] docs = new FieldDoc[groups.size()]; Object[] collapseValues = new Object[groups.size()]; @@ -92,20 +88,21 @@ public final class CollapsingTopDocsCollector extends FirstPassGroupingCollec collapseValues[pos] = group.groupValue; pos++; } - return new CollapseTopFieldDocs(collapseField, totalHitCount, docs, sort.getSort(), - collapseValues, maxScore); + TotalHits totalHits = new TotalHits(totalHitCount, TotalHits.Relation.EQUAL_TO); + return new CollapseTopFieldDocs(collapseField, totalHits, docs, sort.getSort(), collapseValues); } @Override - public boolean needsScores() { - if (super.needsScores() == false) { - return trackMaxScore; + public ScoreMode scoreMode() { + if (super.scoreMode().needsScores()) { + return ScoreMode.COMPLETE; + } else { + return ScoreMode.COMPLETE_NO_SCORES; } - return true; } @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { super.setScorer(scorer); this.scorer = scorer; } @@ -113,9 +110,6 @@ public final class CollapsingTopDocsCollector extends FirstPassGroupingCollec @Override public void collect(int doc) throws IOException { super.collect(doc); - if (trackMaxScore) { - maxScore = Math.max(maxScore, scorer.score()); - } totalHitCount++; } @@ -134,9 +128,9 @@ public final class CollapsingTopDocsCollector extends FirstPassGroupingCollec * @param topN How many top groups to keep. */ public static CollapsingTopDocsCollector createNumeric(String collapseField, Sort sort, - int topN, boolean trackMaxScore) { + int topN) { return new CollapsingTopDocsCollector<>(new CollapsingDocValuesSource.Numeric(collapseField), - collapseField, sort, topN, trackMaxScore); + collapseField, sort, topN); } /** @@ -153,8 +147,8 @@ public final class CollapsingTopDocsCollector extends FirstPassGroupingCollec * @param topN How many top groups to keep. 
*/ public static CollapsingTopDocsCollector createKeyword(String collapseField, Sort sort, - int topN, boolean trackMaxScore) { + int topN) { return new CollapsingTopDocsCollector<>(new CollapsingDocValuesSource.Keyword(collapseField), - collapseField, sort, topN, trackMaxScore); + collapseField, sort, topN); } } diff --git a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java index 45ee7becc98..d9bf9613cba 100644 --- a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java +++ b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java @@ -48,6 +48,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.function.Predicate; /** * Subclass of the {@link UnifiedHighlighter} that works for a single field in a single document. @@ -136,15 +137,16 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter { @Override protected FieldHighlighter getFieldHighlighter(String field, Query query, Set allTerms, int maxPassages) { - BytesRef[] terms = filterExtractedTerms(getFieldMatcher(field), allTerms); + Predicate fieldMatcher = getFieldMatcher(field); + BytesRef[] terms = filterExtractedTerms(fieldMatcher, allTerms); Set highlightFlags = getFlags(field); PhraseHelper phraseHelper = getPhraseHelper(field, query, highlightFlags); CharacterRunAutomaton[] automata = getAutomata(field, query, highlightFlags); OffsetSource offsetSource = getOptimizedOffsetSource(field, terms, phraseHelper, automata); BreakIterator breakIterator = new SplittingBreakIterator(getBreakIterator(field), UnifiedHighlighter.MULTIVAL_SEP_CHAR); - FieldOffsetStrategy strategy = - getOffsetStrategy(offsetSource, field, terms, phraseHelper, automata, highlightFlags); + UHComponents components = new UHComponents(field, fieldMatcher, query, terms, phraseHelper, automata, highlightFlags); + FieldOffsetStrategy strategy = getOffsetStrategy(offsetSource, components); return new CustomFieldHighlighter(field, strategy, breakIteratorLocale, breakIterator, getScorer(field), maxPassages, (noMatchSize > 0 ? 
1 : 0), getFormatter(field), noMatchSize, fieldValue); } diff --git a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java index 6b670953ecb..16073abfc00 100644 --- a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -22,7 +22,6 @@ package org.apache.lucene.search.vectorhighlight; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.queries.BlendedTermQuery; -import org.apache.lucene.queries.BoostingQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; @@ -74,12 +73,11 @@ public class CustomFieldQuery extends FieldQuery { } else if (sourceQuery instanceof BlendedTermQuery) { final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery; flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost); - } else if (sourceQuery instanceof BoostingQuery) { - BoostingQuery boostingQuery = (BoostingQuery) sourceQuery; - //flatten positive query with query boost - flatten(boostingQuery.getMatch(), reader, flatQueries, boost); - //flatten negative query with negative boost - flatten(boostingQuery.getContext(), reader, flatQueries, boostingQuery.getBoost()); + } else if (sourceQuery instanceof org.apache.lucene.queries.function.FunctionScoreQuery) { + org.apache.lucene.queries.function.FunctionScoreQuery funcScoreQuery = + (org.apache.lucene.queries.function.FunctionScoreQuery) sourceQuery; + //flatten query with query boost + flatten(funcScoreQuery.getWrappedQuery(), reader, flatQueries, boost); } else if (sourceQuery instanceof SynonymQuery) { // SynonymQuery should be handled by the parent class directly. // This statement should be removed when https://issues.apache.org/jira/browse/LUCENE-7484 is merged. 
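The three files above are mechanical fallout of the Lucene 8 upgrade: collectors now receive a `Scorable` instead of a `Scorer`, report a `ScoreMode` instead of a boolean `needsScores()`, and express hit counts as `TotalHits`; `BoostingQuery` is gone, with `FunctionScoreQuery` taking over its role in the highlighter. A minimal sketch of the new collector contract, assuming lucene-core 8.x on the classpath; `Scorable`, `ScoreMode` and `TotalHits` are the real replacement types, while the collector class itself is invented for illustration:

```java
// Minimal sketch of the Lucene 8 collector contract; HitCountingCollector
// is an invented example, not part of this change.
import java.io.IOException;

import org.apache.lucene.search.Scorable;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.search.TotalHits;

public class HitCountingCollector extends SimpleCollector {
    private Scorable scorer; // pre-8.0 collectors were handed a Scorer here
    private int totalHitCount;

    @Override
    public void setScorer(Scorable scorer) throws IOException {
        this.scorer = scorer; // Scorable is the narrow, scoring-only view of a Scorer
    }

    @Override
    public void collect(int doc) throws IOException {
        totalHitCount++;
    }

    @Override
    public ScoreMode scoreMode() { // replaces boolean needsScores()
        return ScoreMode.COMPLETE_NO_SCORES;
    }

    public TotalHits totalHits() {
        // hit counts now carry an accuracy relation instead of being a bare int
        return new TotalHits(totalHitCount, TotalHits.Relation.EQUAL_TO);
    }
}
```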
diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 0c5ee331515..3f985f0995c 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -44,7 +44,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -137,17 +136,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte super(in.readOptionalString(), in.readException()); readStackTrace(this, in); headers.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString)); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - metadata.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString)); - } else { - for (Iterator>> iterator = headers.entrySet().iterator(); iterator.hasNext(); ) { - Map.Entry> header = iterator.next(); - if (header.getKey().startsWith("es.")) { - metadata.put(header.getKey(), header.getValue()); - iterator.remove(); - } - } - } + metadata.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString)); } /** @@ -287,15 +276,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte out.writeOptionalString(this.getMessage()); out.writeException(this.getCause()); writeStackTraces(this, out); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString); - out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString); - } else { - Map> finalHeaders = new HashMap<>(headers.size() + metadata.size()); - finalHeaders.putAll(headers); - finalHeaders.putAll(metadata); - out.writeMapOfLists(finalHeaders, StreamOutput::writeString, StreamOutput::writeString); - } + out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString); + out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString); } public static ElasticsearchException readException(StreamInput input, int id) throws IOException { @@ -1018,11 +1000,11 @@ public class ElasticsearchException extends RuntimeException implements ToXConte STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145, UNKNOWN_VERSION_ADDED), TASK_CANCELLED_EXCEPTION(org.elasticsearch.tasks.TaskCancelledException.class, - org.elasticsearch.tasks.TaskCancelledException::new, 146, Version.V_5_1_1), + org.elasticsearch.tasks.TaskCancelledException::new, 146, UNKNOWN_VERSION_ADDED), SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class, - org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2), + org.elasticsearch.env.ShardLockObtainFailedException::new, 147, UNKNOWN_VERSION_ADDED), UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.UnknownNamedObjectException.class, - org.elasticsearch.common.xcontent.UnknownNamedObjectException::new, 148, Version.V_5_2_0), + org.elasticsearch.common.xcontent.UnknownNamedObjectException::new, 148, UNKNOWN_VERSION_ADDED), TOO_MANY_BUCKETS_EXCEPTION(MultiBucketConsumerService.TooManyBucketsException.class, MultiBucketConsumerService.TooManyBucketsException::new, 149, Version.V_7_0_0_alpha1), 
COORDINATION_STATE_REJECTED_EXCEPTION(org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class, diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index 59eb8b60dad..9f756666217 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -136,42 +136,6 @@ public final class ExceptionsHelper { return Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n")); } - static final int MAX_ITERATIONS = 1024; - - /** - * Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable. - * - * @param cause the root throwable - * - * @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable - */ - public static Optional maybeError(final Throwable cause, final Logger logger) { - // early terminate if the cause is already an error - if (cause instanceof Error) { - return Optional.of((Error) cause); - } - - final Queue queue = new LinkedList<>(); - queue.add(cause); - int iterations = 0; - while (!queue.isEmpty()) { - iterations++; - if (iterations > MAX_ITERATIONS) { - logger.warn("giving up looking for fatal errors", cause); - break; - } - final Throwable current = queue.remove(); - if (current instanceof Error) { - return Optional.of((Error) current); - } - Collections.addAll(queue, current.getSuppressed()); - if (current.getCause() != null) { - queue.add(current.getCause()); - } - } - return Optional.empty(); - } - /** * Rethrows the first exception in the list and adds all remaining to the suppressed list. * If the given list is empty no exception is thrown @@ -243,20 +207,57 @@ public final class ExceptionsHelper { return true; } + static final int MAX_ITERATIONS = 1024; + + /** + * Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable. + * + * @param cause the root throwable + * @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable + */ + public static Optional maybeError(final Throwable cause, final Logger logger) { + // early terminate if the cause is already an error + if (cause instanceof Error) { + return Optional.of((Error) cause); + } + + final Queue queue = new LinkedList<>(); + queue.add(cause); + int iterations = 0; + while (queue.isEmpty() == false) { + iterations++; + // this is a guard against deeply nested or circular chains of exceptions + if (iterations > MAX_ITERATIONS) { + logger.warn("giving up looking for fatal errors", cause); + break; + } + final Throwable current = queue.remove(); + if (current instanceof Error) { + return Optional.of((Error) current); + } + Collections.addAll(queue, current.getSuppressed()); + if (current.getCause() != null) { + queue.add(current.getCause()); + } + } + return Optional.empty(); + } + /** * If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be - * caught and bubbles up to the uncaught exception handler. + * caught and bubbles up to the uncaught exception handler. Note that the cause tree is examined for any {@link Error}. See + * {@link #maybeError(Throwable, Logger)} for the semantics. 
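The `maybeError` method relocated above does a bounded breadth-first walk over an exception's causes and suppressed exceptions; the companion method renamed in the next hunk then rethrows any `Error` it finds from a fresh thread, whose stack contains no catch-all frames, so the error reaches the uncaught exception handler. A condensed, self-contained sketch of the pattern; the class and method names here are illustrative, not the Elasticsearch API:

```java
// Condensed sketch of the fatal-error escape hatch: find a buried Error,
// then rethrow it from a thread that nothing on the current stack can catch.
import java.util.Collections;
import java.util.LinkedList;
import java.util.Optional;
import java.util.Queue;

final class FatalErrorEscape {
    private static final int MAX_ITERATIONS = 1024;

    static void maybeDie(final Throwable t) {
        maybeError(t).ifPresent(error -> new Thread(() -> { throw error; }).start());
    }

    static Optional<Error> maybeError(final Throwable cause) {
        final Queue<Throwable> queue = new LinkedList<>();
        queue.add(cause);
        int iterations = 0;
        while (queue.isEmpty() == false) {
            if (++iterations > MAX_ITERATIONS) {
                break; // guard against circular or pathologically deep chains
            }
            final Throwable current = queue.remove();
            if (current instanceof Error) {
                return Optional.of((Error) current);
            }
            Collections.addAll(queue, current.getSuppressed());
            if (current.getCause() != null) {
                queue.add(current.getCause());
            }
        }
        return Optional.empty();
    }
}
```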
* - * @param throwable the throwable to test + * @param throwable the throwable to possibly throw on another thread */ - public static void dieOnError(Throwable throwable) { - final Optional maybeError = ExceptionsHelper.maybeError(throwable, logger); - if (maybeError.isPresent()) { + public static void maybeDieOnAnotherThread(final Throwable throwable) { + ExceptionsHelper.maybeError(throwable, logger).ifPresent(error -> { /* - * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many - * invocations of user-code in try/catch blocks that swallow all throwables. This means that a rethrow here will not bubble up - * to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap - * the exception so as to not lose the original cause during exit. + * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, sometimes the stack + * contains statements that catch any throwable (e.g., Netty, and the JDK futures framework). This means that a rethrow here + * will not bubble up to where we want it to. So, we fork a thread and throw the exception from there where we are sure the + * stack does not contain statements that catch any throwable. We do not wrap the exception so as to not lose the original cause + * during exit. */ try { // try to log the current stack trace @@ -264,12 +265,12 @@ public final class ExceptionsHelper { logger.error("fatal error\n{}", formatted); } finally { new Thread( - () -> { - throw maybeError.get(); - }) - .start(); + () -> { + throw error; + }) + .start(); } - } + }); } /** diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index a815a9711d0..01738930b4b 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -43,85 +43,8 @@ public class Version implements Comparable, ToXContentFragment { * values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 * indicating a release the (internal) format of the id is there so we can easily do after/before checks on the id */ - public static final int V_5_0_0_alpha1_ID = 5000001; - public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); - public static final int V_5_0_0_alpha2_ID = 5000002; - public static final Version V_5_0_0_alpha2 = new Version(V_5_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); - public static final int V_5_0_0_alpha3_ID = 5000003; - public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); - public static final int V_5_0_0_alpha4_ID = 5000004; - public static final Version V_5_0_0_alpha4 = new Version(V_5_0_0_alpha4_ID, org.apache.lucene.util.Version.LUCENE_6_1_0); - public static final int V_5_0_0_alpha5_ID = 5000005; - public static final Version V_5_0_0_alpha5 = new Version(V_5_0_0_alpha5_ID, org.apache.lucene.util.Version.LUCENE_6_1_0); - public static final int V_5_0_0_beta1_ID = 5000026; - public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public static final int V_5_0_0_rc1_ID = 5000051; - public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public 
static final int V_5_0_0_ID = 5000099; - public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public static final int V_5_0_1_ID = 5000199; - public static final Version V_5_0_1 = new Version(V_5_0_1_ID, org.apache.lucene.util.Version.LUCENE_6_2_1); - public static final int V_5_0_2_ID = 5000299; - public static final Version V_5_0_2 = new Version(V_5_0_2_ID, org.apache.lucene.util.Version.LUCENE_6_2_1); - // no version constant for 5.1.0 due to inadvertent release - public static final int V_5_1_1_ID = 5010199; - public static final Version V_5_1_1 = new Version(V_5_1_1_ID, org.apache.lucene.util.Version.LUCENE_6_3_0); - public static final int V_5_1_2_ID = 5010299; - public static final Version V_5_1_2 = new Version(V_5_1_2_ID, org.apache.lucene.util.Version.LUCENE_6_3_0); - public static final int V_5_2_0_ID = 5020099; - public static final Version V_5_2_0 = new Version(V_5_2_0_ID, org.apache.lucene.util.Version.LUCENE_6_4_0); - public static final int V_5_2_1_ID = 5020199; - public static final Version V_5_2_1 = new Version(V_5_2_1_ID, org.apache.lucene.util.Version.LUCENE_6_4_1); - public static final int V_5_2_2_ID = 5020299; - public static final Version V_5_2_2 = new Version(V_5_2_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_1); - public static final int V_5_3_0_ID = 5030099; - public static final Version V_5_3_0 = new Version(V_5_3_0_ID, org.apache.lucene.util.Version.LUCENE_6_4_1); - public static final int V_5_3_1_ID = 5030199; - public static final Version V_5_3_1 = new Version(V_5_3_1_ID, org.apache.lucene.util.Version.LUCENE_6_4_2); - public static final int V_5_3_2_ID = 5030299; - public static final Version V_5_3_2 = new Version(V_5_3_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_2); - public static final int V_5_3_3_ID = 5030399; - public static final Version V_5_3_3 = new Version(V_5_3_3_ID, org.apache.lucene.util.Version.LUCENE_6_4_2); - public static final int V_5_4_0_ID = 5040099; - public static final Version V_5_4_0 = new Version(V_5_4_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_0); - public static final int V_5_4_1_ID = 5040199; - public static final Version V_5_4_1 = new Version(V_5_4_1_ID, org.apache.lucene.util.Version.LUCENE_6_5_1); - public static final int V_5_4_2_ID = 5040299; - public static final Version V_5_4_2 = new Version(V_5_4_2_ID, org.apache.lucene.util.Version.LUCENE_6_5_1); - public static final int V_5_4_3_ID = 5040399; - public static final Version V_5_4_3 = new Version(V_5_4_3_ID, org.apache.lucene.util.Version.LUCENE_6_5_1); - public static final int V_5_5_0_ID = 5050099; - public static final Version V_5_5_0 = new Version(V_5_5_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_5_1_ID = 5050199; - public static final Version V_5_5_1 = new Version(V_5_5_1_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_5_2_ID = 5050299; - public static final Version V_5_5_2 = new Version(V_5_5_2_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_5_3_ID = 5050399; - public static final Version V_5_5_3 = new Version(V_5_5_3_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_6_0_ID = 5060099; - public static final Version V_5_6_0 = new Version(V_5_6_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_6_1_ID = 5060199; - public static final Version V_5_6_1 = new Version(V_5_6_1_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int 
V_5_6_2_ID = 5060299; - public static final Version V_5_6_2 = new Version(V_5_6_2_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_3_ID = 5060399; - public static final Version V_5_6_3 = new Version(V_5_6_3_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_4_ID = 5060499; - public static final Version V_5_6_4 = new Version(V_5_6_4_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_5_ID = 5060599; - public static final Version V_5_6_5 = new Version(V_5_6_5_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_6_ID = 5060699; - public static final Version V_5_6_6 = new Version(V_5_6_6_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_7_ID = 5060799; - public static final Version V_5_6_7 = new Version(V_5_6_7_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_8_ID = 5060899; - public static final Version V_5_6_8 = new Version(V_5_6_8_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_9_ID = 5060999; - public static final Version V_5_6_9 = new Version(V_5_6_9_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_10_ID = 5061099; - public static final Version V_5_6_10 = new Version(V_5_6_10_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_11_ID = 5061199; - public static final Version V_5_6_11 = new Version(V_5_6_11_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); + public static final int V_EMPTY_ID = 0; + public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); @@ -174,15 +97,15 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_3_1 = new Version(V_6_3_1_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_3_2_ID = 6030299; public static final Version V_6_3_2 = new Version(V_6_3_2_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); - public static final int V_6_3_3_ID = 6030399; - public static final Version V_6_3_3 = new Version(V_6_3_3_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_4_0_ID = 6040099; public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); + public static final int V_6_4_1_ID = 6040199; + public static final Version V_6_4_1 = new Version(V_6_4_1_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_5_0_ID = 6050099; public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = - new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); + new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version CURRENT = V_7_0_0_alpha1; static { @@ -200,10 +123,10 @@ public class Version implements Comparable, ToXContentFragment { return V_7_0_0_alpha1; case V_6_5_0_ID: return V_6_5_0; + case V_6_4_1_ID: + return V_6_4_1; case V_6_4_0_ID: return V_6_4_0; - case V_6_3_3_ID: - return V_6_3_3; case V_6_3_2_ID: return V_6_3_2; case V_6_3_1_ID: @@ -246,84 +169,8 @@ public class Version implements Comparable, 
ToXContentFragment { return V_6_0_0_alpha2; case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; - case V_5_6_11_ID: - return V_5_6_11; - case V_5_6_10_ID: - return V_5_6_10; - case V_5_6_9_ID: - return V_5_6_9; - case V_5_6_8_ID: - return V_5_6_8; - case V_5_6_7_ID: - return V_5_6_7; - case V_5_6_6_ID: - return V_5_6_6; - case V_5_6_5_ID: - return V_5_6_5; - case V_5_6_4_ID: - return V_5_6_4; - case V_5_6_3_ID: - return V_5_6_3; - case V_5_6_2_ID: - return V_5_6_2; - case V_5_6_1_ID: - return V_5_6_1; - case V_5_6_0_ID: - return V_5_6_0; - case V_5_5_3_ID: - return V_5_5_3; - case V_5_5_2_ID: - return V_5_5_2; - case V_5_5_1_ID: - return V_5_5_1; - case V_5_5_0_ID: - return V_5_5_0; - case V_5_4_3_ID: - return V_5_4_3; - case V_5_4_2_ID: - return V_5_4_2; - case V_5_4_1_ID: - return V_5_4_1; - case V_5_4_0_ID: - return V_5_4_0; - case V_5_3_3_ID: - return V_5_3_3; - case V_5_3_2_ID: - return V_5_3_2; - case V_5_3_1_ID: - return V_5_3_1; - case V_5_3_0_ID: - return V_5_3_0; - case V_5_2_2_ID: - return V_5_2_2; - case V_5_2_1_ID: - return V_5_2_1; - case V_5_2_0_ID: - return V_5_2_0; - case V_5_1_2_ID: - return V_5_1_2; - case V_5_1_1_ID: - return V_5_1_1; - case V_5_0_2_ID: - return V_5_0_2; - case V_5_0_1_ID: - return V_5_0_1; - case V_5_0_0_ID: - return V_5_0_0; - case V_5_0_0_rc1_ID: - return V_5_0_0_rc1; - case V_5_0_0_beta1_ID: - return V_5_0_0_beta1; - case V_5_0_0_alpha5_ID: - return V_5_0_0_alpha5; - case V_5_0_0_alpha4_ID: - return V_5_0_0_alpha4; - case V_5_0_0_alpha3_ID: - return V_5_0_0_alpha3; - case V_5_0_0_alpha2_ID: - return V_5_0_0_alpha2; - case V_5_0_0_alpha1_ID: - return V_5_0_0_alpha1; + case V_EMPTY_ID: + return V_EMPTY; default: return new Version(id, org.apache.lucene.util.Version.LATEST); } @@ -336,11 +183,14 @@ public class Version implements Comparable, ToXContentFragment { * {@value IndexMetaData#SETTING_VERSION_CREATED} */ public static Version indexCreated(Settings indexSettings) { - final Version indexVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null); - if (indexVersion == null) { - throw new IllegalStateException( - "[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" - + indexSettings.get(IndexMetaData.SETTING_INDEX_UUID) + "]"); + final Version indexVersion = IndexMetaData.SETTING_INDEX_VERSION_CREATED.get(indexSettings); + if (indexVersion == V_EMPTY) { + final String message = String.format( + Locale.ROOT, + "[%s] is not present in the index settings for index with UUID [%s]", + IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), + indexSettings.get(IndexMetaData.SETTING_INDEX_UUID)); + throw new IllegalStateException(message); } return indexVersion; } @@ -473,8 +323,11 @@ public class Version implements Comparable, ToXContentFragment { * is a beta or RC release then the version itself is returned. 
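As context for the id literals in these hunks (for example, `Version.fromId(5060099)` just below resolves to 5.6.0): per the class comment, ids pack major, minor, revision and build into decimal fields, with build 99 marking a GA release. A small illustrative decoder, plain arithmetic rather than an Elasticsearch API:

```java
// Illustrative decoder for the packed version ids above; build values:
// below 25 alpha, below 50 beta, below 99 rc, exactly 99 a GA release.
static String describeVersionId(int id) {
    int major = id / 1000000;
    int minor = (id / 10000) % 100;
    int revision = (id / 100) % 100;
    int build = id % 100;
    return major + "." + minor + "." + revision + " (build " + build + ")";
}
// describeVersionId(6040199) -> "6.4.1 (build 99)", the 6.4.1 release
// describeVersionId(5060099) -> "5.6.0 (build 99)", the 6.x compatibility floor below
```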
*/ public Version minimumCompatibilityVersion() { - if (major >= 6) { - // all major versions from 6 onwards are compatible with last minor series of the previous major + if (major == 6) { + // force the minimum compatibility for version 6 to 5.6 since we don't reference version 5 anymore + return Version.fromId(5060099); + } else if (major >= 7) { + // all major versions from 7 onwards are compatible with last minor series of the previous major Version bwcVersion = null; for (int i = DeclaredVersionsHolder.DECLARED_VERSIONS.size() - 1; i >= 0; i--) { @@ -624,8 +477,10 @@ public class Version implements Comparable, ToXContentFragment { if (field.getType() != Version.class) { continue; } - if ("CURRENT".equals(field.getName())) { - continue; + switch (field.getName()) { + case "CURRENT": + case "V_EMPTY": + continue; } assert field.getName().matches("V(_\\d+)+(_(alpha|beta|rc)\\d+)?") : field.getName(); try { diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 58efce77c9f..b3ec72d5270 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -131,9 +131,7 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shrink.ResizeAction; -import org.elasticsearch.action.admin.indices.shrink.ShrinkAction; import org.elasticsearch.action.admin.indices.shrink.TransportResizeAction; -import org.elasticsearch.action.admin.indices.shrink.TransportShrinkAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; @@ -446,7 +444,6 @@ public class ActionModule extends AbstractModule { actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); actions.register(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); actions.register(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class); - actions.register(ShrinkAction.INSTANCE, TransportShrinkAction.class); actions.register(ResizeAction.INSTANCE, TransportResizeAction.class); actions.register(RolloverAction.INSTANCE, TransportRolloverAction.class); actions.register(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java index 08a97d4d993..490a1760abe 100644 --- a/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java +++ b/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java @@ -33,7 +33,7 @@ import java.util.Objects; public abstract class ShardOperationFailedException implements Streamable, ToXContent { protected String index; - protected int shardId; + protected int shardId = -1; protected String reason; protected RestStatus status; protected Throwable cause; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index 40960c33620..b6959afba5d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.Nullable; @@ -69,7 +68,6 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest(); - for (int i = 0; i < size; i++) { - String index = in.readString(); - AliasFilter aliasFilter = new AliasFilter(in); - indicesAndFilters.put(index, aliasFilter); - } + int size = in.readVInt(); + indicesAndFilters = new HashMap<>(); + for (int i = 0; i < size; i++) { + String index = in.readString(); + AliasFilter aliasFilter = new AliasFilter(in); + indicesAndFilters.put(index, aliasFilter); } } @@ -99,12 +96,10 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo for (DiscoveryNode node : nodes) { node.writeTo(out); } - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeVInt(indicesAndFilters.size()); - for (Map.Entry entry : indicesAndFilters.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } + out.writeVInt(indicesAndFilters.size()); + for (Map.Entry entry : indicesAndFilters.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index b3b24b570ee..41ae57031d3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; import static org.elasticsearch.action.ValidateActions.addValidationError; -import static org.elasticsearch.snapshots.SnapshotInfo.VERBOSE_INTRODUCED; /** * Get snapshot request @@ -75,9 +74,7 @@ public class GetSnapshotsRequest extends MasterNodeRequest repository = in.readString(); snapshots = in.readStringArray(); ignoreUnavailable = in.readBoolean(); - if (in.getVersion().onOrAfter(VERBOSE_INTRODUCED)) { - verbose = in.readBoolean(); - } + verbose = in.readBoolean(); } @Override @@ -86,9 +83,7 @@ public class GetSnapshotsRequest extends MasterNodeRequest out.writeString(repository); out.writeStringArray(snapshots); out.writeBoolean(ignoreUnavailable); - if (out.getVersion().onOrAfter(VERBOSE_INTRODUCED)) { - out.writeBoolean(verbose); - } + out.writeBoolean(verbose); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index 6f702cbbe7c..e7c5a07f568 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -25,6 +25,8 
@@ import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.script.StoredScriptSource; @@ -34,7 +36,7 @@ import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; -public class PutStoredScriptRequest extends AcknowledgedRequest { +public class PutStoredScriptRequest extends AcknowledgedRequest implements ToXContent { private String id; private String context; @@ -121,11 +123,7 @@ public class PutStoredScriptRequest extends AcknowledgedRequest 1 ? positionLength : null); - } + out.writeOptionalVInt(positionLength > 1 ? positionLength : null); out.writeOptionalString(type); out.writeMapWithConsistentOrder(attributes); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index f2e07e29bad..25f7f33647c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -55,8 +55,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ private final Set aliases = new HashSet<>(); - private final Map customs = new HashMap<>(); - private final Set blocks = new HashSet<>(); private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; @@ -83,11 +81,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ return this; } - public CreateIndexClusterStateUpdateRequest customs(Map customs) { - this.customs.putAll(customs); - return this; - } - public CreateIndexClusterStateUpdateRequest blocks(Set blocks) { this.blocks.addAll(blocks); return this; @@ -146,10 +139,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ return aliases; } - public Map customs() { - return customs; - } - public Set blocks() { return blocks; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 875d17eb54b..fa2a395f2c9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -58,9 +57,9 @@ import java.util.Objects; import java.util.Set; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static 
org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; /** * A request to create an index. Best created with {@link org.elasticsearch.client.Requests#createIndexRequest(String)}. @@ -87,8 +86,6 @@ public class CreateIndexRequest extends AcknowledgedRequest private final Set aliases = new HashSet<>(); - private final Map customs = new HashMap<>(); - private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; public CreateIndexRequest() { @@ -388,18 +385,7 @@ public class CreateIndexRequest extends AcknowledgedRequest } else if (ALIASES.match(name, deprecationHandler)) { aliases((Map) entry.getValue()); } else { - // maybe custom? - IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); - if (proto != null) { - try { - customs.put(name, proto.fromMap((Map) entry.getValue())); - } catch (IOException e) { - throw new ElasticsearchParseException("failed to parse custom metadata for [{}]", name); - } - } else { - // found a key which is neither custom defined nor one of the supported ones - throw new ElasticsearchParseException("unknown key [{}] for create index", name); - } + throw new ElasticsearchParseException("unknown key [{}] for create index", name); } } return this; @@ -413,18 +399,6 @@ public class CreateIndexRequest extends AcknowledgedRequest return this.aliases; } - /** - * Adds custom metadata to the index to be created. - */ - public CreateIndexRequest custom(IndexMetaData.Custom custom) { - customs.put(custom.type(), custom); - return this; - } - - public Map customs() { - return this.customs; - } - public ActiveShardCount waitForActiveShards() { return waitForActiveShards; } @@ -474,11 +448,13 @@ public class CreateIndexRequest extends AcknowledgedRequest } mappings.put(type, source); } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); - customs.put(type, customIndexMetaData); + if (in.getVersion().before(Version.V_6_5_0)) { + // This used to be the size of custom metadata classes + int customSize = in.readVInt(); + assert customSize == 0 : "unexpected custom metadata when none is supported"; + if (customSize > 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } int aliasesSize = in.readVInt(); for (int i = 0; i < aliasesSize; i++) { @@ -501,10 +477,9 @@ public class CreateIndexRequest extends AcknowledgedRequest out.writeString(entry.getKey()); out.writeString(entry.getValue()); } - out.writeVInt(customs.size()); - for (Map.Entry entry : customs.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); + if (out.getVersion().before(Version.V_6_5_0)) { + // Size of custom index metadata, which is removed + out.writeVInt(0); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { @@ -542,10 +517,6 @@ public class CreateIndexRequest extends AcknowledgedRequest alias.toXContent(builder, params); } builder.endObject(); - - for (Map.Entry entry : customs.entrySet()) { - builder.field(entry.getKey(), entry.getValue(), params); - } return builder; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index cc8fb2c32c3..d2593e7e94b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -224,14 +223,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 1556ee2341d..a827444acb8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -297,10 +297,6 @@ public class PutMappingRequest extends AcknowledgedRequest im indicesOptions = IndicesOptions.readIndicesOptions(in); type = in.readOptionalString(); source = in.readString(); - if (in.getVersion().before(Version.V_5_3_0)) { - // we do not know the format from earlier versions so convert if necessary - source = XContentHelper.convertToJson(new BytesArray(source), false, false, XContentFactory.xContentType(source)); - } if (in.getVersion().before(Version.V_7_0_0_alpha1)) { in.readBoolean(); // updateAllTypes } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index aa693c1b9e5..cc68a4a7e34 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -186,7 +186,9 @@ public class IndicesSegmentResponse extends BroadcastResponse { builder.field("mode", ((SortedSetSortField) field).getSelector() .toString().toLowerCase(Locale.ROOT)); } - builder.field("missing", field.getMissingValue()); + if (field.getMissingValue() != null) { + builder.field("missing", field.getMissingValue().toString()); + } builder.field("reverse", field.getReverse()); builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 7195fd78154..a9d83cfbce6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -39,6 +39,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.DocsStats; 
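A pattern worth noting across the request classes in this section (`CreateIndexRequest` above, `PutIndexTemplateRequest` below): removing index customs changes the wire format, so serialization is gated on the peer's version. A distilled sketch of that idiom, with the enclosing request class omitted and the method bodies taken directly from these hunks:

```java
// Distilled wire-compatibility sketch: nodes before 6.5.0 still expect the
// removed customs block, so an empty placeholder is written and verified.
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    if (out.getVersion().before(Version.V_6_5_0)) {
        out.writeVInt(0); // size of the removed custom index metadata
    }
}

@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    if (in.getVersion().before(Version.V_6_5_0)) {
        int customSize = in.readVInt();
        if (customSize > 0) {
            throw new IllegalStateException("unexpected custom metadata when none is supported");
        }
    }
}
```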
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; @@ -171,6 +172,12 @@ public class TransportResizeAction extends TransportMasterNodeAction aliases = new HashSet<>(); - private Map customs = new HashMap<>(); - private Integer version; public PutIndexTemplateRequest() { @@ -353,15 +350,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest) entry.getValue()); } else { - // maybe custom? - IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); - if (proto != null) { - try { - customs.put(name, proto.fromMap((Map) entry.getValue())); - } catch (IOException e) { - throw new ElasticsearchParseException("failed to parse custom metadata for [{}]", name); - } - } + throw new ElasticsearchParseException("unknown key [{}] in the template ", name); } } return this; @@ -395,15 +384,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest customs() { - return this.customs; - } - public Set aliases() { return this.aliases; } @@ -492,18 +472,15 @@ public class PutIndexTemplateRequest extends MasterNodeRequest 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } int aliasesSize = in.readVInt(); for (int i = 0; i < aliasesSize; i++) { @@ -530,10 +507,8 @@ public class PutIndexTemplateRequest extends MasterNodeRequest entry : customs.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); + if (out.getVersion().before(Version.V_6_5_0)) { + out.writeVInt(0); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { @@ -570,10 +545,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest entry : customs.entrySet()) { - builder.field(entry.getKey(), entry.getValue(), params); - } - return builder; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index bd8621a1a7d..34eccbf9d8a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -84,7 +84,6 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction

} explain = in.readBoolean(); rewrite = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_5_4_0)) { - allShards = in.readBoolean(); - } + allShards = in.readBoolean(); } @Override @@ -171,9 +168,7 @@ public class ValidateQueryRequest extends BroadcastRequest } out.writeBoolean(explain); out.writeBoolean(rewrite); - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeBoolean(allShards); - } + out.writeBoolean(allShards); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index fb535d312cf..838293b8b1f 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -28,11 +28,13 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -42,6 +44,8 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; @@ -161,11 +165,11 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject { * Represents a failure. 
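The `Failure` hunk that follows registers a `ConstructingObjectParser` so bulk failures can be parsed back from their XContent form. For readers unfamiliar with the idiom, a minimal standalone example; the `Point` class and its fields are invented for illustration, while the parser API is the real one from `org.elasticsearch.common.xcontent`:

```java
// Minimal ConstructingObjectParser example: constructor arguments are
// declared in order, then the parser builds the object in one shot.
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

class Point {
    final String name;
    final int x;

    Point(String name, int x) {
        this.name = name;
        this.x = x;
    }

    static final ConstructingObjectParser<Point, Void> PARSER =
        new ConstructingObjectParser<>("point", true, a -> new Point((String) a[0], (int) a[1]));

    static {
        PARSER.declareString(constructorArg(), new ParseField("name"));
        PARSER.declareInt(constructorArg(), new ParseField("x"));
    }

    static Point fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }
}
```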
*/ public static class Failure implements Writeable, ToXContentFragment { - static final String INDEX_FIELD = "index"; - static final String TYPE_FIELD = "type"; - static final String ID_FIELD = "id"; - static final String CAUSE_FIELD = "cause"; - static final String STATUS_FIELD = "status"; + public static final String INDEX_FIELD = "index"; + public static final String TYPE_FIELD = "type"; + public static final String ID_FIELD = "id"; + public static final String CAUSE_FIELD = "cause"; + public static final String STATUS_FIELD = "status"; private final String index; private final String type; @@ -175,6 +179,23 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject { private final long seqNo; private final boolean aborted; + public static ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "bulk_failures", + true, + a -> + new Failure( + (String)a[0], (String)a[1], (String)a[2], (Exception)a[3], RestStatus.fromCode((int)a[4]) + ) + ); + static { + PARSER.declareString(constructorArg(), new ParseField(INDEX_FIELD)); + PARSER.declareString(constructorArg(), new ParseField(TYPE_FIELD)); + PARSER.declareString(optionalConstructorArg(), new ParseField(ID_FIELD)); + PARSER.declareObject(constructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), new ParseField(CAUSE_FIELD)); + PARSER.declareInt(constructorArg(), new ParseField(STATUS_FIELD)); + } + /** * For write failures before operation was assigned a sequence number. * @@ -244,8 +265,8 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject { } private static boolean supportsAbortedFlag(Version version) { - // The "aborted" flag was added for 5.5.3 and 5.6.0, but was not in 6.0.0-beta2 - return version.after(Version.V_6_0_0_beta2) || (version.major == 5 && version.onOrAfter(Version.V_5_5_3)); + // The "aborted" flag was not in 6.0.0-beta2 + return version.after(Version.V_6_0_0_beta2); } /** @@ -322,6 +343,10 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject { return builder; } + public static Failure fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + @Override public String toString() { return Strings.toString(this); @@ -447,11 +472,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject { @Override public void readFrom(StreamInput in) throws IOException { id = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - opType = OpType.fromId(in.readByte()); - } else { - opType = OpType.fromString(in.readString()); - } + opType = OpType.fromId(in.readByte()); byte type = in.readByte(); if (type == 0) { @@ -474,11 +495,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeByte(opType.getId()); - } else { - out.writeString(opType.getLowercase()); - } + out.writeByte(opType.getId()); if (response == null) { out.writeByte((byte) 2); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index e3e94e82339..a3d7d50f3e2 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; 
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -37,6 +38,7 @@ import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.update.TransportUpdateAction; import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; @@ -521,28 +523,30 @@ public class TransportBulkAction extends HandledTransportAction listener) { long ingestStartTimeInNanos = System.nanoTime(); BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); - ingestService.getPipelineExecutionService().executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> { - logger.debug(() -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", - indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception); - bulkRequestModifier.markCurrentItemAsFailed(exception); - }, (exception) -> { - if (exception != null) { - logger.error("failed to execute pipeline for a bulk request", exception); - listener.onFailure(exception); - } else { - long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos); - BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest(); - ActionListener actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener); - if (bulkRequest.requests().isEmpty()) { - // at this stage, the transport bulk action can't deal with a bulk request with no requests, - // so we stop and send an empty response back to the client. - // (this will happen if pre-processing all items in the bulk failed) - actionListener.onResponse(new BulkResponse(new BulkItemResponse[0], 0)); + ingestService.executeBulkRequest(() -> bulkRequestModifier, + (indexRequest, exception) -> { + logger.debug(() -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", + indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception); + bulkRequestModifier.markCurrentItemAsFailed(exception); + }, (exception) -> { + if (exception != null) { + logger.error("failed to execute pipeline for a bulk request", exception); + listener.onFailure(exception); } else { - doExecute(task, bulkRequest, actionListener); + long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos); + BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest(); + ActionListener actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener); + if (bulkRequest.requests().isEmpty()) { + // at this stage, the transport bulk action can't deal with a bulk request with no requests, + // so we stop and send an empty response back to the client. 
+ // (this will happen if pre-processing all items in the bulk failed) + actionListener.onResponse(new BulkResponse(new BulkItemResponse[0], 0)); + } else { + doExecute(task, bulkRequest, actionListener); + } } - } - }); + }, + indexRequest -> bulkRequestModifier.markCurrentItemAsDropped()); } static final class BulkRequestModifier implements Iterator> { @@ -604,6 +608,19 @@ public class TransportBulkAction extends HandledTransportAction> responseMap, List indexResponses) { - this.responseMap = responseMap; - this.indexResponses = indexResponses; + this.responseMap = Objects.requireNonNull(responseMap); + this.indexResponses = Objects.requireNonNull(indexResponses); } /** * Used for serialization */ FieldCapabilitiesResponse() { - this.responseMap = Collections.emptyMap(); + this(Collections.emptyMap(), Collections.emptyList()); } /** @@ -82,6 +82,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont List getIndexResponses() { return indexResponses; } + /** * * Get the field capabilities per type for the provided {@code field}. @@ -95,11 +96,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont super.readFrom(in); this.responseMap = in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - indexResponses = in.readList(FieldCapabilitiesIndexResponse::new); - } else { - indexResponses = Collections.emptyList(); - } + indexResponses = in.readList(FieldCapabilitiesIndexResponse::new); } private static Map readField(StreamInput in) throws IOException { @@ -110,10 +107,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeList(indexResponses); - } - + out.writeList(indexResponses); } private static void writeField(StreamOutput out, diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index ef0d19a2655..b8d1f477ac1 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -90,7 +90,7 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction innerListener = new ActionListener() { @Override diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java index d3cd052ecad..6b4c74fe56c 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java @@ -27,26 +27,23 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.ingest.PipelineStore; -import 
org.elasticsearch.node.NodeService; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; public class DeletePipelineTransportAction extends TransportMasterNodeAction { - private final PipelineStore pipelineStore; - private final ClusterService clusterService; + private final IngestService ingestService; @Inject - public DeletePipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, + public DeletePipelineTransportAction(Settings settings, ThreadPool threadPool, IngestService ingestService, TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) { - super(settings, DeletePipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeletePipelineRequest::new); - this.clusterService = clusterService; - this.pipelineStore = nodeService.getIngestService().getPipelineStore(); + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, DeletePipelineAction.NAME, transportService, ingestService.getClusterService(), + threadPool, actionFilters, indexNameExpressionResolver, DeletePipelineRequest::new); + this.ingestService = ingestService; } @Override @@ -60,8 +57,9 @@ public class DeletePipelineTransportAction extends TransportMasterNodeAction listener) throws Exception { - pipelineStore.delete(clusterService, request, listener); + protected void masterOperation(DeletePipelineRequest request, ClusterState state, + ActionListener listener) throws Exception { + ingestService.delete(request, listener); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java index 191ed87a42c..540f46982a5 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java @@ -29,21 +29,17 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.ingest.PipelineStore; -import org.elasticsearch.node.NodeService; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; public class GetPipelineTransportAction extends TransportMasterNodeReadAction { - - private final PipelineStore pipelineStore; - + @Inject public GetPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) { + IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, GetPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, GetPipelineRequest::new, indexNameExpressionResolver); - this.pipelineStore = nodeService.getIngestService().getPipelineStore(); } @Override @@ -58,7 +54,7 @@ public class GetPipelineTransportAction extends TransportMasterNodeReadAction listener) throws Exception { - listener.onResponse(new GetPipelineResponse(pipelineStore.getPipelines(state, request.getIds()))); + 
listener.onResponse(new GetPipelineResponse(IngestService.getPipelines(state, request.getIds()))); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java index 6447b0557db..abff28bcf55 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.bytes.BytesReference; @@ -82,11 +81,7 @@ public class PutPipelineRequest extends AcknowledgedRequest super.readFrom(in); id = in.readString(); source = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(source); - } + xContentType = in.readEnum(XContentType.class); } @Override @@ -94,9 +89,7 @@ public class PutPipelineRequest extends AcknowledgedRequest super.writeTo(out); out.writeString(id); out.writeBytesReference(source); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index abe8f49272c..38e1f2fb54b 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -32,12 +32,10 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.ingest.PipelineStore; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.IngestInfo; -import org.elasticsearch.node.NodeService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -46,19 +44,19 @@ import java.util.Map; public class PutPipelineTransportAction extends TransportMasterNodeAction { - private final PipelineStore pipelineStore; - private final ClusterService clusterService; + private final IngestService ingestService; private final NodeClient client; @Inject - public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService, - NodeClient client) { - super(settings, PutPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new); - this.clusterService = clusterService; + public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + IngestService ingestService, 
NodeClient client) { + super( + settings, PutPipelineAction.NAME, transportService, ingestService.getClusterService(), + threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new + ); this.client = client; - this.pipelineStore = nodeService.getIngestService().getPipelineStore(); + this.ingestService = ingestService; } @Override @@ -84,7 +82,7 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction responses = new ArrayList<>(); for (IngestDocument ingestDocument : request.getDocuments()) { - responses.add(executeDocument(request.getPipeline(), ingestDocument, request.isVerbose())); + SimulateDocumentResult response = executeDocument(request.getPipeline(), ingestDocument, request.isVerbose()); + if (response != null) { + responses.add(response); + } } listener.onResponse(new SimulatePipelineResponse(request.getPipeline().getId(), request.isVerbose(), responses)); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index 9a7d6bb7fee..7514a41f575 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.bytes.BytesReference; @@ -32,8 +31,8 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.Pipeline; -import org.elasticsearch.ingest.PipelineStore; import java.io.IOException; import java.util.ArrayList; @@ -76,11 +75,7 @@ public class SimulatePipelineRequest extends ActionRequest implements ToXContent id = in.readOptionalString(); verbose = in.readBoolean(); source = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(source); - } + xContentType = in.readEnum(XContentType.class); } @Override @@ -123,9 +118,7 @@ public class SimulatePipelineRequest extends ActionRequest implements ToXContent out.writeOptionalString(id); out.writeBoolean(verbose); out.writeBytesReference(source); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } @Override @@ -164,14 +157,13 @@ public class SimulatePipelineRequest extends ActionRequest implements ToXContent } } - private static final Pipeline.Factory PIPELINE_FACTORY = new Pipeline.Factory(); static final String SIMULATED_PIPELINE_ID = "_simulate_pipeline"; - static Parsed parseWithPipelineId(String pipelineId, Map config, boolean verbose, PipelineStore pipelineStore) { + static Parsed parseWithPipelineId(String pipelineId, Map config, boolean verbose, IngestService ingestService) { if (pipelineId == null) { throw new IllegalArgumentException("param [pipeline] is null"); } - Pipeline pipeline = pipelineStore.get(pipelineId); + Pipeline pipeline = ingestService.getPipeline(pipelineId); if (pipeline == null) { throw new IllegalArgumentException("pipeline [" + pipelineId + "] does not exist"); } @@ -179,9 +171,11 @@ 
public class SimulatePipelineRequest extends ActionRequest implements ToXContent return new Parsed(pipeline, ingestDocumentList, verbose); } - static Parsed parse(Map config, boolean verbose, PipelineStore pipelineStore) throws Exception { + static Parsed parse(Map config, boolean verbose, IngestService ingestService) throws Exception { Map pipelineConfig = ConfigurationUtils.readMap(null, null, config, Fields.PIPELINE); - Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactories()); + Pipeline pipeline = Pipeline.create( + SIMULATED_PIPELINE_ID, pipelineConfig, ingestService.getProcessorFactories(), ingestService.getScriptService() + ); List ingestDocumentList = parseDocs(config); return new Parsed(pipeline, ingestDocumentList, verbose); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java index 2e898c1895f..ad8577d5244 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java @@ -26,8 +26,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.ingest.PipelineStore; -import org.elasticsearch.node.NodeService; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -36,15 +35,15 @@ import java.util.Map; public class SimulatePipelineTransportAction extends HandledTransportAction { - private final PipelineStore pipelineStore; + private final IngestService ingestService; private final SimulateExecutionService executionService; @Inject public SimulatePipelineTransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, - ActionFilters actionFilters, NodeService nodeService) { + ActionFilters actionFilters, IngestService ingestService) { super(settings, SimulatePipelineAction.NAME, transportService, actionFilters, (Writeable.Reader) SimulatePipelineRequest::new); - this.pipelineStore = nodeService.getIngestService().getPipelineStore(); + this.ingestService = ingestService; this.executionService = new SimulateExecutionService(threadPool); } @@ -55,9 +54,9 @@ public class SimulatePipelineTransportAction extends HandledTransportAction results, - final Collection bufferedTopDocs, final TopDocsStats topDocsStats, int from, int size) { + final Collection bufferedTopDocs, final TopDocsStats topDocsStats, int from, int size) { if (results.isEmpty()) { return SortedTopDocs.EMPTY; } @@ -169,12 +172,12 @@ public final class SearchPhaseController extends AbstractComponent { * top docs anymore but instead only pass relevant results / top docs to the merge method*/ QuerySearchResult queryResult = sortedResult.queryResult(); if (queryResult.hasConsumedTopDocs() == false) { // already consumed? 
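// Editor's sketch (illustrative, not part of the diff): the SearchPhaseController hunks here
// replace Lucene's TopDocs with the TopDocsAndMaxScore wrapper because Lucene 8 no longer
// carries maxScore on TopDocs and turned totalHits into a TotalHits(value, relation) pair
// that may only be a lower bound. A minimal sketch of the wrapper's shape, using only the
// Lucene types the diff itself references:
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;

final class TopDocsAndMaxScoreSketch {
    final TopDocs topDocs; // per-shard hits plus the TotalHits(value, relation) pair
    final float maxScore;  // tracked separately since Lucene 8

    TopDocsAndMaxScoreSketch(TopDocs topDocs, float maxScore) {
        this.topDocs = topDocs;
        this.maxScore = maxScore;
    }

    // Example: a shard that matched "at least" atLeast docs reports a lower bound, which
    // TopDocsStats.add() below widens to GREATER_THAN_OR_EQUAL_TO for the whole response.
    static TopDocsAndMaxScoreSketch lowerBound(ScoreDoc[] docs, long atLeast, float maxScore) {
        TotalHits hits = new TotalHits(atLeast, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
        return new TopDocsAndMaxScoreSketch(new TopDocs(hits, docs), maxScore);
    }
}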
- final TopDocs td = queryResult.consumeTopDocs(); + final TopDocsAndMaxScore td = queryResult.consumeTopDocs(); assert td != null; topDocsStats.add(td); - if (td.scoreDocs.length > 0) { // make sure we set the shard index before we add it - the consumer didn't do that yet - setShardIndex(td, queryResult.getShardIndex()); - topDocs.add(td); + if (td.topDocs.scoreDocs.length > 0) { // make sure we set the shard index before we add it - the consumer didn't do that yet + setShardIndex(td.topDocs, queryResult.getShardIndex()); + topDocs.add(td.topDocs); } } if (queryResult.hasSuggestHits()) { @@ -387,7 +390,9 @@ public final class SearchPhaseController extends AbstractComponent { assert index < fetchResult.hits().getHits().length : "not enough hits fetched. index [" + index + "] length: " + fetchResult.hits().getHits().length; SearchHit searchHit = fetchResult.hits().getHits()[index]; - searchHit.score(shardDoc.score); + if (sorted == false) { + searchHit.score(shardDoc.score); + } searchHit.shard(fetchResult.getSearchShardTarget()); if (sorted) { FieldDoc fieldDoc = (FieldDoc) shardDoc; @@ -683,10 +688,10 @@ public final class SearchPhaseController extends AbstractComponent { aggsBuffer[i] = (InternalAggregations) querySearchResult.consumeAggs(); } if (hasTopDocs) { - final TopDocs topDocs = querySearchResult.consumeTopDocs(); // can't be null + final TopDocsAndMaxScore topDocs = querySearchResult.consumeTopDocs(); // can't be null topDocsStats.add(topDocs); - SearchPhaseController.setShardIndex(topDocs, querySearchResult.getShardIndex()); - topDocsBuffer[i] = topDocs; + SearchPhaseController.setShardIndex(topDocs.topDocs, querySearchResult.getShardIndex()); + topDocsBuffer[i] = topDocs.topDocs; } } @@ -743,6 +748,7 @@ public final class SearchPhaseController extends AbstractComponent { static final class TopDocsStats { final boolean trackTotalHits; long totalHits; + TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO; long fetchHits; float maxScore = Float.NEGATIVE_INFINITY; @@ -755,13 +761,16 @@ public final class SearchPhaseController extends AbstractComponent { this.totalHits = trackTotalHits ? 
0 : -1; } - void add(TopDocs topDocs) { + void add(TopDocsAndMaxScore topDocs) { if (trackTotalHits) { - totalHits += topDocs.totalHits; + totalHits += topDocs.topDocs.totalHits.value; + if (topDocs.topDocs.totalHits.relation == Relation.GREATER_THAN_OR_EQUAL_TO) { + totalHitsRelation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; + } } - fetchHits += topDocs.scoreDocs.length; - if (!Float.isNaN(topDocs.getMaxScore())) { - maxScore = Math.max(maxScore, topDocs.getMaxScore()); + fetchHits += topDocs.topDocs.scoreDocs.length; + if (!Float.isNaN(topDocs.maxScore)) { + maxScore = Math.max(maxScore, topDocs.maxScore); } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java index 3d3737b0638..fa532777e9d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java @@ -134,11 +134,10 @@ public class SearchPhaseExecutionException extends ElasticsearchException { @Override protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException { builder.field("phase", phaseName); - final boolean group = params.paramAsBoolean("group_shard_failures", true); // we group by default - builder.field("grouped", group); // notify that it's grouped + builder.field("grouped", true); // notify that it's grouped builder.field("failed_shards"); builder.startArray(); - ShardOperationFailedException[] failures = group ? ExceptionsHelper.groupBy(shardFailures) : shardFailures; + ShardOperationFailedException[] failures = ExceptionsHelper.groupBy(shardFailures); for (ShardOperationFailedException failure : failures) { builder.startObject(); failure.toXContent(builder, params); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index e67517c4852..dd7f6872943 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -135,10 +135,8 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest indicesOptions = IndicesOptions.readIndicesOptions(in); requestCache = in.readOptionalBoolean(); batchedReduceSize = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - maxConcurrentShardRequests = in.readVInt(); - preFilterShardSize = in.readVInt(); - } + maxConcurrentShardRequests = in.readVInt(); + preFilterShardSize = in.readVInt(); if (in.getVersion().onOrAfter(Version.V_6_3_0)) { allowPartialSearchResults = in.readOptionalBoolean(); } @@ -160,10 +158,8 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest indicesOptions.writeIndicesOptions(out); out.writeOptionalBoolean(requestCache); out.writeVInt(batchedReduceSize); - if (out.getVersion().onOrAfter(Version.V_5_6_0)) { - out.writeVInt(maxConcurrentShardRequests); - out.writeVInt(preFilterShardSize); - } + out.writeVInt(maxConcurrentShardRequests); + out.writeVInt(preFilterShardSize); if (out.getVersion().onOrAfter(Version.V_6_3_0)) { out.writeOptionalBoolean(allowPartialSearchResults); } @@ -188,6 +184,10 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest if (source != null && source.size() == 0 && scroll != null) { validationException = addValidationError("[size] cannot 
be [0] in a scroll context", validationException); } + if (source != null && source.rescores() != null && source.rescores().isEmpty() == false && scroll != null) { + validationException = + addValidationError("using [rescore] is not allowed in a scroll context", validationException); + } return validationException; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 2a97798764e..0273d5e5821 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -374,9 +374,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb } scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - skippedShards = in.readVInt(); - } + skippedShards = in.readVInt(); } @Override @@ -395,9 +393,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb } out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); - if(out.getVersion().onOrAfter(Version.V_5_6_0)) { - out.writeVInt(skippedShards); - } + out.writeVInt(skippedShards); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 133d0291df5..a4ea2616e0a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; @@ -113,17 +112,8 @@ public class SearchTransportService extends AbstractComponent { public void sendCanMatch(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, final ActionListener listener) { - if (connection.getNode().getVersion().onOrAfter(Version.V_5_6_0)) { - transportService.sendChildRequest(connection, QUERY_CAN_MATCH_NAME, request, task, - TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, CanMatchResponse::new)); - } else { - // this might look weird but if we are in a CrossClusterSearch environment we can get a connection - // to a pre 5.latest node which is proxied by a 5.latest node under the hood since we are only compatible with 5.latest - // instead of sending the request we shortcut it here and let the caller deal with this -- see #25704 - // also failing the request instead of returning a fake answer might trigger a retry on a replica which might be on a - // compatible node - throw new IllegalArgumentException("can_match is not supported on pre 5.6 nodes"); - } + transportService.sendChildRequest(connection, QUERY_CAN_MATCH_NAME, request, task, + TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, CanMatchResponse::new)); } public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener listener) { diff --git a/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java index 98418153d50..ddfadfa57e3 100644 --- 
a/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java +++ b/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java @@ -54,8 +54,7 @@ public class ShardSearchFailure extends ShardOperationFailedException { private SearchShardTarget shardTarget; - private ShardSearchFailure() { - + ShardSearchFailure() { } public ShardSearchFailure(Exception e) { @@ -101,6 +100,8 @@ public class ShardSearchFailure extends ShardOperationFailedException { public void readFrom(StreamInput in) throws IOException { if (in.readBoolean()) { shardTarget = new SearchShardTarget(in); + index = shardTarget.getFullyQualifiedIndexName(); + shardId = shardTarget.getShardId().getId(); } reason = in.readString(); status = RestStatus.readFrom(in); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java index bc5c696894a..3e0c1a6d1e4 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java @@ -271,7 +271,7 @@ public class ReplicationResponse extends ActionResponse { public void readFrom(StreamInput in) throws IOException { shardId = ShardId.readShardId(in); super.shardId = shardId.getId(); - super.index = shardId.getIndexName(); + index = shardId.getIndexName(); nodeId = in.readOptionalString(); cause = in.readException(); status = RestStatus.readFrom(in); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index ae029ce3f93..5d425b16d16 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -130,6 +130,7 @@ public abstract class TransportWriteAction< implements RespondingWriteResult { boolean finishedAsyncActions; public final Location location; + public final IndexShard primary; ActionListener listener = null; public WritePrimaryResult(ReplicaRequest request, @Nullable Response finalResponse, @@ -137,6 +138,7 @@ public abstract class TransportWriteAction< IndexShard primary, Logger logger) { super(request, finalResponse, operationFailure); this.location = location; + this.primary = primary; assert location == null || operationFailure == null : "expected either failure to be null or translog location to be null, " + "but found: [" + location + "] translog location and [" + operationFailure + "] failure"; diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java index 031a537c37b..7d13cff2ebd 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java @@ -22,7 +22,9 @@ package org.elasticsearch.action.termvectors; import com.carrotsearch.hppc.ObjectLongHashMap; import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.apache.lucene.index.Fields; +import org.apache.lucene.index.ImpactsEnum; import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.SlowImpactsEnum; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; 
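// Editor's sketch (illustrative, not part of the diff): the TermVectorsFields hunk below adds
// the TermsEnum.impacts(int) method that Lucene 8 introduced for block-max WAND scoring.
// Synthetic, in-memory terms such as deserialized term vectors have no real impact data, so
// the fallback used here is to wrap the ordinary PostingsEnum in SlowImpactsEnum, which
// advertises trivial impacts and therefore never lets scorers skip documents:
import java.io.IOException;
import org.apache.lucene.index.ImpactsEnum;
import org.apache.lucene.index.SlowImpactsEnum;
import org.apache.lucene.index.TermsEnum;

abstract class SyntheticTermsEnumSketch extends TermsEnum {
    @Override
    public ImpactsEnum impacts(int flags) throws IOException {
        // Correct but unoptimized: fine for term vectors, which are never used for ranking.
        return new SlowImpactsEnum(postings(null, flags));
    }
}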
import org.apache.lucene.search.BoostAttribute; @@ -348,6 +350,11 @@ public final class TermVectorsFields extends Fields { : null, hasPayloads ? payloads : null, freq); } + @Override + public ImpactsEnum impacts(int flags) throws IOException { + return new SlowImpactsEnum(postings(null, flags)); + } + }; } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index f416627c1e0..d6bf911e572 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -498,14 +498,10 @@ public class TermVectorsRequest extends SingleShardRequest i if (in.readBoolean()) { doc = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(doc); - } + xContentType = in.readEnum(XContentType.class); } routing = in.readOptionalString(); - + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { in.readOptionalString(); // _parent } @@ -546,9 +542,7 @@ public class TermVectorsRequest extends SingleShardRequest i out.writeBoolean(doc != null); if (doc != null) { out.writeBytesReference(doc); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } out.writeOptionalString(routing); if (out.getVersion().before(Version.V_7_0_0_alpha1)) { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java index 8a54406c1f9..9aca80b533f 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java @@ -112,13 +112,17 @@ final class TermVectorsWriter { // get the doc frequency if (dfs != null) { final TermStatistics statistics = dfs.termStatistics().get(term); - writeTermStatistics(statistics == null ? 
new TermStatistics(termBytesRef, 0, 0) : statistics); + if (statistics == null) { + writeMissingTermStatistics(); + } else { + writeTermStatistics(statistics); + } } else { boolean foundTerm = topLevelIterator.seekExact(termBytesRef); if (foundTerm) { writeTermStatistics(topLevelIterator); } else { - writeTermStatistics(new TermStatistics(termBytesRef, 0, 0)); + writeMissingTermStatistics(); } } } @@ -239,6 +243,11 @@ final class TermVectorsWriter { output.writeBytes(term.bytes, term.offset, term.length); } + private void writeMissingTermStatistics() throws IOException { + writePotentiallyNegativeVInt(0); + writePotentiallyNegativeVInt(0); + } + private void writeTermStatistics(TermsEnum topLevelIterator) throws IOException { int docFreq = topLevelIterator.docFreq(); assert (docFreq >= -1); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index bc2fe747c03..2694baf2c39 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -37,7 +37,6 @@ import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.logging.NodeNamePatternConverter; import org.elasticsearch.common.network.IfConfig; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureSettings; @@ -217,6 +216,11 @@ final class Bootstrap { final BoundTransportAddress boundTransportAddress, List checks) throws NodeValidationException { BootstrapChecks.check(context, boundTransportAddress, checks); } + + @Override + protected void registerDerivedNodeNameWithLogger(String nodeName) { + LogConfigurator.setNodeName(nodeName); + } }; } @@ -289,9 +293,9 @@ final class Bootstrap { final SecureSettings keystore = loadSecureSettings(initialEnv); final Environment environment = createEnvironment(pidFile, keystore, initialEnv.settings(), initialEnv.configFile()); - String nodeName = Node.NODE_NAME_SETTING.get(environment.settings()); - NodeNamePatternConverter.setNodeName(nodeName); - + if (Node.NODE_NAME_SETTING.exists(environment.settings())) { + LogConfigurator.setNodeName(Node.NODE_NAME_SETTING.get(environment.settings())); + } try { LogConfigurator.configure(environment); } catch (IOException e) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index 1a028042db2..c5a8e806f41 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.index.IndexModule; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.process.ProcessProbe; import org.elasticsearch.node.NodeValidationException; @@ -393,17 +394,22 @@ final class BootstrapChecks { static class MaxMapCountCheck implements BootstrapCheck { - private static final long LIMIT = 1 << 18; + static final long LIMIT = 1 << 18; @Override - public BootstrapCheckResult check(BootstrapContext 
context) { - if (getMaxMapCount() != -1 && getMaxMapCount() < LIMIT) { - final String message = String.format( - Locale.ROOT, - "max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]", - getMaxMapCount(), - LIMIT); - return BootstrapCheckResult.failure(message); + public BootstrapCheckResult check(final BootstrapContext context) { + // we only enforce the check if mmapfs is an allowed store type + if (IndexModule.NODE_STORE_ALLOW_MMAPFS.get(context.settings)) { + if (getMaxMapCount() != -1 && getMaxMapCount() < LIMIT) { + final String message = String.format( + Locale.ROOT, + "max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]", + getMaxMapCount(), + LIMIT); + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } else { return BootstrapCheckResult.success(); } diff --git a/server/src/main/java/org/elasticsearch/client/Client.java b/server/src/main/java/org/elasticsearch/client/Client.java index adb2f509b99..f97f618347a 100644 --- a/server/src/main/java/org/elasticsearch/client/Client.java +++ b/server/src/main/java/org/elasticsearch/client/Client.java @@ -455,7 +455,7 @@ public interface Client extends ElasticsearchClient, Releasable { /** * Builder for the field capabilities request. */ - FieldCapabilitiesRequestBuilder prepareFieldCaps(); + FieldCapabilitiesRequestBuilder prepareFieldCaps(String... indices); /** * An action that returns the field capabilities from the provided request diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 86d9d2c445f..553c92e6de8 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -651,8 +651,8 @@ public abstract class AbstractClient extends AbstractComponent implements Client } @Override - public FieldCapabilitiesRequestBuilder prepareFieldCaps() { - return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE); + public FieldCapabilitiesRequestBuilder prepareFieldCaps(String... 
indices) { + return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE, indices); } static class Admin implements AdminClient { diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java index ba18105e3f1..39829615fb3 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -19,7 +19,6 @@ package org.elasticsearch.client.transport; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionModule; @@ -44,6 +43,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.node.InternalSettingsPreparer; @@ -146,7 +146,8 @@ public abstract class TransportClient extends AbstractClient { for (final ExecutorBuilder builder : threadPool.builders()) { additionalSettings.addAll(builder.getRegisteredSettings()); } - SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter); + SettingsModule settingsModule = + new SettingsModule(settings, additionalSettings, additionalSettingsFilter, Collections.emptySet()); SearchModule searchModule = new SearchModule(settings, true, pluginsService.filterPlugins(SearchPlugin.class)); IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 8362198a12c..7996059a099 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService; import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; @@ -51,10 +50,10 @@ import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocatio import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ResizeAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.RestoreInProgressAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; -import 
org.elasticsearch.cluster.routing.allocation.decider.RestoreInProgressAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseField; @@ -268,7 +267,6 @@ public class ClusterModule extends AbstractModule { bind(AllocationService.class).toInstance(allocationService); bind(ClusterService.class).toInstance(clusterService); bind(NodeConnectionsService.class).asEagerSingleton(); - bind(MetaDataCreateIndexService.class).asEagerSingleton(); bind(MetaDataDeleteIndexService.class).asEagerSingleton(); bind(MetaDataIndexStateService.class).asEagerSingleton(); bind(MetaDataMappingService.class).asEagerSingleton(); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 7a377d10090..2f7c4b12edd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -318,7 +318,7 @@ public class ClusterState implements ToXContentFragment, Diffable final String TAB = " "; for (IndexMetaData indexMetaData : metaData) { sb.append(TAB).append(indexMetaData.getIndex()); - sb.append(": v[").append(indexMetaData.getVersion()).append("]\n"); + sb.append(": v[").append(indexMetaData.getVersion()).append("], mv[").append(indexMetaData.getMappingVersion()).append("]\n"); for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) { sb.append(TAB).append(TAB).append(shard).append(": "); sb.append("p_term [").append(indexMetaData.primaryTerm(shard)).append("], "); diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java index 234d1ef9f17..0134b798c72 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -40,8 +40,6 @@ import java.util.Objects; public class SnapshotDeletionsInProgress extends AbstractNamedDiffable implements Custom { public static final String TYPE = "snapshot_deletions"; - // the version where SnapshotDeletionsInProgress was introduced - public static final Version VERSION_INTRODUCED = Version.V_5_2_0; // the list of snapshot deletion request entries private final List entries; @@ -135,7 +133,7 @@ public class SnapshotDeletionsInProgress extends AbstractNamedDiffable i @Override public Version getMinimalSupportedVersion() { - return VERSION_INTRODUCED; + return Version.CURRENT.minimumCompatibilityVersion(); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 87563c968af..565c5134d1b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -48,12 +48,6 @@ import java.util.Map; public class SnapshotsInProgress extends AbstractNamedDiffable implements Custom { public static final String TYPE = "snapshots"; - // denotes an undefined repository state id, which will happen when receiving a cluster state with - // a snapshot in progress from a pre 5.2.x node - public static final long UNDEFINED_REPOSITORY_STATE_ID = -2L; - // the version where repository state ids were introduced - private static final Version 
REPOSITORY_ID_INTRODUCED_VERSION = Version.V_5_2_0; - @Override public boolean equals(Object o) { if (this == o) return true; @@ -432,10 +426,7 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState, reason)); } } - long repositoryStateId = UNDEFINED_REPOSITORY_STATE_ID; - if (in.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { - repositoryStateId = in.readLong(); - } + long repositoryStateId = in.readLong(); entries[i] = new Entry(snapshot, includeGlobalState, partial, @@ -471,9 +462,7 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement out.writeByte(shardEntry.value.state().value()); } } - if (out.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { - out.writeLong(entry.repositoryStateId); - } + out.writeLong(entry.repositoryStateId); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java index efbd262b16d..fc09741f4d9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.block; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -138,11 +137,7 @@ public class ClusterBlock implements Streamable, ToXContentFragment { retryable = in.readBoolean(); disableStatePersistence = in.readBoolean(); status = RestStatus.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - allowReleaseResources = in.readBoolean(); - } else { - allowReleaseResources = false; - } + allowReleaseResources = in.readBoolean(); } @Override @@ -156,9 +151,7 @@ public class ClusterBlock implements Streamable, ToXContentFragment { out.writeBoolean(retryable); out.writeBoolean(disableStatePersistence); RestStatus.writeTo(out, status); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeBoolean(allowReleaseResources); - } + out.writeBoolean(allowReleaseResources); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java index 33e1687e241..766b35307cd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -46,7 +45,6 @@ import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQuery */ public class AliasValidator extends AbstractComponent { - @Inject public AliasValidator(Settings settings) { super(settings); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java new file mode 100644 index 00000000000..4aa429f5704 --- /dev/null +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java @@ -0,0 +1,188 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * This is a {@code Map} that implements AbstractDiffable so it + * can be used for cluster state purposes + */ +public class DiffableStringMap extends AbstractMap implements Diffable { + + private final Map innerMap; + + DiffableStringMap(final Map map) { + this.innerMap = map; + } + + @SuppressWarnings("unchecked") + DiffableStringMap(final StreamInput in) throws IOException { + this.innerMap = (Map) (Map) in.readMap(); + } + + @Override + public String put(String key, String value) { + return innerMap.put(key, value); + } + + @Override + public Set> entrySet() { + return innerMap.entrySet(); + } + + @Override + @SuppressWarnings("unchecked") + public void writeTo(StreamOutput out) throws IOException { + out.writeMap((Map) (Map) innerMap); + } + + @Override + public Diff diff(DiffableStringMap previousState) { + return new DiffableStringMapDiff(previousState, this); + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return new DiffableStringMapDiff(in); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj instanceof DiffableStringMap) { + DiffableStringMap other = (DiffableStringMap) obj; + return innerMap.equals(other.innerMap); + } else if (obj instanceof Map) { + Map other = (Map) obj; + return innerMap.equals(other); + } else { + return false; + } + } + + @Override + public int hashCode() { + return innerMap.hashCode(); + } + + @Override + public String toString() { + return "DiffableStringMap[" + innerMap.toString() + "]"; + } + + /** + * Represents differences between two DiffableStringMaps. 
+ */ + public static class DiffableStringMapDiff implements Diff { + + private final List deletes; + private final Map upserts; // diffs also become upserts + + private DiffableStringMapDiff(DiffableStringMap before, DiffableStringMap after) { + final List tempDeletes = new ArrayList<>(); + final Map tempUpserts = new HashMap<>(); + for (String key : before.keySet()) { + if (after.containsKey(key) == false) { + tempDeletes.add(key); + } + } + + for (Map.Entry partIter : after.entrySet()) { + String beforePart = before.get(partIter.getKey()); + if (beforePart == null) { + tempUpserts.put(partIter.getKey(), partIter.getValue()); + } else if (partIter.getValue().equals(beforePart) == false) { + tempUpserts.put(partIter.getKey(), partIter.getValue()); + } + } + deletes = tempDeletes; + upserts = tempUpserts; + } + + private DiffableStringMapDiff(StreamInput in) throws IOException { + deletes = new ArrayList<>(); + upserts = new HashMap<>(); + int deletesCount = in.readVInt(); + for (int i = 0; i < deletesCount; i++) { + deletes.add(in.readString()); + } + int upsertsCount = in.readVInt(); + for (int i = 0; i < upsertsCount; i++) { + String key = in.readString(); + String newValue = in.readString(); + upserts.put(key, newValue); + } + } + + public List getDeletes() { + return deletes; + } + + public Map> getDiffs() { + return Collections.emptyMap(); + } + + public Map getUpserts() { + return upserts; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(deletes.size()); + for (String delete : deletes) { + out.writeString(delete); + } + out.writeVInt(upserts.size()); + for (Map.Entry entry : upserts.entrySet()) { + out.writeString(entry.getKey()); + out.writeString(entry.getValue()); + } + } + + @Override + public DiffableStringMap apply(DiffableStringMap part) { + Map builder = new HashMap<>(part.innerMap); + List deletes = getDeletes(); + for (String delete : deletes) { + builder.remove(delete); + } + assert getDiffs().size() == 0 : "there should never be diffs for DiffableStringMap"; + + for (Map.Entry upsert : upserts.entrySet()) { + builder.put(upsert.getKey(), upsert.getValue()); + } + return new DiffableStringMap(builder); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 18b89db72a3..54089abae7e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -23,7 +23,7 @@ import com.carrotsearch.hppc.LongArrayList; import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - +import org.elasticsearch.Assertions; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.action.support.ActiveShardCount; @@ -45,6 +45,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -64,7 +65,6 @@ import java.time.ZonedDateTime; import java.util.Arrays; 
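// Editor's sketch (illustrative, not part of the diff): DiffableStringMapDiff above reduces
// any change between two string maps to two pieces -- the keys to delete and the keys to
// upsert (insert or overwrite) -- and apply() must rebuild the new map exactly from the old
// one. A self-contained illustration of those semantics (names here are hypothetical):
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class StringMapDiffSketch {
    static Map<String, String> apply(Map<String, String> before,
                                     Set<String> deletes,
                                     Map<String, String> upserts) {
        Map<String, String> result = new HashMap<>(before);
        deletes.forEach(result::remove); // keys absent from the new map
        result.putAll(upserts);          // added keys and changed values
        return result;
    }

    public static void main(String[] args) {
        Map<String, String> before = new HashMap<>();
        before.put("a", "1");
        before.put("b", "2");
        // new state: "a" removed, "b" changed, "c" added
        Set<String> deletes = new HashSet<>();
        deletes.add("a");
        Map<String, String> upserts = new HashMap<>();
        upserts.put("b", "3");
        upserts.put("c", "4");
        Map<String, String> after = apply(before, deletes, upserts);
        assert after.size() == 2 && "3".equals(after.get("b")) && "4".equals(after.get("c"));
    }
}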
import java.util.Collections; import java.util.EnumSet; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.Locale; @@ -80,59 +80,6 @@ import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; public class IndexMetaData implements Diffable, ToXContentFragment { - /** - * This class will be removed in v7.0 - */ - @Deprecated - public interface Custom extends Diffable, ToXContent { - - String type(); - - Custom fromMap(Map map) throws IOException; - - Custom fromXContent(XContentParser parser) throws IOException; - - /** - * Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput - */ - Diff readDiffFrom(StreamInput in) throws IOException; - - /** - * Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged. - */ - Custom readFrom(StreamInput in) throws IOException; - - /** - * Merges from this to another, with this being more important, i.e., if something exists in this and another, - * this will prevail. - */ - Custom mergeWith(Custom another); - } - - public static Map customPrototypes = new HashMap<>(); - - /** - * Register a custom index meta data factory. Make sure to call it from a static block. - */ - public static void registerPrototype(String type, Custom proto) { - customPrototypes.put(type, proto); - } - - @Nullable - public static T lookupPrototype(String type) { - //noinspection unchecked - return (T) customPrototypes.get(type); - } - - public static T lookupPrototypeSafe(String type) { - //noinspection unchecked - T proto = (T) customPrototypes.get(type); - if (proto == null) { - throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); - } - return proto; - } - public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ)); public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE)); @@ -237,6 +184,10 @@ public class IndexMetaData implements Diffable, ToXContentFragmen Setting.boolSetting(SETTING_READ_ONLY_ALLOW_DELETE, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_VERSION_CREATED = "index.version.created"; + + public static final Setting SETTING_INDEX_VERSION_CREATED = + Setting.versionSetting(SETTING_VERSION_CREATED, Version.V_EMPTY, Property.IndexScope, Property.PrivateIndex); + public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string"; public static final String SETTING_VERSION_UPGRADED = "index.version.upgraded"; public static final String SETTING_VERSION_UPGRADED_STRING = "index.version.upgraded_string"; @@ -291,6 +242,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen public static final String KEY_IN_SYNC_ALLOCATIONS = "in_sync_allocations"; static final String KEY_VERSION = "version"; + static final String KEY_MAPPING_VERSION = "mapping_version"; static final String KEY_ROUTING_NUM_SHARDS = "routing_num_shards"; static final String KEY_SETTINGS = "settings"; static final String KEY_STATE = "state"; @@ -309,6 +261,9 @@ public class IndexMetaData implements Diffable, 
ToXContentFragmen
     private final Index index;
     private final long version;
 
+    private final long mappingVersion;
+
     private final long[] primaryTerms;
 
     private final State state;
@@ -319,7 +274,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
 
     private final ImmutableOpenMap<String, MappingMetaData> mappings;
 
-    private final ImmutableOpenMap<String, Custom> customs;
+    private final ImmutableOpenMap<String, DiffableStringMap> customData;
 
     private final ImmutableOpenIntMap<Set<String>> inSyncAllocationIds;
 
@@ -336,15 +291,17 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
     private final ActiveShardCount waitForActiveShards;
     private final ImmutableOpenMap<String, RolloverInfo> rolloverInfos;
 
-    private IndexMetaData(Index index, long version, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings,
+    private IndexMetaData(Index index, long version, long mappingVersion, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings,
                           ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases,
-                          ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> inSyncAllocationIds,
+                          ImmutableOpenMap<String, DiffableStringMap> customData, ImmutableOpenIntMap<Set<String>> inSyncAllocationIds,
                           DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
                           Version indexCreatedVersion, Version indexUpgradedVersion, int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards, ImmutableOpenMap<String, RolloverInfo> rolloverInfos) {
 
         this.index = index;
         this.version = version;
+        assert mappingVersion >= 0 : mappingVersion;
+        this.mappingVersion = mappingVersion;
         this.primaryTerms = primaryTerms;
         assert primaryTerms.length == numberOfShards;
         this.state = state;
@@ -353,7 +310,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1);
         this.settings = settings;
         this.mappings = mappings;
-        this.customs = customs;
+        this.customData = customData;
         this.aliases = aliases;
         this.inSyncAllocationIds = inSyncAllocationIds;
         this.requireFilters = requireFilters;
@@ -394,6 +351,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         return this.version;
     }
 
+    public long getMappingVersion() {
+        return mappingVersion;
+    }
 
     /**
      * The term of the current selected primary. This is a non-negative number incremented when
@@ -475,22 +435,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         return mappings.get(mappingType);
     }
 
-    // we keep the shrink settings for BWC - this can be removed in 8.0
-    // we can't remove in 7 since this setting might be baked into an index coming in via a full cluster restart from 6.0
-    public static final String INDEX_SHRINK_SOURCE_UUID_KEY = "index.shrink.source.uuid";
-    public static final String INDEX_SHRINK_SOURCE_NAME_KEY = "index.shrink.source.name";
     public static final String INDEX_RESIZE_SOURCE_UUID_KEY = "index.resize.source.uuid";
     public static final String INDEX_RESIZE_SOURCE_NAME_KEY = "index.resize.source.name";
-    public static final Setting<String> INDEX_SHRINK_SOURCE_UUID = Setting.simpleString(INDEX_SHRINK_SOURCE_UUID_KEY);
-    public static final Setting<String> INDEX_SHRINK_SOURCE_NAME = Setting.simpleString(INDEX_SHRINK_SOURCE_NAME_KEY);
-    public static final Setting<String> INDEX_RESIZE_SOURCE_UUID = Setting.simpleString(INDEX_RESIZE_SOURCE_UUID_KEY,
-        INDEX_SHRINK_SOURCE_UUID);
-    public static final Setting<String> INDEX_RESIZE_SOURCE_NAME = Setting.simpleString(INDEX_RESIZE_SOURCE_NAME_KEY,
-        INDEX_SHRINK_SOURCE_NAME);
+    public static final Setting<String> INDEX_RESIZE_SOURCE_UUID = Setting.simpleString(INDEX_RESIZE_SOURCE_UUID_KEY);
+    public static final Setting<String> INDEX_RESIZE_SOURCE_NAME = Setting.simpleString(INDEX_RESIZE_SOURCE_NAME_KEY);
 
     public Index getResizeSourceIndex() {
-        return INDEX_RESIZE_SOURCE_UUID.exists(settings) || INDEX_SHRINK_SOURCE_UUID.exists(settings)
-            ? new Index(INDEX_RESIZE_SOURCE_NAME.get(settings), INDEX_RESIZE_SOURCE_UUID.get(settings)) : null;
+        return INDEX_RESIZE_SOURCE_UUID.exists(settings) ? new Index(INDEX_RESIZE_SOURCE_NAME.get(settings),
+            INDEX_RESIZE_SOURCE_UUID.get(settings)) : null;
     }
 
     /**
@@ -509,13 +461,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         return mappings.get(MapperService.DEFAULT_MAPPING);
     }
 
-    public ImmutableOpenMap<String, Custom> getCustoms() {
-        return this.customs;
+    ImmutableOpenMap<String, DiffableStringMap> getCustomData() {
+        return this.customData;
     }
 
-    @SuppressWarnings("unchecked")
-    public <T extends Custom> T custom(String type) {
-        return (T) customs.get(type);
+    public Map<String, String> getCustomData(final String key) {
+        return Collections.unmodifiableMap(this.customData.get(key));
     }
 
     public ImmutableOpenIntMap<Set<String>> getInSyncAllocationIds() {
@@ -581,7 +532,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         if (state != that.state) {
             return false;
         }
-        if (!customs.equals(that.customs)) {
+        if (!customData.equals(that.customData)) {
             return false;
         }
         if (routingNumShards != that.routingNumShards) {
@@ -610,7 +561,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         result = 31 * result + aliases.hashCode();
         result = 31 * result + settings.hashCode();
         result = 31 * result + mappings.hashCode();
-        result = 31 * result + customs.hashCode();
+        result = 31 * result + customData.hashCode();
         result = 31 * result + Long.hashCode(routingFactor);
         result = 31 * result + Long.hashCode(routingNumShards);
         result = 31 * result + Arrays.hashCode(primaryTerms);
@@ -644,25 +595,27 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
 
         private final String index;
         private final int routingNumShards;
         private final long version;
+        private final long mappingVersion;
         private final long[] primaryTerms;
         private final State state;
         private final Settings settings;
         private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings;
         private final Diff<ImmutableOpenMap<String, AliasMetaData>> aliases;
-        private final Diff<ImmutableOpenMap<String, Custom>> customs;
+        private final Diff<ImmutableOpenMap<String, DiffableStringMap>> customData;
        private final Diff<ImmutableOpenIntMap<Set<String>>> inSyncAllocationIds;
        private final Diff<ImmutableOpenMap<String, RolloverInfo>> rolloverInfos;
 
         IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) {
             index = after.index.getName();
             version = after.version;
+            mappingVersion = after.mappingVersion;
             routingNumShards = after.routingNumShards;
             state = after.state;
             settings = after.settings;
             primaryTerms = after.primaryTerms;
             mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
             aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
-            customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
+            customData = DiffableUtils.diff(before.customData, after.customData, DiffableUtils.getStringKeySerializer());
             inSyncAllocationIds = DiffableUtils.diff(before.inSyncAllocationIds, after.inSyncAllocationIds,
                 DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
             rolloverInfos = DiffableUtils.diff(before.rolloverInfos, after.rolloverInfos, DiffableUtils.getStringKeySerializer());
@@ -672,6 +625,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
             index = in.readString();
             routingNumShards = in.readInt();
             version = in.readLong();
+            if (in.getVersion().onOrAfter(Version.V_6_5_0)) {
+                mappingVersion = in.readVLong();
+            } else {
+                mappingVersion = 1;
+            }
             state = State.fromId(in.readByte());
             settings = Settings.readSettingsFromStream(in);
             primaryTerms = in.readVLongArray();
@@ -679,18 +637,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
                 MappingMetaData::readDiffFrom);
             aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData::new,
                 AliasMetaData::readDiffFrom);
-            customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
-                new DiffableUtils.DiffableValueSerializer<String, Custom>() {
-                    @Override
-                    public Custom read(StreamInput in, String key) throws IOException {
-                        return lookupPrototypeSafe(key).readFrom(in);
-                    }
-
-                    @Override
-                    public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
-                        return lookupPrototypeSafe(key).readDiffFrom(in);
-                    }
-                });
+            customData = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), DiffableStringMap::new,
+                DiffableStringMap::readDiffFrom);
             inSyncAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
                 DiffableUtils.StringSetValueSerializer.getInstance());
             if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
@@ -707,12 +655,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
             out.writeString(index);
             out.writeInt(routingNumShards);
             out.writeLong(version);
+            if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
+                out.writeVLong(mappingVersion);
+            }
             out.writeByte(state.id);
             Settings.writeSettingsToStream(settings, out);
             out.writeVLongArray(primaryTerms);
             mappings.writeTo(out);
             aliases.writeTo(out);
-            customs.writeTo(out);
+            customData.writeTo(out);
             inSyncAllocationIds.writeTo(out);
             if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
                 rolloverInfos.writeTo(out);
@@ -723,13 +674,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         public IndexMetaData apply(IndexMetaData part) {
             Builder builder = builder(index);
             builder.version(version);
+            builder.mappingVersion(mappingVersion);
             builder.setRoutingNumShards(routingNumShards);
             builder.state(state);
             builder.settings(settings);
             builder.primaryTerms(primaryTerms);
             builder.mappings.putAll(mappings.apply(part.mappings));
             builder.aliases.putAll(aliases.apply(part.aliases));
-            builder.customs.putAll(customs.apply(part.customs));
+            builder.customMetaData.putAll(customData.apply(part.customData));
             builder.inSyncAllocationIds.putAll(inSyncAllocationIds.apply(part.inSyncAllocationIds));
             builder.rolloverInfos.putAll(rolloverInfos.apply(part.rolloverInfos));
             return builder.build();
@@ -739,6 +691,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
     public static IndexMetaData readFrom(StreamInput in) throws IOException {
         Builder builder = new Builder(in.readString());
         builder.version(in.readLong());
+        if (in.getVersion().onOrAfter(Version.V_6_5_0)) {
+            builder.mappingVersion(in.readVLong());
+        } else {
+            builder.mappingVersion(1);
+        }
         builder.setRoutingNumShards(in.readInt());
         builder.state(State.fromId(in.readByte()));
         builder.settings(readSettingsFromStream(in));
@@ -754,10 +711,17 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
             builder.putAlias(aliasMd);
         }
         int customSize = in.readVInt();
-        for (int i = 0; i < customSize; i++) {
-            String type = in.readString();
-            Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
-            builder.putCustom(type, customIndexMetaData);
+        if (in.getVersion().onOrAfter(Version.V_6_5_0)) {
+            for (int i = 0; i < customSize; i++) {
+                String key = in.readString();
+                DiffableStringMap custom = new DiffableStringMap(in);
+                builder.putCustom(key, custom);
+            }
+        } else {
+            assert customSize == 0 : "expected no custom index metadata";
+            if (customSize > 0) {
+                throw new IllegalStateException("unexpected custom metadata when none is supported");
+            }
         }
         int inSyncAllocationIdsSize = in.readVInt();
         for (int i = 0; i < inSyncAllocationIdsSize; i++) {
@@ -778,6 +742,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(index.getName()); // uuid will come as part of settings
         out.writeLong(version);
+        if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
+            out.writeVLong(mappingVersion);
+        }
         out.writeInt(routingNumShards);
         out.writeByte(state.id());
         writeSettingsToStream(settings, out);
@@ -790,10 +757,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         for (ObjectCursor<AliasMetaData> cursor : aliases.values()) {
             cursor.value.writeTo(out);
         }
-        out.writeVInt(customs.size());
-        for (ObjectObjectCursor<String, Custom> cursor : customs) {
-            out.writeString(cursor.key);
-            cursor.value.writeTo(out);
+        if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
+            out.writeVInt(customData.size());
+            for (final ObjectObjectCursor<String, DiffableStringMap> cursor : customData) {
+                out.writeString(cursor.key);
+                cursor.value.writeTo(out);
+            }
+        } else {
+            out.writeVInt(0);
         }
         out.writeVInt(inSyncAllocationIds.size());
         for (IntObjectCursor<Set<String>> cursor : inSyncAllocationIds) {
@@ -821,11 +792,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         private String index;
         private State state = State.OPEN;
         private long version = 1;
+        private long mappingVersion = 1;
         private long[] primaryTerms = null;
         private Settings settings = Settings.Builder.EMPTY_SETTINGS;
         private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
         private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
-        private final ImmutableOpenMap.Builder<String, Custom> customs;
+        private final ImmutableOpenMap.Builder<String, DiffableStringMap> customMetaData;
         private final ImmutableOpenIntMap.Builder<Set<String>> inSyncAllocationIds;
         private final ImmutableOpenMap.Builder<String, RolloverInfo> rolloverInfos;
         private Integer routingNumShards;
 
@@ -834,7 +806,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
             this.index = index;
             this.mappings = ImmutableOpenMap.builder();
             this.aliases = ImmutableOpenMap.builder();
-            this.customs = ImmutableOpenMap.builder();
+            this.customMetaData = ImmutableOpenMap.builder();
             this.inSyncAllocationIds = ImmutableOpenIntMap.builder();
             this.rolloverInfos = ImmutableOpenMap.builder();
         }
 
@@ -843,11 +815,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
             this.index = indexMetaData.getIndex().getName();
             this.state = indexMetaData.state;
             this.version = indexMetaData.version;
+            this.mappingVersion = indexMetaData.mappingVersion;
             this.settings = indexMetaData.getSettings();
             this.primaryTerms = indexMetaData.primaryTerms.clone();
             this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
             this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
-            this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
+            this.customMetaData = ImmutableOpenMap.builder(indexMetaData.customData);
             this.routingNumShards = indexMetaData.routingNumShards;
             this.inSyncAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.inSyncAllocationIds);
             this.rolloverInfos = ImmutableOpenMap.builder(indexMetaData.rolloverInfos);
@@ -977,8 +950,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
             return this;
         }
 
-        public Builder putCustom(String type, Custom customIndexMetaData) {
-            this.customs.put(type, customIndexMetaData);
+        public Builder putCustom(String type, Map<String, String> customIndexMetaData) {
+            this.customMetaData.put(type, new DiffableStringMap(customIndexMetaData));
             return this;
         }
 
@@ -1009,6 +982,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
             return this;
         }
 
+        public long mappingVersion() {
+            return mappingVersion;
+        }
+
+        public Builder mappingVersion(final long mappingVersion) {
+            this.mappingVersion = mappingVersion;
+            return this;
+        }
+
         /**
          * returns the primary term for the given shard.
         * See {@link IndexMetaData#primaryTerm(int)} for more information.
@@ -1136,8 +1118,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
 
             final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
 
-            return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
-                tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters,
+            return new IndexMetaData(new Index(index, uuid), version, mappingVersion, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
+                tmpAliases.build(), customMetaData.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters,
                 indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards, rolloverInfos.build());
         }
 
@@ -1145,6 +1127,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
             builder.startObject(indexMetaData.getIndex().getName());
 
             builder.field(KEY_VERSION, indexMetaData.getVersion());
+            builder.field(KEY_MAPPING_VERSION, indexMetaData.getMappingVersion());
             builder.field(KEY_ROUTING_NUM_SHARDS, indexMetaData.getRoutingNumShards());
             builder.field(KEY_STATE, indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH));
 
@@ -1164,10 +1147,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
             }
             builder.endArray();
 
-            for (ObjectObjectCursor<String, Custom> cursor : indexMetaData.getCustoms()) {
-                builder.startObject(cursor.key);
-                cursor.value.toXContent(builder, params);
-                builder.endObject();
+            for (ObjectObjectCursor<String, DiffableStringMap> cursor : indexMetaData.customData) {
+                builder.field(cursor.key);
+                builder.map(cursor.value);
             }
 
             builder.startObject(KEY_ALIASES);
@@ -1218,6 +1200,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
             if (token != XContentParser.Token.START_OBJECT) {
                 throw new IllegalArgumentException("expected object but got a " + token);
             }
+            boolean mappingVersion = false;
             while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                 if (token == XContentParser.Token.FIELD_NAME) {
                     currentFieldName = parser.currentName();
@@ -1275,15 +1258,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
                             assert Version.CURRENT.major <= 5;
                             parser.skipChildren();
                         } else {
-                            // check if its a custom index metadata
-                            Custom proto = lookupPrototype(currentFieldName);
-                            if (proto == null) {
-                                //TODO warn
-                                parser.skipChildren();
-                            } else {
-                                Custom custom = proto.fromXContent(parser);
-                                builder.putCustom(custom.type(), custom);
-                            }
+                            // assume it's custom index metadata
+                            builder.putCustom(currentFieldName, parser.mapStrings());
                         }
                     } else if (token == XContentParser.Token.START_ARRAY) {
                         if (KEY_MAPPINGS.equals(currentFieldName)) {
@@ -1316,6 +1292,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
                         builder.state(State.fromString(parser.text()));
                     } else if (KEY_VERSION.equals(currentFieldName)) {
                         builder.version(parser.longValue());
+                    } else if (KEY_MAPPING_VERSION.equals(currentFieldName)) {
+                        mappingVersion = true;
+                        builder.mappingVersion(parser.longValue());
                     } else if (KEY_ROUTING_NUM_SHARDS.equals(currentFieldName)) {
                         builder.setRoutingNumShards(parser.intValue());
                     } else {
@@ -1325,6 +1304,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
                     throw new IllegalArgumentException("Unexpected token " + token);
                 }
             }
+            if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(Version.V_6_5_0)) {
+                assert mappingVersion : "mapping version should be present for indices created on or after 6.5.0";
+            }
             return builder.build();
         }
     }
 
@@ -1335,8 +1317,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
      */
    public static Settings addHumanReadableSettings(Settings settings) {
         Settings.Builder builder = Settings.builder().put(settings);
-        Version version = settings.getAsVersion(SETTING_VERSION_CREATED, null);
-        if (version != null) {
+        Version version = SETTING_INDEX_VERSION_CREATED.get(settings);
+        if (version != Version.V_EMPTY) {
             builder.put(SETTING_VERSION_CREATED_STRING, version.toString());
         }
         Version versionUpgraded = settings.getAsVersion(SETTING_VERSION_UPGRADED, null);
@@ -1365,6 +1347,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
 
         @Override
         public IndexMetaData fromXContent(XContentParser parser) throws IOException {
+            assert parser.getXContentRegistry() != NamedXContentRegistry.EMPTY
+                : "loading index metadata requires a working named xcontent registry";
             return Builder.fromXContent(parser);
         }
     };
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
index d35a4baa1e6..7e2d9256303 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
@@ -20,7 +20,7 @@ package org.elasticsearch.cluster.metadata;
 
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-
+import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
@@ -87,13 +87,10 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
 
     private final ImmutableOpenMap<String, AliasMetaData> aliases;
 
-    private final ImmutableOpenMap<String, IndexMetaData.Custom> customs;
-
     public IndexTemplateMetaData(String name, int order, Integer version,
                                  List<String> patterns, Settings settings,
                                  ImmutableOpenMap<String, CompressedXContent> mappings,
-                                 ImmutableOpenMap<String, AliasMetaData> aliases,
-                                 ImmutableOpenMap<String, IndexMetaData.Custom> customs) {
+                                 ImmutableOpenMap<String, AliasMetaData> aliases) {
         if (patterns == null || patterns.isEmpty()) {
             throw new IllegalArgumentException("Index patterns must not be null or empty; got " + patterns);
         }
@@ -104,7 +101,6 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
         this.aliases = aliases;
-        this.customs = customs;
     }
 
-    public ImmutableOpenMap<String, IndexMetaData.Custom> customs() {
-        return this.customs;
-    }
-
-    public ImmutableOpenMap<String, IndexMetaData.Custom> getCustoms() {
-        return this.customs;
-    }
-
-    @SuppressWarnings("unchecked")
-    public <T extends IndexMetaData.Custom> T custom(String type) {
-        return (T) customs.get(type);
-    }
-
     public static Builder builder(String name) {
         return new Builder(name);
     }
@@ -227,11 +210,13 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
+            if (customSize > 0) {
+                throw new IllegalStateException("unexpected custom metadata when none is supported");
+            }
         }
         builder.version(in.readOptionalVInt());
         return builder.build();
     }
@@ -260,10 +245,8 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
         for (ObjectCursor<AliasMetaData> cursor : aliases.values()) {
             cursor.value.writeTo(out);
         }
-        out.writeVInt(customs.size());
-        for (ObjectObjectCursor<String, IndexMetaData.Custom> cursor : customs) {
-            out.writeString(cursor.key);
-            cursor.value.writeTo(out);
+        if (out.getVersion().before(Version.V_6_5_0)) {
+            out.writeVInt(0);
         }
         out.writeOptionalVInt(version);
     }
@@ -272,9 +255,6 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
         private static final Set<String> VALID_FIELDS = Sets.newHashSet(
             "template", "order", "mappings", "settings", "index_patterns", "aliases", "version");
-        static {
-            VALID_FIELDS.addAll(IndexMetaData.customPrototypes.keySet());
-        }
 
         private String name;
@@ -290,13 +270,10 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
 
         private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
 
-        private final ImmutableOpenMap.Builder<String, IndexMetaData.Custom> customs;
-
         public Builder(String name) {
             this.name = name;
             mappings = ImmutableOpenMap.builder();
             aliases = ImmutableOpenMap.builder();
-            customs = ImmutableOpenMap.builder();
         }
 
         public Builder(IndexTemplateMetaData indexTemplateMetaData) {
@@ -308,7 +285,6 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
             this.aliases = ImmutableOpenMap.builder(indexTemplateMetaData.aliases());
-            this.customs = ImmutableOpenMap.builder(indexTemplateMetaData.customs());
         }
 
-            for (ObjectObjectCursor<String, IndexMetaData.Custom> cursor : indexTemplateMetaData.customs()) {
-                builder.startObject(cursor.key);
-                cursor.value.toXContent(builder, params);
-                builder.endObject();
-            }
-
             builder.startObject("aliases");
             for (ObjectCursor<AliasMetaData> cursor : indexTemplateMetaData.aliases().values()) {
                 AliasMetaData.Builder.toXContent(cursor.value, builder, params);
@@ -468,15 +423,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
 
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
             return ImmutableOpenMap.of();
         }
 
-        boolean matchAllAliases = matchAllAliases(aliases);
+        String[] patterns = new String[aliases.length];
+        boolean[] include = new boolean[aliases.length];
+        for (int i = 0; i < aliases.length; i++) {
+            String alias = aliases[i];
+            if (alias.charAt(0) == '-') {
+                patterns[i] = alias.substring(1);
+                include[i] = false;
+            } else {
+                patterns[i] = alias;
+                include[i] = true;
+            }
+        }
+        boolean matchAllAliases = patterns.length == 0;
         ImmutableOpenMap.Builder<String, List<AliasMetaData>> mapBuilder = ImmutableOpenMap.builder();
         for (String index : concreteIndices) {
             IndexMetaData indexMetaData = indices.get(index);
             List<AliasMetaData> filteredValues = new ArrayList<>();
             for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
                 AliasMetaData value = cursor.value;
-                if (matchAllAliases || Regex.simpleMatch(aliases, value.alias())) {
+                boolean matched = matchAllAliases;
+                String alias = value.alias();
+                for (int i = 0; i < patterns.length; i++) {
+                    if (include[i]) {
+                        if (matched == false) {
+                            String pattern = patterns[i];
+                            matched = ALL.equals(pattern) || Regex.simpleMatch(pattern, alias);
+                        }
+                    } else if (matched) {
+                        matched = Regex.simpleMatch(patterns[i], alias) == false;
+                    }
+                }
+                if (matched) {
                     filteredValues.add(value);
                 }
             }
@@ -317,15 +341,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
         return mapBuilder.build();
     }
 
-    private static boolean matchAllAliases(final String[] aliases) {
-        for (String alias : aliases) {
-            if (alias.equals(ALL)) {
-                return true;
-            }
-        }
-        return aliases.length == 0;
-    }
-
     /**
      * Checks if at least one of the specified aliases exists in the specified concrete indices. Wildcards are supported in the
     * alias names for partial matches.
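The rewritten findAliases loop gives alias expressions ordered include/exclude semantics: expressions are evaluated left to right, and a leading "-" subtracts from whatever has matched so far. A standalone restatement of the rules, with an illustrative method name:

    import org.elasticsearch.common.regex.Regex;

    class AliasMatchSketch {
        static boolean matches(String[] expressions, String alias) {
            boolean matched = expressions.length == 0; // no expressions: match everything
            for (String expression : expressions) {
                if (expression.charAt(0) == '-') {
                    // an exclusion only matters if an earlier expression matched
                    if (matched) {
                        matched = Regex.simpleMatch(expression.substring(1), alias) == false;
                    }
                } else if (matched == false) {
                    matched = "_all".equals(expression) || Regex.simpleMatch(expression, alias);
                }
            }
            return matched;
        }
    }

For example, {"logs-*", "-logs-old"} matches "logs-2018" but not "logs-old".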
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
index b19d65090c6..9466b03c442 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
@@ -38,7 +38,6 @@ import org.elasticsearch.cluster.ack.CreateIndexClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.block.ClusterBlocks;
-import org.elasticsearch.cluster.metadata.IndexMetaData.Custom;
 import org.elasticsearch.cluster.metadata.IndexMetaData.State;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
@@ -53,7 +52,6 @@ import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.ValidationException;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Setting;
@@ -95,7 +93,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_INDEX_UUID;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
-import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
 
 /**
  * Service responsible for submitting create index requests
@@ -112,13 +109,19 @@ public class MetaDataCreateIndexService extends AbstractComponent {
     private final IndexScopedSettings indexScopedSettings;
     private final ActiveShardsObserver activeShardsObserver;
     private final NamedXContentRegistry xContentRegistry;
+    private final boolean forbidPrivateIndexSettings;
 
-    @Inject
-    public MetaDataCreateIndexService(Settings settings, ClusterService clusterService,
-                                      IndicesService indicesService, AllocationService allocationService,
-                                      AliasValidator aliasValidator, Environment env,
-                                      IndexScopedSettings indexScopedSettings, ThreadPool threadPool,
-                                      NamedXContentRegistry xContentRegistry) {
+    public MetaDataCreateIndexService(
+            final Settings settings,
+            final ClusterService clusterService,
+            final IndicesService indicesService,
+            final AllocationService allocationService,
+            final AliasValidator aliasValidator,
+            final Environment env,
+            final IndexScopedSettings indexScopedSettings,
+            final ThreadPool threadPool,
+            final NamedXContentRegistry xContentRegistry,
+            final boolean forbidPrivateIndexSettings) {
         super(settings);
         this.clusterService = clusterService;
         this.indicesService = indicesService;
@@ -128,6 +131,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
         this.indexScopedSettings = indexScopedSettings;
         this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool);
         this.xContentRegistry = xContentRegistry;
+        this.forbidPrivateIndexSettings = forbidPrivateIndexSettings;
     }
 
     /**
@@ -287,7 +291,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
 
                 List<IndexTemplateMetaData> templates = MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index());
 
-                Map<String, Custom> customs = new HashMap<>();
+                Map<String, Map<String, String>> customs = new HashMap<>();
 
                 // add the request mapping
                 Map<String, Map<String, Object>> mappings = new HashMap<>();
@@ -300,10 +304,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     mappings.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue()));
                 }
 
-                for (Map.Entry<String, Custom> entry : request.customs().entrySet()) {
-                    customs.put(entry.getKey(), entry.getValue());
-                }
-
                 final Index recoverFromIndex = request.recoverFrom();
 
                 if (recoverFromIndex == null) {
@@ -320,18 +320,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                                 MapperService.parseMapping(xContentRegistry, mappingString));
                         }
                     }
-                    // handle custom
-                    for (ObjectObjectCursor<String, Custom> cursor : template.customs()) {
-                        String type = cursor.key;
-                        IndexMetaData.Custom custom = cursor.value;
-                        IndexMetaData.Custom existing = customs.get(type);
-                        if (existing == null) {
-                            customs.put(type, custom);
-                        } else {
-                            IndexMetaData.Custom merged = existing.mergeWith(custom);
-                            customs.put(type, merged);
-                        }
-                    }
                     //handle aliases
                     for (ObjectObjectCursor<String, AliasMetaData> cursor : template.aliases()) {
                         AliasMetaData aliasMetaData = cursor.value;
@@ -365,10 +353,10 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                 }
                 // now, put the request settings, so they override templates
                 indexSettingsBuilder.put(request.settings());
-                if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
-                    DiscoveryNodes nodes = currentState.nodes();
+                if (indexSettingsBuilder.get(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey()) == null) {
+                    final DiscoveryNodes nodes = currentState.nodes();
                     final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion());
-                    indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion);
+                    indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), createdVersion);
                 }
                 if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) {
                     final int numberOfShards = getNumberOfShards(indexSettingsBuilder);
@@ -390,7 +378,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                 final Settings idxSettings = indexSettingsBuilder.build();
                 int numTargetShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(idxSettings);
                 final int routingNumShards;
-                final Version indexVersionCreated = idxSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null);
+                final Version indexVersionCreated = IndexMetaData.SETTING_INDEX_VERSION_CREATED.get(idxSettings);
                 final IndexMetaData sourceMetaData = recoverFromIndex == null ? null : currentState.metaData().getIndexSafe(recoverFromIndex);
                 if (sourceMetaData == null || sourceMetaData.getNumberOfShards() == 1) {
@@ -519,7 +507,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     indexMetaDataBuilder.putAlias(aliasMetaData);
                 }
 
-                for (Map.Entry<String, Custom> customEntry : customs.entrySet()) {
+                for (Map.Entry<String, Map<String, String>> customEntry : customs.entrySet()) {
                     indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue());
                 }
 
@@ -576,7 +564,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
         // TODO: this logic can be removed when the current major version is 8
         assert Version.CURRENT.major == 7;
         final int numberOfShards;
-        if (Version.fromId(Integer.parseInt(indexSettingsBuilder.get(SETTING_VERSION_CREATED))).before(Version.V_7_0_0_alpha1)) {
+        final Version indexVersionCreated =
+                Version.fromId(Integer.parseInt(indexSettingsBuilder.get(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey())));
+        if (indexVersionCreated.before(Version.V_7_0_0_alpha1)) {
             numberOfShards = 5;
         } else {
             numberOfShards = 1;
@@ -597,11 +587,12 @@ public class MetaDataCreateIndexService extends AbstractComponent {
 
     private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) {
         validateIndexName(request.index(), state);
-        validateIndexSettings(request.index(), request.settings());
+        validateIndexSettings(request.index(), request.settings(), forbidPrivateIndexSettings);
     }
 
-    public void validateIndexSettings(String indexName, Settings settings) throws IndexCreationException {
-        List<String> validationErrors = getIndexSettingsValidationErrors(settings);
+    public void validateIndexSettings(
+            final String indexName, final Settings settings, final boolean forbidPrivateIndexSettings) throws IndexCreationException {
+        List<String> validationErrors = getIndexSettingsValidationErrors(settings, forbidPrivateIndexSettings);
         if (validationErrors.isEmpty() == false) {
             ValidationException validationException = new ValidationException();
             validationException.addValidationErrors(validationErrors);
@@ -609,7 +600,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
         }
     }
 
-    List<String> getIndexSettingsValidationErrors(Settings settings) {
+    List<String> getIndexSettingsValidationErrors(final Settings settings, final boolean forbidPrivateIndexSettings) {
         String customPath = IndexMetaData.INDEX_DATA_PATH_SETTING.get(settings);
         List<String> validationErrors = new ArrayList<>();
         if (Strings.isEmpty(customPath) == false && env.sharedDataFile() == null) {
@@ -620,6 +611,16 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                 validationErrors.add("custom path [" + customPath + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]");
             }
         }
+        if (forbidPrivateIndexSettings) {
+            for (final String key : settings.keySet()) {
+                final Setting<?> setting = indexScopedSettings.get(key);
+                if (setting == null) {
+                    assert indexScopedSettings.isPrivateSetting(key);
+                } else if (setting.isPrivateIndex()) {
+                    validationErrors.add("private index setting [" + key + "] can not be set explicitly");
+                }
+            }
+        }
         return validationErrors;
     }
 
@@ -723,10 +724,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                 .put(IndexMetaData.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getKey() + "_id",
                     Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray()))
                 // we only try once and then give up with a shrink index
-                .put("index.allocation.max_retries", 1)
-                // we add the legacy way of specifying it here for BWC. We can remove this once it's backported to 6.x
-                .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), resizeSourceIndex.getName())
-                .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), resizeSourceIndex.getUUID());
+                .put("index.allocation.max_retries", 1);
         } else if (type == ResizeType.SPLIT) {
             validateSplitIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build());
         } else {
@@ -751,13 +749,14 @@ public class MetaDataCreateIndexService extends AbstractComponent {
             }
         } else {
             final Predicate<String> sourceSettingsPredicate =
-                (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || s.startsWith("index.sort."))
+                (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") ||
+                    s.startsWith("index.sort.") || s.equals("index.soft_deletes.enabled"))
                     && indexSettingsBuilder.keys().contains(s) == false;
             builder.put(sourceMetaData.getSettings().filter(sourceSettingsPredicate));
         }
 
         indexSettingsBuilder
-            .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion())
+            .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), sourceMetaData.getCreationVersion())
             .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion())
             .put(builder.build())
             .put(IndexMetaData.SETTING_ROUTING_PARTITION_SIZE, sourceMetaData.getRoutingPartitionSize())
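With forbidPrivateIndexSettings threaded through, index creation now rejects explicit values for settings flagged as private, such as the creation version the system stamps itself. The guard in isolation, as a sketch with the registry and request settings passed in as parameters:

    import org.elasticsearch.common.settings.IndexScopedSettings;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    import java.util.ArrayList;
    import java.util.List;

    class PrivateSettingsSketch {
        // For each key in the user-supplied settings, look it up in the
        // index-scoped registry; settings marked private may not be set explicitly.
        static List<String> privateSettingErrors(Settings requestSettings, IndexScopedSettings indexScopedSettings) {
            List<String> errors = new ArrayList<>();
            for (String key : requestSettings.keySet()) {
                Setting<?> setting = indexScopedSettings.get(key);
                if (setting != null && setting.isPrivateIndex()) {
                    errors.add("private index setting [" + key + "] can not be set explicitly");
                }
            }
            return errors;
        }
    }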
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
index 507eaf412d5..40d2a697140 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
@@ -19,7 +19,6 @@ package org.elasticsearch.cluster.metadata;
 
 import com.carrotsearch.hppc.cursors.ObjectCursor;
-
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.alias.Alias;
@@ -179,9 +178,6 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
                         .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build();
                     templateBuilder.putAlias(aliasMetaData);
                 }
-                for (Map.Entry<String, IndexMetaData.Custom> entry : request.customs.entrySet()) {
-                    templateBuilder.putCustom(entry.getKey(), entry.getValue());
-                }
                 IndexTemplateMetaData template = templateBuilder.build();
 
                 MetaData.Builder builder = MetaData.builder(currentState.metaData()).put(template);
@@ -304,7 +300,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
                 validationErrors.add(t.getMessage());
             }
         }
-        List<String> indexSettingsValidation = metaDataCreateIndexService.getIndexSettingsValidationErrors(request.settings);
+        List<String> indexSettingsValidation = metaDataCreateIndexService.getIndexSettingsValidationErrors(request.settings, true);
         validationErrors.addAll(indexSettingsValidation);
         if (!validationErrors.isEmpty()) {
             ValidationException validationException = new ValidationException();
@@ -339,7 +335,6 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
         Settings settings = Settings.Builder.EMPTY_SETTINGS;
         Map<String, String> mappings = new HashMap<>();
         List<Alias> aliases = new ArrayList<>();
-        Map<String, IndexMetaData.Custom> customs = new HashMap<>();
 
         TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT;
 
@@ -378,11 +373,6 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
             return this;
         }
 
-        public PutRequest customs(Map<String, IndexMetaData.Custom> customs) {
-            this.customs.putAll(customs);
-            return this;
-        }
-
         public PutRequest putMapping(String mappingType, String mappingSource) {
             mappings.put(mappingType, mappingSource);
             return this;
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index 82d947b4158..616fd13d1fa 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -287,6 +287,7 @@ public class MetaDataMappingService extends AbstractComponent {
             MetaData.Builder builder = MetaData.builder(metaData);
             boolean updated = false;
             for (IndexMetaData indexMetaData : updateList) {
+                boolean updatedMapping = false;
                 // do the actual merge here on the master, and update the mapping source
                 // we use the exact same indexService and metadata we used to validate above here to actually apply the update
                 final Index index = indexMetaData.getIndex();
@@ -303,7 +304,7 @@ public class MetaDataMappingService extends AbstractComponent {
                     if (existingSource.equals(updatedSource)) {
                         // same source, no changes, ignore it
                     } else {
-                        updated = true;
+                        updatedMapping = true;
                         // use the merged mapping source
                         if (logger.isDebugEnabled()) {
                             logger.debug("{} update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
@@ -313,7 +314,7 @@ public class MetaDataMappingService extends AbstractComponent {
 
                     }
                 } else {
-                    updated = true;
+                    updatedMapping = true;
                     if (logger.isDebugEnabled()) {
                         logger.debug("{} create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
                     } else if (logger.isInfoEnabled()) {
@@ -329,7 +330,16 @@ public class MetaDataMappingService extends AbstractComponent {
                         indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource()));
                     }
                 }
+                if (updatedMapping) {
+                    indexMetaDataBuilder.mappingVersion(1 + indexMetaDataBuilder.mappingVersion());
+                }
+                /*
+                 * This implicitly increments the index metadata version and builds the index metadata. This means that we need to have
+                 * already incremented the mapping version if necessary. Therefore, the mapping version increment must remain before this
+                 * statement.
+                 */
                 builder.put(indexMetaDataBuilder);
+                updated |= updatedMapping;
             }
             if (updated) {
                 return ClusterState.builder(currentState).metaData(builder).build();
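The per-index updatedMapping flag is what drives the new mapping version counter. One consequence worth spelling out: a consumer of cluster states can now detect mapping changes with a single long comparison instead of diffing mapping sources. A sketch, with a placeholder index name:

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.metadata.IndexMetaData;

    class MappingChangeSketch {
        static boolean mappingChanged(ClusterState previous, ClusterState current, String index) {
            IndexMetaData before = previous.metaData().index(index);
            IndexMetaData after = current.metaData().index(index);
            // the counter only ever moves forward, so a simple comparison suffices
            return before != null && after != null
                && after.getMappingVersion() > before.getMappingVersion();
        }
    }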
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
index 38766c08e08..cc2b13677d1 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
@@ -85,7 +85,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent {
         indexScopedSettings.validate(
             normalizedSettings.filter(s -> Regex.isSimpleMatchPattern(s) == false), // don't validate wildcards
             false, // don't validate dependencies here we check it below never allow to change the number of shards
-            true); // validate internal index settings
+            true); // validate internal or private index settings
         for (String key : normalizedSettings.keySet()) {
             Setting setting = indexScopedSettings.get(key);
             boolean isWildcard = setting == null && Regex.isSimpleMatchPattern(key);
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
index 4e7e81def87..e9d805d34c8 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
@@ -27,10 +27,11 @@ import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.RecoverySource.EmptyStoreRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.ExistingStoreRecoverySource;
 import org.elasticsearch.cluster.routing.RecoverySource.LocalShardsRecoverySource;
 import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
 import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
-import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
 import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -386,7 +387,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
                     if (asNew && ignoreShards.contains(shardNumber)) {
                         // This shards wasn't completely snapshotted - restore it as new shard
                         indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, primary,
-                            primary ? StoreRecoverySource.EMPTY_STORE_INSTANCE : PeerRecoverySource.INSTANCE, unassignedInfo));
+                            primary ? EmptyStoreRecoverySource.INSTANCE : PeerRecoverySource.INSTANCE, unassignedInfo));
                     } else {
                         indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, primary,
                             primary ? recoverySource : PeerRecoverySource.INSTANCE, unassignedInfo));
@@ -410,13 +411,13 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
                 final RecoverySource primaryRecoverySource;
                 if (indexMetaData.inSyncAllocationIds(shardNumber).isEmpty() == false) {
                     // we have previous valid copies for this shard. use them for recovery
-                    primaryRecoverySource = StoreRecoverySource.EXISTING_STORE_INSTANCE;
+                    primaryRecoverySource = ExistingStoreRecoverySource.INSTANCE;
                 } else if (indexMetaData.getResizeSourceIndex() != null) {
                     // this is a new index but the initial shards should merged from another index
                     primaryRecoverySource = LocalShardsRecoverySource.INSTANCE;
                 } else {
                     // a freshly created index with no restriction
-                    primaryRecoverySource = StoreRecoverySource.EMPTY_STORE_INSTANCE;
+                    primaryRecoverySource = EmptyStoreRecoverySource.INSTANCE;
                 }
                 IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
                 for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java
index 13cb85ea399..b7cc95298c4 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java
@@ -34,7 +34,8 @@ import java.util.Objects;
 /**
  * Represents the recovery source of a shard. Available recovery types are:
  *
- * - {@link StoreRecoverySource} recovery from the local store (empty or with existing data)
+ * - {@link EmptyStoreRecoverySource} recovery from an empty store
+ * - {@link ExistingStoreRecoverySource} recovery from an existing store
 * - {@link PeerRecoverySource} recovery from a primary on another node
 * - {@link SnapshotRecoverySource} recovery from a snapshot
 * - {@link LocalShardsRecoverySource} recovery from other shards of another index on the same node
@@ -59,8 +60,8 @@ public abstract class RecoverySource implements Writeable, ToXContentObject {
     public static RecoverySource readFrom(StreamInput in) throws IOException {
         Type type = Type.values()[in.readByte()];
         switch (type) {
-            case EMPTY_STORE: return StoreRecoverySource.EMPTY_STORE_INSTANCE;
-            case EXISTING_STORE: return StoreRecoverySource.EXISTING_STORE_INSTANCE;
+            case EMPTY_STORE: return EmptyStoreRecoverySource.INSTANCE;
+            case EXISTING_STORE: return new ExistingStoreRecoverySource(in);
             case PEER: return PeerRecoverySource.INSTANCE;
             case SNAPSHOT: return new SnapshotRecoverySource(in);
             case LOCAL_SHARDS: return LocalShardsRecoverySource.INSTANCE;
@@ -91,6 +92,10 @@ public abstract class RecoverySource implements Writeable, ToXContentObject {
 
     public abstract Type getType();
 
+    public boolean shouldBootstrapNewHistoryUUID() {
+        return false;
+    }
+
     @Override
     public boolean equals(Object o) {
         if (this == o) return true;
@@ -107,25 +112,68 @@ public abstract class RecoverySource implements Writeable, ToXContentObject {
     }
 
     /**
-     * recovery from an existing on-disk store or a fresh copy
+     * Recovery from a fresh copy
      */
-    public abstract static class StoreRecoverySource extends RecoverySource {
-        public static final StoreRecoverySource EMPTY_STORE_INSTANCE = new StoreRecoverySource() {
-            @Override
-            public Type getType() {
-                return Type.EMPTY_STORE;
-            }
-        };
-        public static final StoreRecoverySource EXISTING_STORE_INSTANCE = new StoreRecoverySource() {
-            @Override
-            public Type getType() {
-                return Type.EXISTING_STORE;
-            }
-        };
+    public static final class EmptyStoreRecoverySource extends RecoverySource {
+        public static final EmptyStoreRecoverySource INSTANCE = new EmptyStoreRecoverySource();
+
+        @Override
+        public Type getType() {
+            return Type.EMPTY_STORE;
+        }
 
         @Override
         public String toString() {
-            return getType() == Type.EMPTY_STORE ? "new shard recovery" : "existing recovery";
+            return "new shard recovery";
+        }
+    }
+
+    /**
+     * Recovery from an existing on-disk store
+     */
+    public static final class ExistingStoreRecoverySource extends RecoverySource {
+        public static final ExistingStoreRecoverySource INSTANCE = new ExistingStoreRecoverySource(false);
+        public static final ExistingStoreRecoverySource FORCE_STALE_PRIMARY_INSTANCE = new ExistingStoreRecoverySource(true);
+
+        private final boolean bootstrapNewHistoryUUID;
+
+        private ExistingStoreRecoverySource(boolean bootstrapNewHistoryUUID) {
+            this.bootstrapNewHistoryUUID = bootstrapNewHistoryUUID;
+        }
+
+        private ExistingStoreRecoverySource(StreamInput in) throws IOException {
+            if (in.getVersion().onOrAfter(Version.V_6_5_0)) {
+                bootstrapNewHistoryUUID = in.readBoolean();
+            } else {
+                bootstrapNewHistoryUUID = false;
+            }
+        }
+
+        @Override
+        public void addAdditionalFields(XContentBuilder builder, Params params) throws IOException {
+            builder.field("bootstrap_new_history_uuid", bootstrapNewHistoryUUID);
+        }
+
+        @Override
+        protected void writeAdditionalFields(StreamOutput out) throws IOException {
+            if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
+                out.writeBoolean(bootstrapNewHistoryUUID);
+            }
+        }
+
+        @Override
+        public boolean shouldBootstrapNewHistoryUUID() {
+            return bootstrapNewHistoryUUID;
+        }
+
+        @Override
+        public Type getType() {
+            return Type.EXISTING_STORE;
+        }
+
+        @Override
+        public String toString() {
+            return "existing store recovery; bootstrap_history_uuid=" + bootstrapNewHistoryUUID;
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
index 6a9a105b6c4..74341ca271a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
@@ -19,14 +19,13 @@
 
 package org.elasticsearch.cluster.routing;
 
+import org.elasticsearch.cluster.routing.RecoverySource.ExistingStoreRecoverySource;
 import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
-import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
 import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.xcontent.ToXContent.Params;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.Index;
@@ -318,7 +317,7 @@ public final class ShardRouting implements Writeable, ToXContentObject {
         final RecoverySource recoverySource;
         if (active()) {
             if (primary()) {
-                recoverySource = StoreRecoverySource.EXISTING_STORE_INSTANCE;
+                recoverySource = ExistingStoreRecoverySource.INSTANCE;
             } else {
                 recoverySource = PeerRecoverySource.INSTANCE;
             }
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java
index 153fc2cbe3e..8b97f1357fa 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java
@@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation;
 
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.common.Nullable;
@@ -82,11 +81,7 @@ public class NodeAllocationResult implements ToXContentObject, Writeable, Compar
     public NodeAllocationResult(StreamInput in) throws IOException {
         node = new DiscoveryNode(in);
         shardStoreInfo = in.readOptionalWriteable(ShardStoreInfo::new);
-        if (in.getVersion().before(Version.V_5_2_1)) {
-            canAllocateDecision = Decision.readFrom(in);
-        } else {
-            canAllocateDecision = in.readOptionalWriteable(Decision::readFrom);
-        }
+        canAllocateDecision = in.readOptionalWriteable(Decision::readFrom);
         nodeDecision = AllocationDecision.readFrom(in);
         weightRanking = in.readVInt();
     }
@@ -95,15 +90,7 @@ public class NodeAllocationResult implements ToXContentObject, Writeable, Compar
     public void writeTo(StreamOutput out) throws IOException {
         node.writeTo(out);
         out.writeOptionalWriteable(shardStoreInfo);
-        if (out.getVersion().before(Version.V_5_2_1)) {
-            if (canAllocateDecision == null) {
-                Decision.NO.writeTo(out);
-            } else {
-                canAllocateDecision.writeTo(out);
-            }
-        } else {
-            out.writeOptionalWriteable(canAllocateDecision);
-        }
+        out.writeOptionalWriteable(canAllocateDecision);
         nodeDecision.writeTo(out);
         out.writeVInt(weightRanking);
     }
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
index 66281b73458..a42fd2765b5 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
@@ -21,7 +21,7 @@ package org.elasticsearch.cluster.routing.allocation.command;
 
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RecoverySource;
-import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.EmptyStoreRecoverySource;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
@@ -136,7 +136,7 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
         }
 
         initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate,
-            StoreRecoverySource.EMPTY_STORE_INSTANCE);
+            EmptyStoreRecoverySource.INSTANCE);
 
         return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
     }
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java
index 11c4420200e..f4c9aba17d7 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java
@@ -129,7 +129,8 @@ public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocation
                 "trying to allocate an existing primary shard [" + index + "][" + shardId + "], while no such shard has ever been active");
         }
 
-        initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting);
+        initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, null,
+            RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE);
 
         return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
     }
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
index f3146f6f771..df623aa8a5e 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
@@ -158,13 +158,13 @@ public class FilterAllocationDecider extends AllocationDecider {
 
     private Decision shouldIndexFilter(IndexMetaData indexMd, RoutingNode node, RoutingAllocation allocation) {
         if (indexMd.requireFilters() != null) {
-            if (!indexMd.requireFilters().match(node.node())) {
+            if (indexMd.requireFilters().match(node.node()) == false) {
                 return allocation.decision(Decision.NO, NAME, "node does not match index setting [%s] filters [%s]",
                     IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX, indexMd.requireFilters());
             }
         }
         if (indexMd.includeFilters() != null) {
-            if (!indexMd.includeFilters().match(node.node())) {
+            if (indexMd.includeFilters().match(node.node()) == false) {
                 return allocation.decision(Decision.NO, NAME, "node does not match index setting [%s] filters [%s]",
                     IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_PREFIX, indexMd.includeFilters());
             }
@@ -180,13 +180,13 @@ public class FilterAllocationDecider extends AllocationDecider {
 
     private Decision shouldClusterFilter(RoutingNode node, RoutingAllocation allocation) {
         if (clusterRequireFilters != null) {
-            if (!clusterRequireFilters.match(node.node())) {
+            if (clusterRequireFilters.match(node.node()) == false) {
                 return allocation.decision(Decision.NO, NAME, "node does not match cluster setting [%s] filters [%s]",
                     CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX, clusterRequireFilters);
             }
         }
         if (clusterIncludeFilters != null) {
-            if (!clusterIncludeFilters.match(node.node())) {
+            if (clusterIncludeFilters.match(node.node()) == false) {
                 return allocation.decision(Decision.NO, NAME, "node does not cluster setting [%s] filters [%s]",
                     CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX, clusterIncludeFilters);
             }
diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
index 5dd36b9b1bc..7272c9ed302 100644
--- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
@@ -255,13 +255,6 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
         localNodeMasterListeners.add(listener);
     }
 
-    /**
-     * Remove the given listener for on/off local master events
-     */
-    public void removeLocalNodeMasterListener(LocalNodeMasterListener listener) {
-        localNodeMasterListeners.remove(listener);
-    }
-
     /**
      * Adds a cluster state listener that is expected to be removed during a short period of time.
     * If provided, the listener will be notified once a specific time has elapsed.
@@ -349,13 +342,6 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
         }
     }
 
-    /** asserts that the current thread is the cluster state update thread */
-    public static boolean assertClusterStateUpdateThread() {
-        assert Thread.currentThread().getName().contains(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME) :
-            "not called from the cluster state update thread";
-        return true;
-    }
-
     /** asserts that the current thread is NOT the cluster state update thread */
     public static boolean assertNotClusterStateUpdateThread(String reason) {
         assert Thread.currentThread().getName().contains(CLUSTER_UPDATE_THREAD_NAME) == false :
@@ -607,13 +593,6 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
             listeners.add(listener);
         }
 
-        private void remove(LocalNodeMasterListener listener) {
-            listeners.remove(listener);
-        }
-
-        private void clear() {
-            listeners.clear();
-        }
     }
 
     private static class OnMasterRunnable implements Runnable {
diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
index fc5dc678bd0..58cace44754 100644
--- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
@@ -28,10 +28,8 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
 import org.elasticsearch.cluster.ClusterStateTaskListener;
 import org.elasticsearch.cluster.LocalNodeMasterListener;
 import org.elasticsearch.cluster.NodeConnectionsService;
-import org.elasticsearch.cluster.TimeoutClusterStateListener;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.OperationRouting;
-import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
@@ -53,6 +51,9 @@ public class ClusterService extends AbstractLifecycleComponent {
         Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30),
             Property.Dynamic, Property.NodeScope);
 
+    public static final org.elasticsearch.common.settings.Setting.AffixSetting<String> USER_DEFINED_META_DATA =
+        Setting.prefixKeySetting("cluster.metadata.", (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));
+
     private final ClusterName clusterName;
 
     private final OperationRouting operationRouting;
@@ -67,6 +68,8 @@ public class ClusterService extends AbstractLifecycleComponent {
         this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
         this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
                 this::setSlowTaskLoggingThreshold);
+        // Add a no-op update consumer so changes are logged
+        this.clusterSettings.addAffixUpdateConsumer(USER_DEFINED_META_DATA, (first, second) -> {}, (first, second) -> {});
         this.clusterApplierService = new ClusterApplierService(settings, clusterSettings, threadPool);
     }
 
@@ -162,13 +165,6 @@ public class ClusterService extends AbstractLifecycleComponent {
         clusterApplierService.removeListener(listener);
     }
 
-    /**
-     * Removes a timeout listener for updated cluster states.
-     */
-    public void removeTimeoutListener(TimeoutClusterStateListener listener) {
-        clusterApplierService.removeTimeoutListener(listener);
-    }
-
     /**
     * Add a listener for on/off local node master events
     */
@@ -176,23 +172,6 @@ public class ClusterService extends AbstractLifecycleComponent {
         clusterApplierService.addLocalNodeMasterListener(listener);
     }
 
-    /**
-     * Remove the given listener for on/off local master events
-     */
-    public void removeLocalNodeMasterListener(LocalNodeMasterListener listener) {
-        clusterApplierService.removeLocalNodeMasterListener(listener);
-    }
-
-    /**
-     * Adds a cluster state listener that is expected to be removed during a short period of time.
-     * If provided, the listener will be notified once a specific time has elapsed.
-     *
-     * NOTE: the listener is not removed on timeout. This is the responsibility of the caller.
-     */
-    public void addTimeoutListener(@Nullable final TimeValue timeout, final TimeoutClusterStateListener listener) {
-        clusterApplierService.addTimeoutListener(timeout, listener);
-    }
-
     public MasterService getMasterService() {
         return masterService;
     }
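The new cluster.metadata.* affix setting lets operators attach arbitrary dynamic key-value metadata to the cluster settings; the no-op affix update consumer above exists purely so such keys validate and their updates are logged. Example of setting one, with a placeholder key and value:

    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.settings.Settings;

    class ClusterMetadataSketch {
        static void tagCluster(Client client) {
            client.admin().cluster().prepareUpdateSettings()
                .setPersistentSettings(Settings.builder()
                    .put("cluster.metadata.administrator", "ops@example.org") // placeholder key/value
                    .build())
                .get();
        }
    }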
Otherwise it is called as soon + * as the node id is available. + */ + public static void setNodeName(String nodeName) { + NodeNamePatternConverter.setNodeName(nodeName); + } + private static void checkErrorListener() { assert errorListenerIsRegistered() : "expected error listener to be registered"; if (error.get()) { @@ -158,8 +167,8 @@ public class LogConfigurator { final LoggerContext context = (LoggerContext) LogManager.getContext(false); + final Set locationsWithDeprecatedPatterns = Collections.synchronizedSet(new HashSet<>()); final List configurations = new ArrayList<>(); - /* * Subclass the properties configurator to hack the new pattern in * place so users don't have to change log4j2.properties in @@ -170,7 +179,6 @@ public class LogConfigurator { * Everything in this subclass that isn't marked as a hack is copied * from log4j2's source. */ - Set locationsWithDeprecatedPatterns = Collections.synchronizedSet(new HashSet<>()); final PropertiesConfigurationFactory factory = new PropertiesConfigurationFactory() { @Override public PropertiesConfiguration getConfiguration(final LoggerContext loggerContext, final ConfigurationSource source) { diff --git a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java b/server/src/main/java/org/elasticsearch/common/logging/Loggers.java index b2a24faf643..57bafbbdac4 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java +++ b/server/src/main/java/org/elasticsearch/common/logging/Loggers.java @@ -49,31 +49,26 @@ public class Loggers { Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Setting.Property.Dynamic, Setting.Property.NodeScope)); - public static Logger getLogger(Class clazz, Settings settings, ShardId shardId, String... prefixes) { - return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0])); + public static Logger getLogger(Class clazz, ShardId shardId, String... prefixes) { + return getLogger(clazz, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0])); } /** - * Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings, ShardId, String...)} but String loggerName instead of - * Class. + * Just like {@link #getLogger(Class, ShardId, String...)} but String loggerName instead of + * Class and no extra prefixes. */ - public static Logger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) { - return getLogger(loggerName, settings, - asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0])); + public static Logger getLogger(String loggerName, ShardId shardId) { + return ESLoggerFactory.getLogger(formatPrefix(shardId.getIndexName(), Integer.toString(shardId.id())), loggerName); } - public static Logger getLogger(Class clazz, Settings settings, Index index, String... prefixes) { - return getLogger(clazz, settings, asArrayList(Loggers.SPACE, index.getName(), prefixes).toArray(new String[0])); + public static Logger getLogger(Class clazz, Index index, String... prefixes) { + return getLogger(clazz, Settings.EMPTY, asArrayList(Loggers.SPACE, index.getName(), prefixes).toArray(new String[0])); } public static Logger getLogger(Class clazz, Settings settings, String... prefixes) { return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz); } - public static Logger getLogger(String loggerName, Settings settings, String... 
prefixes) { - return ESLoggerFactory.getLogger(formatPrefix(prefixes), loggerName); - } - public static Logger getLogger(Logger parentLogger, String s) { String prefix = null; if (parentLogger instanceof PrefixLogger) { diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeNamePatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeNamePatternConverter.java index ca4c9ab776f..b63db40276d 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeNamePatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeNamePatternConverter.java @@ -30,20 +30,22 @@ import org.apache.lucene.util.SetOnce; /** * Converts {@code %node_name} in log4j patterns into the current node name. - * We *could* use a system property lookup instead but this is very explicit - * and fails fast if we try to use the logger without initializing the node - * name. As a bonus it ought to be ever so slightly faster because it doesn't - * have to look up the system property every time. + * We can't use a system property for this because the node name system + * property is only set if the node name is explicitly defined in + * elasticsearch.yml. */ @Plugin(category = PatternConverter.CATEGORY, name = "NodeNamePatternConverter") @ConverterKeys({"node_name"}) -public class NodeNamePatternConverter extends LogEventPatternConverter { +public final class NodeNamePatternConverter extends LogEventPatternConverter { + /** + * The name of this node. + */ private static final SetOnce NODE_NAME = new SetOnce<>(); /** * Set the name of this node. */ - public static void setNodeName(String nodeName) { + static void setNodeName(String nodeName) { NODE_NAME.set(nodeName); } @@ -55,18 +57,21 @@ public class NodeNamePatternConverter extends LogEventPatternConverter { throw new IllegalArgumentException("no options supported but options provided: " + Arrays.toString(options)); } - return new NodeNamePatternConverter(NODE_NAME.get()); + return new NodeNamePatternConverter(); } - private final String nodeName; - - private NodeNamePatternConverter(String nodeName) { + private NodeNamePatternConverter() { super("NodeName", "node_name"); - this.nodeName = nodeName; } @Override public void format(LogEvent event, StringBuilder toAppendTo) { - toAppendTo.append(nodeName); + /* + * We're not thrilled about this volatile read on every line logged but + * the alternatives are slightly terrifying and/or don't work with the + * security manager. + */ + String nodeName = NODE_NAME.get(); + toAppendTo.append(nodeName == null ? 
"unknown" : nodeName); } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index ebd0d5ba2ef..dc8628f184e 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -27,8 +27,10 @@ import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.document.LatLonDocValuesField; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.FilterLeafReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; @@ -48,13 +50,16 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; +import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; import org.apache.lucene.search.grouping.CollapseTopFieldDocs; @@ -71,6 +76,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -79,6 +85,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import java.io.IOException; import java.text.ParseException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -87,7 +94,7 @@ import java.util.Map; public class Lucene { public static final String LATEST_DOC_VALUES_FORMAT = "Lucene70"; public static final String LATEST_POSTINGS_FORMAT = "Lucene50"; - public static final String LATEST_CODEC = "Lucene70"; + public static final String LATEST_CODEC = "Lucene80"; static { Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class); @@ -96,12 +103,14 @@ public class Lucene { assert annotation == null : "DocValuesFormat " + LATEST_DOC_VALUES_FORMAT + " is deprecated" ; } + public static final String SOFT_DELETES_FIELD = "__soft_deletes"; + public static final NamedAnalyzer STANDARD_ANALYZER = new NamedAnalyzer("_standard", AnalyzerScope.GLOBAL, new StandardAnalyzer()); public static final NamedAnalyzer KEYWORD_ANALYZER = new NamedAnalyzer("_keyword", AnalyzerScope.GLOBAL, new KeywordAnalyzer()); public static final ScoreDoc[] EMPTY_SCORE_DOCS = new ScoreDoc[0]; - public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, 0.0f); + public static final TopDocs EMPTY_TOP_DOCS = new 
TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), EMPTY_SCORE_DOCS); public static Version parseVersion(@Nullable String version, Version defaultVersion, Logger logger) { if (version == null) { @@ -140,7 +149,7 @@ public class Lucene { public static int getNumDocs(SegmentInfos info) { int numDocs = 0; for (SegmentCommitInfo si : info) { - numDocs += si.info.maxDoc() - si.getDelCount(); + numDocs += si.info.maxDoc() - si.getDelCount() - si.getSoftDelCount(); } return numDocs; } @@ -197,6 +206,7 @@ public class Lucene { } final CommitPoint cp = new CommitPoint(si, directory); try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setIndexCommit(cp) .setCommitOnClose(false) .setMergePolicy(NoMergePolicy.INSTANCE) @@ -220,6 +230,7 @@ public class Lucene { } } try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setMergePolicy(NoMergePolicy.INSTANCE) // no merges .setCommitOnClose(false) // no commits .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append... @@ -245,7 +256,7 @@ public class Lucene { * Check whether there is one or more documents matching the provided query. */ public static boolean exists(IndexSearcher searcher, Query query) throws IOException { - final Weight weight = searcher.createNormalizedWeight(query, false); + final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); // the scorer API should be more efficient at stopping after the first // match than the bulk scorer API for (LeafReaderContext context : searcher.getIndexReader().leaves()) { @@ -264,19 +275,28 @@ public class Lucene { return false; } - public static TopDocs readTopDocs(StreamInput in) throws IOException { + private static TotalHits readTotalHits(StreamInput in) throws IOException { + long totalHits = in.readVLong(); + TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO; + if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_7_0_0_alpha1)) { + totalHitsRelation = in.readEnum(TotalHits.Relation.class); + } + return new TotalHits(totalHits, totalHitsRelation); + } + + public static TopDocsAndMaxScore readTopDocs(StreamInput in) throws IOException { byte type = in.readByte(); if (type == 0) { - long totalHits = in.readVLong(); + TotalHits totalHits = readTotalHits(in); float maxScore = in.readFloat(); ScoreDoc[] scoreDocs = new ScoreDoc[in.readVInt()]; for (int i = 0; i < scoreDocs.length; i++) { scoreDocs[i] = new ScoreDoc(in.readVInt(), in.readFloat()); } - return new TopDocs(totalHits, scoreDocs, maxScore); + return new TopDocsAndMaxScore(new TopDocs(totalHits, scoreDocs), maxScore); } else if (type == 1) { - long totalHits = in.readVLong(); + TotalHits totalHits = readTotalHits(in); float maxScore = in.readFloat(); SortField[] fields = new SortField[in.readVInt()]; @@ -288,9 +308,9 @@ public class Lucene { for (int i = 0; i < fieldDocs.length; i++) { fieldDocs[i] = readFieldDoc(in); } - return new TopFieldDocs(totalHits, fieldDocs, fields, maxScore); + return new TopDocsAndMaxScore(new TopFieldDocs(totalHits, fieldDocs, fields), maxScore); } else if (type == 2) { - long totalHits = in.readVLong(); + TotalHits totalHits = readTotalHits(in); float maxScore = in.readFloat(); String field = in.readString(); @@ -305,7 +325,7 @@ public class Lucene { fieldDocs[i] = readFieldDoc(in); collapseValues[i] = readSortValue(in); } 
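An aside on the wire pattern used by readTotalHits above and writeTotalHits below: the TotalHits.Relation byte only goes over the wire when the remote node is on 7.0.0-alpha1 or later; older peers fall back to EQUAL_TO, and writeTotalHits refuses to serialize an approximate count to them. A minimal round-trip sketch using the real BytesStreamOutput/StreamInput helpers, with made-up hit data:

    BytesStreamOutput out = new BytesStreamOutput();
    out.setVersion(Version.V_6_4_0); // pretend the peer is a 6.4 node
    TopDocs hits = new TopDocs(new TotalHits(3, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]);
    Lucene.writeTopDocs(out, new TopDocsAndMaxScore(hits, 1.5f));

    StreamInput in = out.bytes().streamInput();
    in.setVersion(Version.V_6_4_0); // the reader must assume the same wire version
    TopDocsAndMaxScore roundTripped = Lucene.readTopDocs(in);
    // No relation byte was written on the 6.x path, so readTotalHits defaults to
    // EQUAL_TO; a GREATER_THAN_OR_EQUAL_TO relation with value > 0 would have
    // thrown in writeTotalHits rather than being silently downgraded.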
- return new CollapseTopFieldDocs(field, totalHits, fieldDocs, fields, collapseValues, maxScore); + return new TopDocsAndMaxScore(new CollapseTopFieldDocs(field, totalHits, fieldDocs, fields, collapseValues), maxScore); } else { throw new IllegalStateException("Unknown type " + type); } @@ -375,13 +395,22 @@ public class Lucene { private static final Class GEO_DISTANCE_SORT_TYPE_CLASS = LatLonDocValuesField.newDistanceSort("some_geo_field", 0, 0).getClass(); - public static void writeTopDocs(StreamOutput out, TopDocs topDocs) throws IOException { - if (topDocs instanceof CollapseTopFieldDocs) { - out.writeByte((byte) 2); - CollapseTopFieldDocs collapseDocs = (CollapseTopFieldDocs) topDocs; + private static void writeTotalHits(StreamOutput out, TotalHits totalHits) throws IOException { + out.writeVLong(totalHits.value); + if (out.getVersion().onOrAfter(org.elasticsearch.Version.V_7_0_0_alpha1)) { + out.writeEnum(totalHits.relation); + } else if (totalHits.value > 0 && totalHits.relation != TotalHits.Relation.EQUAL_TO) { + throw new IllegalArgumentException("Cannot serialize approximate total hit counts to nodes that are on a version < 7.0.0"); + } + } - out.writeVLong(topDocs.totalHits); - out.writeFloat(topDocs.getMaxScore()); + public static void writeTopDocs(StreamOutput out, TopDocsAndMaxScore topDocs) throws IOException { + if (topDocs.topDocs instanceof CollapseTopFieldDocs) { + out.writeByte((byte) 2); + CollapseTopFieldDocs collapseDocs = (CollapseTopFieldDocs) topDocs.topDocs; + + writeTotalHits(out, topDocs.topDocs.totalHits); + out.writeFloat(topDocs.maxScore); out.writeString(collapseDocs.field); @@ -390,35 +419,35 @@ public class Lucene { writeSortField(out, sortField); } - out.writeVInt(topDocs.scoreDocs.length); - for (int i = 0; i < topDocs.scoreDocs.length; i++) { + out.writeVInt(topDocs.topDocs.scoreDocs.length); + for (int i = 0; i < topDocs.topDocs.scoreDocs.length; i++) { ScoreDoc doc = collapseDocs.scoreDocs[i]; writeFieldDoc(out, (FieldDoc) doc); writeSortValue(out, collapseDocs.collapseValues[i]); } - } else if (topDocs instanceof TopFieldDocs) { + } else if (topDocs.topDocs instanceof TopFieldDocs) { out.writeByte((byte) 1); - TopFieldDocs topFieldDocs = (TopFieldDocs) topDocs; + TopFieldDocs topFieldDocs = (TopFieldDocs) topDocs.topDocs; - out.writeVLong(topDocs.totalHits); - out.writeFloat(topDocs.getMaxScore()); + writeTotalHits(out, topDocs.topDocs.totalHits); + out.writeFloat(topDocs.maxScore); out.writeVInt(topFieldDocs.fields.length); for (SortField sortField : topFieldDocs.fields) { writeSortField(out, sortField); } - out.writeVInt(topDocs.scoreDocs.length); + out.writeVInt(topDocs.topDocs.scoreDocs.length); for (ScoreDoc doc : topFieldDocs.scoreDocs) { writeFieldDoc(out, (FieldDoc) doc); } } else { out.writeByte((byte) 0); - out.writeVLong(topDocs.totalHits); - out.writeFloat(topDocs.getMaxScore()); + writeTotalHits(out, topDocs.topDocs.totalHits); + out.writeFloat(topDocs.maxScore); - out.writeVInt(topDocs.scoreDocs.length); - for (ScoreDoc doc : topDocs.scoreDocs) { + out.writeVInt(topDocs.topDocs.scoreDocs.length); + for (ScoreDoc doc : topDocs.topDocs.scoreDocs) { writeScoreDoc(out, doc); } } @@ -572,6 +601,24 @@ public class Lucene { out.writeBoolean(sortField.getReverse()); } + private static Number readExplanationValue(StreamInput in) throws IOException { + if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_7_0_0_alpha1)) { + final int numberType = in.readByte(); + switch (numberType) { + case 0: + return in.readFloat(); + case 1: + return 
in.readDouble(); + case 2: + return in.readZLong(); + default: + throw new IOException("Unexpected number type: " + numberType); + } + } else { + return in.readFloat(); + } + } + public static Explanation readExplanation(StreamInput in) throws IOException { boolean match = in.readBoolean(); String description = in.readString(); @@ -580,12 +627,29 @@ public class Lucene { subExplanations[i] = readExplanation(in); } if (match) { - return Explanation.match(in.readFloat(), description, subExplanations); + return Explanation.match(readExplanationValue(in), description, subExplanations); } else { return Explanation.noMatch(description, subExplanations); } } + private static void writeExplanationValue(StreamOutput out, Number value) throws IOException { + if (out.getVersion().onOrAfter(org.elasticsearch.Version.V_7_0_0_alpha1)) { + if (value instanceof Float) { + out.writeByte((byte) 0); + out.writeFloat(value.floatValue()); + } else if (value instanceof Double) { + out.writeByte((byte) 1); + out.writeDouble(value.doubleValue()); + } else { + out.writeByte((byte) 2); + out.writeZLong(value.longValue()); + } + } else { + out.writeFloat(value.floatValue()); + } + } + public static void writeExplanation(StreamOutput out, Explanation explanation) throws IOException { out.writeBoolean(explanation.isMatch()); out.writeString(explanation.getDescription()); @@ -595,7 +659,7 @@ public class Lucene { writeExplanation(out, subExp); } if (explanation.isMatch()) { - out.writeFloat(explanation.getValue()); + writeExplanationValue(out, explanation.getValue()); } } @@ -681,27 +745,6 @@ public class Lucene { } } - /** - * Return a Scorer that throws an ElasticsearchIllegalStateException - * on all operations with the given message. - */ - public static Scorer illegalScorer(final String message) { - return new Scorer(null) { - @Override - public float score() throws IOException { - throw new IllegalStateException(message); - } - @Override - public int docID() { - throw new IllegalStateException(message); - } - @Override - public DocIdSetIterator iterator() { - throw new IllegalStateException(message); - } - }; - } - private static final class CommitPoint extends IndexCommit { private String segmentsFileName; private final Collection files; @@ -829,4 +872,95 @@ public class Lucene { } }; } + + /** + * Whether a query sorted by {@code searchSort} can be early-terminated if the index is sorted by {@code indexSort}. + */ + public static boolean canEarlyTerminate(Sort searchSort, Sort indexSort) { + final SortField[] fields1 = searchSort.getSort(); + final SortField[] fields2 = indexSort.getSort(); + // early termination is possible if fields1 is a prefix of fields2 + if (fields1.length > fields2.length) { + return false; + } + return Arrays.asList(fields1).equals(Arrays.asList(fields2).subList(0, fields1.length)); + } + + /** + * Wraps a directory reader to make all documents live except those were rolled back + * or hard-deleted due to non-aborting exceptions during indexing. + * The wrapped reader can be used to query all documents. 
+ * + * @param in the input directory reader + * @return the wrapped reader + */ + public static DirectoryReader wrapAllDocsLive(DirectoryReader in) throws IOException { + return new DirectoryReaderWithAllLiveDocs(in); + } + + private static final class DirectoryReaderWithAllLiveDocs extends FilterDirectoryReader { + static final class LeafReaderWithLiveDocs extends FilterLeafReader { + final Bits liveDocs; + final int numDocs; + LeafReaderWithLiveDocs(LeafReader in, Bits liveDocs, int numDocs) { + super(in); + this.liveDocs = liveDocs; + this.numDocs = numDocs; + } + @Override + public Bits getLiveDocs() { + return liveDocs; + } + @Override + public int numDocs() { + return numDocs; + } + @Override + public CacheHelper getCoreCacheHelper() { + return in.getCoreCacheHelper(); + } + @Override + public CacheHelper getReaderCacheHelper() { + return null; // Modifying liveDocs + } + } + + DirectoryReaderWithAllLiveDocs(DirectoryReader in) throws IOException { + super(in, new SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader leaf) { + SegmentReader segmentReader = segmentReader(leaf); + Bits hardLiveDocs = segmentReader.getHardLiveDocs(); + if (hardLiveDocs == null) { + return new LeafReaderWithLiveDocs(leaf, null, leaf.maxDoc()); + } + // TODO: Can we avoid calculate numDocs by using SegmentReader#getSegmentInfo with LUCENE-8458? + int numDocs = 0; + for (int i = 0; i < hardLiveDocs.length(); i++) { + if (hardLiveDocs.get(i)) { + numDocs++; + } + } + return new LeafReaderWithLiveDocs(segmentReader, hardLiveDocs, numDocs); + } + }); + } + + @Override + protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { + return wrapAllDocsLive(in); + } + + @Override + public CacheHelper getReaderCacheHelper() { + return null; // Modifying liveDocs + } + } + + /** + * Returns a numeric docvalues which can be used to soft-delete documents. 
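+ * Typically handed to {@link IndexWriter#softUpdateDocument} so that a document can be marked deleted by a doc-values update instead of a hard delete.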
+ */ + public static NumericDocValuesField newSoftDeletesField() { + return new NumericDocValuesField(SOFT_DELETES_FIELD, 1); + } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java b/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java index 2552309450b..f99d68952e5 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java @@ -22,8 +22,9 @@ package org.elasticsearch.common.lucene; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreCachingWrappingScorer; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.SimpleCollector; import java.io.IOException; @@ -33,7 +34,7 @@ public class MinimumScoreCollector extends SimpleCollector { private final Collector collector; private final float minimumScore; - private Scorer scorer; + private Scorable scorer; private LeafCollector leafCollector; public MinimumScoreCollector(Collector collector, float minimumScore) { @@ -42,7 +43,7 @@ public class MinimumScoreCollector extends SimpleCollector { } @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { if (!(scorer instanceof ScoreCachingWrappingScorer)) { scorer = new ScoreCachingWrappingScorer(scorer); } @@ -63,7 +64,7 @@ public class MinimumScoreCollector extends SimpleCollector { } @Override - public boolean needsScores() { - return true; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java b/server/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java index df17f8d7757..13a2a23ec56 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.common.lucene; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; public interface ScorerAware { - void setScorer(Scorer scorer); + void setScorer(Scorable scorer); } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java b/server/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java index 0ff0008a430..67f1495c79c 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.lucene.index; +import org.apache.lucene.index.ImpactsEnum; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; @@ -28,6 +29,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FilteredDocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.BitSet; @@ -80,7 +82,7 @@ public class FilterableTermsEnum extends TermsEnum { } else { final IndexSearcher searcher = new 
IndexSearcher(reader); searcher.setQueryCache(null); - weight = searcher.createNormalizedWeight(filter, false); + weight = searcher.createWeight(searcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f); } for (LeafReaderContext context : leaves) { Terms terms = context.reader().terms(field); @@ -207,6 +209,11 @@ public class FilterableTermsEnum extends TermsEnum { throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); } + @Override + public ImpactsEnum impacts(int flags) throws IOException { + throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); + } + @Override public BytesRef next() throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java b/server/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java index e9db2928ca7..f1e55d76296 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.FilterLeafCollector; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; @@ -56,7 +57,7 @@ public class FilteredCollector implements Collector { } @Override - public boolean needsScores() { - return collector.needsScores(); + public ScoreMode scoreMode() { + return collector.scoreMode(); } } \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/TopDocsAndMaxScore.java b/server/src/main/java/org/elasticsearch/common/lucene/search/TopDocsAndMaxScore.java new file mode 100644 index 00000000000..7cc1f9142de --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/TopDocsAndMaxScore.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.lucene.search; + +import org.apache.lucene.search.TopDocs; + +/** + * Wrapper around a {@link TopDocs} instance and the maximum score. + */ +// TODO: Remove this class when https://github.com/elastic/elasticsearch/issues/32981 is addressed. 
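+// A typical call site looks like this (illustrative only; Lucene 8's TopDocs
+// no longer carries a max score, so callers track or compute it themselves):
+//     TopDocs topDocs = searcher.search(query, 10);
+//     TopDocsAndMaxScore hits = new TopDocsAndMaxScore(topDocs, maxScore);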
+public final class TopDocsAndMaxScore { + + public final TopDocs topDocs; + public float maxScore; + + public TopDocsAndMaxScore(TopDocs topDocs, float maxScore) { + this.topDocs = topDocs; + this.maxScore = maxScore; + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java index 399f3d7a2e6..6d8a436c0b2 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java @@ -38,10 +38,10 @@ public enum CombineFunction implements Writeable { public Explanation explain(Explanation queryExpl, Explanation funcExpl, float maxBoost) { Explanation boostExpl = Explanation.match(maxBoost, "maxBoost"); Explanation minExpl = Explanation.match( - Math.min(funcExpl.getValue(), maxBoost), + Math.min(funcExpl.getValue().floatValue(), maxBoost), "min of:", funcExpl, boostExpl); - return Explanation.match(queryExpl.getValue() * minExpl.getValue(), + return Explanation.match(queryExpl.getValue().floatValue() * minExpl.getValue().floatValue(), "function score, product of:", queryExpl, minExpl); } }, @@ -55,7 +55,7 @@ public enum CombineFunction implements Writeable { public Explanation explain(Explanation queryExpl, Explanation funcExpl, float maxBoost) { Explanation boostExpl = Explanation.match(maxBoost, "maxBoost"); return Explanation.match( - Math.min(funcExpl.getValue(), maxBoost), + Math.min(funcExpl.getValue().floatValue(), maxBoost), "min of:", funcExpl, boostExpl); } @@ -69,9 +69,9 @@ public enum CombineFunction implements Writeable { @Override public Explanation explain(Explanation queryExpl, Explanation funcExpl, float maxBoost) { - Explanation minExpl = Explanation.match(Math.min(funcExpl.getValue(), maxBoost), "min of:", + Explanation minExpl = Explanation.match(Math.min(funcExpl.getValue().floatValue(), maxBoost), "min of:", funcExpl, Explanation.match(maxBoost, "maxBoost")); - return Explanation.match(Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue(), "sum of", + return Explanation.match(Math.min(funcExpl.getValue().floatValue(), maxBoost) + queryExpl.getValue().floatValue(), "sum of", queryExpl, minExpl); } @@ -84,10 +84,10 @@ public enum CombineFunction implements Writeable { @Override public Explanation explain(Explanation queryExpl, Explanation funcExpl, float maxBoost) { - Explanation minExpl = Explanation.match(Math.min(funcExpl.getValue(), maxBoost), "min of:", + Explanation minExpl = Explanation.match(Math.min(funcExpl.getValue().floatValue(), maxBoost), "min of:", funcExpl, Explanation.match(maxBoost, "maxBoost")); return Explanation.match( - (float) ((Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue()) / 2.0), "avg of", + (float) ((Math.min(funcExpl.getValue().floatValue(), maxBoost) + queryExpl.getValue().floatValue()) / 2.0), "avg of", queryExpl, minExpl); } @@ -101,10 +101,10 @@ public enum CombineFunction implements Writeable { @Override public Explanation explain(Explanation queryExpl, Explanation funcExpl, float maxBoost) { Explanation innerMinExpl = Explanation.match( - Math.min(funcExpl.getValue(), maxBoost), "min of:", + Math.min(funcExpl.getValue().floatValue(), maxBoost), "min of:", funcExpl, Explanation.match(maxBoost, "maxBoost")); return Explanation.match( - Math.min(Math.min(funcExpl.getValue(), maxBoost), queryExpl.getValue()), "min of", + 
Math.min(Math.min(funcExpl.getValue().floatValue(), maxBoost), queryExpl.getValue().floatValue()), "min of", queryExpl, innerMinExpl); } @@ -118,10 +118,10 @@ public enum CombineFunction implements Writeable { @Override public Explanation explain(Explanation queryExpl, Explanation funcExpl, float maxBoost) { Explanation innerMinExpl = Explanation.match( - Math.min(funcExpl.getValue(), maxBoost), "min of:", + Math.min(funcExpl.getValue().floatValue(), maxBoost), "min of:", funcExpl, Explanation.match(maxBoost, "maxBoost")); return Explanation.match( - Math.max(Math.min(funcExpl.getValue(), maxBoost), queryExpl.getValue()), "max of:", + Math.max(Math.min(funcExpl.getValue().floatValue(), maxBoost), queryExpl.getValue().floatValue()), "max of:", queryExpl, innerMinExpl); } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java index c49487cfb7e..fb5a82bc098 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java @@ -90,7 +90,7 @@ public class FieldValueFactorFunction extends ScoreFunction { public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { String modifierStr = modifier != null ? modifier.toString() : ""; String defaultStr = missing != null ? "?:" + missing : ""; - double score = score(docId, subQueryScore.getValue()); + double score = score(docId, subQueryScore.getValue().floatValue()); return Explanation.match( (float) score, String.format(Locale.ROOT, diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java index c2263fc201e..05b74a8b7fe 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java @@ -212,22 +212,27 @@ public class FunctionScoreQuery extends Query { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { - if (needsScores == false && minScore == null) { - return subQuery.createWeight(searcher, needsScores, boost); + public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.ScoreMode scoreMode, float boost) throws IOException { + if (scoreMode == org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES && minScore == null) { + return subQuery.createWeight(searcher, scoreMode, boost); } - boolean subQueryNeedsScores = combineFunction != CombineFunction.REPLACE; + org.apache.lucene.search.ScoreMode subQueryScoreMode = combineFunction != CombineFunction.REPLACE + ? 
org.apache.lucene.search.ScoreMode.COMPLETE + : org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES; Weight[] filterWeights = new Weight[functions.length]; for (int i = 0; i < functions.length; ++i) { - subQueryNeedsScores |= functions[i].needsScores(); + if (functions[i].needsScores()) { + subQueryScoreMode = org.apache.lucene.search.ScoreMode.COMPLETE; + } if (functions[i] instanceof FilterScoreFunction) { Query filter = ((FilterScoreFunction) functions[i]).filter; - filterWeights[i] = searcher.createNormalizedWeight(filter, false); + filterWeights[i] = searcher.createWeight(searcher.rewrite(filter), + org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES, 1f); } } - Weight subQueryWeight = subQuery.createWeight(searcher, subQueryNeedsScores, boost); - return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights, subQueryNeedsScores); + Weight subQueryWeight = subQuery.createWeight(searcher, subQueryScoreMode, boost); + return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights, subQueryScoreMode.needsScores()); } class CustomBoostFactorWeight extends Weight { @@ -299,10 +304,9 @@ public class FunctionScoreQuery extends Query { ScoreFunction function = functions[i]; Explanation functionExplanation = function.getLeafScoreFunction(context).explainScore(doc, expl); if (function instanceof FilterScoreFunction) { - double factor = functionExplanation.getValue(); - float sc = (float) factor; + float factor = functionExplanation.getValue().floatValue(); Query filterQuery = ((FilterScoreFunction) function).filter; - Explanation filterExplanation = Explanation.match(sc, "function score, product of:", + Explanation filterExplanation = Explanation.match(factor, "function score, product of:", Explanation.match(1.0f, "match filter: " + filterQuery.toString()), functionExplanation); functionsExplanations.add(filterExplanation); } else { @@ -319,14 +323,14 @@ public class FunctionScoreQuery extends Query { FunctionFactorScorer scorer = functionScorer(context); int actualDoc = scorer.iterator().advance(doc); assert (actualDoc == doc); - double score = scorer.computeScore(doc, expl.getValue()); + double score = scorer.computeScore(doc, expl.getValue().floatValue()); factorExplanation = Explanation.match( (float) score, "function score, score mode [" + scoreMode.toString().toLowerCase(Locale.ROOT) + "]", functionsExplanations); } expl = combineFunction.explain(expl, factorExplanation, maxBoost); } - if (minScore != null && minScore > expl.getValue()) { + if (minScore != null && minScore > expl.getValue().floatValue()) { expl = Explanation.noMatch("Score value is too low, expected at least " + minScore + " but got " + expl.getValue(), expl); } return expl; @@ -442,6 +446,11 @@ public class FunctionScoreQuery extends Query { } return factor; } + + @Override + public float getMaxScore(int upTo) throws IOException { + return Float.MAX_VALUE; // TODO: what would be a good upper bound? 
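+ // Float.MAX_VALUE is always a correct bound, but it advertises nothing to
+ // the optimizer: Lucene's block-max WAND skipping only kicks in when a
+ // scorer can report a tighter maximum than this.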
+ } } @Override diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/MinScoreScorer.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/MinScoreScorer.java index 8e21c1af41a..204f69f1e0a 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/MinScoreScorer.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/MinScoreScorer.java @@ -19,14 +19,13 @@ package org.elasticsearch.common.lucene.search.function; -import java.io.IOException; - import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.ScoreCachingWrappingScorer; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; +import java.io.IOException; + /** A {@link Scorer} that filters out documents that have a score that is * lower than a configured constant. */ final class MinScoreScorer extends Scorer { @@ -34,13 +33,10 @@ final class MinScoreScorer extends Scorer { private final Scorer in; private final float minScore; + private float curScore; + MinScoreScorer(Weight weight, Scorer scorer, float minScore) { super(weight); - if (scorer instanceof ScoreCachingWrappingScorer == false) { - // when minScore is set, scores might be requested twice: once - // to verify the match, and once by the collector - scorer = new ScoreCachingWrappingScorer(scorer); - } this.in = scorer; this.minScore = minScore; } @@ -55,8 +51,18 @@ final class MinScoreScorer extends Scorer { } @Override - public float score() throws IOException { - return in.score(); + public float score() { + return curScore; + } + + @Override + public int advanceShallow(int target) throws IOException { + return in.advanceShallow(target); + } + + @Override + public float getMaxScore(int upTo) throws IOException { + return in.getMaxScore(upTo); } @Override @@ -77,7 +83,8 @@ final class MinScoreScorer extends Scorer { if (inTwoPhase != null && inTwoPhase.matches() == false) { return false; } - return in.score() >= minScore; + curScore = in.score(); + return curScore >= minScore; } @Override diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java index a104a416cc6..8694b6fa019 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java @@ -84,7 +84,7 @@ public class RandomScoreFunction extends ScoreFunction { public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { String field = fieldData == null ? 
null : fieldData.getFieldName(); return Explanation.match( - (float) score(docId, subQueryScore.getValue()), + (float) score(docId, subQueryScore.getValue().floatValue()), "random score function (seed: " + originalSeed + ", field: " + field + ")"); } }; diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index 7f8b10349bc..5edc1659f54 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -20,9 +20,8 @@ package org.elasticsearch.common.lucene.search.function; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.script.ExplainableSearchScript; import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.Script; @@ -32,28 +31,19 @@ import java.util.Objects; public class ScriptScoreFunction extends ScoreFunction { - static final class CannedScorer extends Scorer { + static final class CannedScorer extends Scorable { protected int docid; protected float score; - CannedScorer() { - super(null); - } - @Override public int docID() { return docid; } @Override - public float score() throws IOException { + public float score() { return score; } - - @Override - public DocIdSetIterator iterator() { - throw new UnsupportedOperationException(); - } } private final Script sScript; @@ -88,10 +78,10 @@ public class ScriptScoreFunction extends ScoreFunction { if (leafScript instanceof ExplainableSearchScript) { leafScript.setDocument(docId); scorer.docid = docId; - scorer.score = subQueryScore.getValue(); + scorer.score = subQueryScore.getValue().floatValue(); exp = ((ExplainableSearchScript) leafScript).explain(subQueryScore); } else { - double score = score(docId, subQueryScore.getValue()); + double score = score(docId, subQueryScore.getValue().floatValue()); String explanation = "script score function, computed with script:\"" + sScript + "\""; if (sScript.getParams() != null) { explanation += " and parameters: \n" + sScript.getParams().toString(); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java index 7d96426e869..87f6b21e9da 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java @@ -60,7 +60,7 @@ public class WeightFactorFunction extends ScoreFunction { public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { Explanation functionExplanation = leafFunction.explainScore(docId, subQueryScore); return Explanation.match( - functionExplanation.getValue() * (float) getWeight(), "product of:", + functionExplanation.getValue().floatValue() * (float) getWeight(), "product of:", functionExplanation, explainWeight()); } }; diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index 38fcdfe5f1b..3a037bed62b 100644 
--- a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.index.mapper.SeqNoFieldMapper; @@ -66,15 +67,22 @@ final class PerThreadIDVersionAndSeqNoLookup { */ PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField) throws IOException { this.uidField = uidField; - Terms terms = reader.terms(uidField); + final Terms terms = reader.terms(uidField); if (terms == null) { - throw new IllegalArgumentException("reader misses the [" + uidField + "] field"); + // If a segment contains only no-ops, it does not have _uid but has both _soft_deletes and _tombstone fields. + final NumericDocValues softDeletesDV = reader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD); + final NumericDocValues tombstoneDV = reader.getNumericDocValues(SeqNoFieldMapper.TOMBSTONE_NAME); + if (softDeletesDV == null || tombstoneDV == null) { + throw new IllegalArgumentException("reader does not have _uid terms but not a no-op segment; " + + "_soft_deletes [" + softDeletesDV + "], _tombstone [" + tombstoneDV + "]"); + } + termsEnum = null; + } else { + termsEnum = terms.iterator(); } - termsEnum = terms.iterator(); if (reader.getNumericDocValues(VersionFieldMapper.NAME) == null) { - throw new IllegalArgumentException("reader misses the [" + VersionFieldMapper.NAME + "] field"); + throw new IllegalArgumentException("reader misses the [" + VersionFieldMapper.NAME + "] field; _uid terms [" + terms + "]"); } - Object readerKey = null; assert (readerKey = reader.getCoreCacheHelper().getKey()) != null; this.readerKey = readerKey; @@ -111,7 +119,8 @@ final class PerThreadIDVersionAndSeqNoLookup { * {@link DocIdSetIterator#NO_MORE_DOCS} is returned if not found * */ private int getDocID(BytesRef id, Bits liveDocs) throws IOException { - if (termsEnum.seekExact(id)) { + // termsEnum can possibly be null here if this leaf contains only no-ops. 
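+ // (no-op segments carry only the _soft_deletes and _tombstone doc values
+ // checked in the constructor above, so an id lookup may legitimately miss)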
+ if (termsEnum != null && termsEnum.seekExact(id)) { int docID = DocIdSetIterator.NO_MORE_DOCS; // there may be more than one matching docID, in the case of nested docs, so we want the last one: docsEnum = termsEnum.postings(docsEnum, 0); diff --git a/server/src/main/java/org/elasticsearch/common/path/PathTrie.java b/server/src/main/java/org/elasticsearch/common/path/PathTrie.java index 5243809c64a..08787cea9df 100644 --- a/server/src/main/java/org/elasticsearch/common/path/PathTrie.java +++ b/server/src/main/java/org/elasticsearch/common/path/PathTrie.java @@ -104,24 +104,12 @@ public class PathTrie { namedWildcard = key.substring(key.indexOf('{') + 1, key.indexOf('}')); } - public boolean isWildcard() { - return isWildcard; - } - - public synchronized void addChild(TrieNode child) { - addInnerChild(child.key, child); - } - private void addInnerChild(String key, TrieNode child) { Map newChildren = new HashMap<>(children); newChildren.put(key, child); children = unmodifiableMap(newChildren); } - public TrieNode getChild(String key) { - return children.get(key); - } - public synchronized void insert(String[] path, int index, T value) { if (index >= path.length) return; @@ -302,7 +290,7 @@ public class PathTrie { } int index = 0; // Supports initial delimiter. - if (strings.length > 0 && strings[0].isEmpty()) { + if (strings[0].isEmpty()) { index = 1; } root.insert(strings, index, value); @@ -327,7 +315,7 @@ public class PathTrie { } int index = 0; // Supports initial delimiter. - if (strings.length > 0 && strings[0].isEmpty()) { + if (strings[0].isEmpty()) { index = 1; } root.insertOrUpdate(strings, index, value, updater); @@ -352,7 +340,7 @@ public class PathTrie { int index = 0; // Supports initial delimiter. - if (strings.length > 0 && strings[0].isEmpty()) { + if (strings[0].isEmpty()) { index = 1; } diff --git a/server/src/main/java/org/elasticsearch/common/regex/Regex.java b/server/src/main/java/org/elasticsearch/common/regex/Regex.java index bcf2dfba3ef..1f4e4651412 100644 --- a/server/src/main/java/org/elasticsearch/common/regex/Regex.java +++ b/server/src/main/java/org/elasticsearch/common/regex/Regex.java @@ -138,6 +138,15 @@ public class Regex { return false; } + /** + * Similar to {@link #simpleMatch(String[], String)}, but accepts a list of strings instead of an array of strings for the patterns to + * match. 
+ */ + public static boolean simpleMatch(final List patterns, final String str) { + // #simpleMatch(String[], String) is likely to be inlined into this method + return patterns != null && simpleMatch(patterns.toArray(Strings.EMPTY_ARRAY), str); + } + public static boolean simpleMatch(String[] patterns, String[] types) { if (patterns != null && types != null) { for (String type : types) { diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 8847c8138a7..e25d954aa4f 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -20,23 +20,26 @@ package org.elasticsearch.common.settings; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.regex.Regex; +import java.util.AbstractMap; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.BiConsumer; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.Predicate; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -51,14 +54,29 @@ public abstract class AbstractScopedSettings extends AbstractComponent { private final List> settingUpdaters = new CopyOnWriteArrayList<>(); private final Map> complexMatchers; private final Map> keySettings; + private final Map, Function, Map.Entry>> settingUpgraders; private final Setting.Property scope; private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$"); private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$"); private static final Pattern AFFIX_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+[*](?:[.][-\\w]+)+$"); - protected AbstractScopedSettings(Settings settings, Set> settingsSet, Setting.Property scope) { + protected AbstractScopedSettings( + final Settings settings, + final Set> settingsSet, + final Set> settingUpgraders, + final Setting.Property scope) { super(settings); this.lastSettingsApplied = Settings.EMPTY; + + this.settingUpgraders = + Collections.unmodifiableMap( + settingUpgraders + .stream() + .collect( + Collectors.toMap( + SettingUpgrader::getSetting, + u -> e -> new AbstractMap.SimpleEntry<>(u.getKey(e.getKey()), u.getValue(e.getValue()))))); + this.scope = scope; Map> complexMatchers = new HashMap<>(); Map> keySettings = new HashMap<>(); @@ -96,6 +114,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { this.scope = other.scope; complexMatchers = other.complexMatchers; keySettings = other.keySettings; + settingUpgraders = Collections.unmodifiableMap(new HashMap<>(other.settingUpgraders)); settingUpdaters.addAll(other.settingUpdaters); } @@ -199,7 +218,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { * Also automatically adds empty consumers for all settings in order to activate logging */ 
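The affix updater hooks below are what back the new cluster.metadata.* registration in ClusterService above. A hypothetical consumer for that setting, sketched only to illustrate the addAffixUpdateConsumer contract (the key handed to the consumer is the namespace under the prefix; clusterSettings and logger are assumed to be in scope):

    clusterSettings.addAffixUpdateConsumer(
        ClusterService.USER_DEFINED_META_DATA,
        (key, value) -> logger.info("user-defined metadata [{}] is now [{}]", key, value),
        (key, value) -> {}); // no extra validation, mirroring the no-op consumer above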
public synchronized void addSettingsUpdateConsumer(Consumer<Settings> consumer, List<? extends Setting<?>> settings) { - addSettingsUpdater(Setting.groupedSettingsUpdater(consumer, logger, settings)); + addSettingsUpdater(Setting.groupedSettingsUpdater(consumer, settings)); } /** @@ -208,11 +227,78 @@ */ public synchronized <T> void addAffixUpdateConsumer(Setting.AffixSetting<T> setting, BiConsumer<String, T> consumer, BiConsumer<String, T> validator) { + ensureSettingIsRegistered(setting); + addSettingsUpdater(setting.newAffixUpdater(consumer, logger, validator)); + } + + /** + * Adds an affix settings consumer that accepts the values for two settings. The consumer is only notified if one or both settings change + * and if the provided validator succeeded. + * <p>
+ * Note: Only settings registered in {@link SettingsModule} can be changed dynamically. + * <p>
+ * This method registers a compound updater that is useful if two settings are depending on each other. + * The consumer is always provided with both values even if only one of the two changes. + */ + public synchronized void addAffixUpdateConsumer(Setting.AffixSetting settingA, Setting.AffixSetting settingB, + BiConsumer> consumer, + BiConsumer> validator) { + // it would be awesome to have a generic way to do that ie. a set of settings that map to an object with a builder + // down the road this would be nice to have! + ensureSettingIsRegistered(settingA); + ensureSettingIsRegistered(settingB); + SettingUpdater, A>> affixUpdaterA = settingA.newAffixUpdater((a,b)-> {}, logger, (a,b)-> {}); + SettingUpdater, B>> affixUpdaterB = settingB.newAffixUpdater((a,b)-> {}, logger, (a,b)-> {}); + + addSettingsUpdater(new SettingUpdater>>() { + + @Override + public boolean hasChanged(Settings current, Settings previous) { + return affixUpdaterA.hasChanged(current, previous) || affixUpdaterB.hasChanged(current, previous); + } + + @Override + public Map> getValue(Settings current, Settings previous) { + Map> map = new HashMap<>(); + BiConsumer aConsumer = (key, value) -> { + assert map.containsKey(key) == false : "duplicate key: " + key; + map.put(key, new Tuple<>(value, settingB.getConcreteSettingForNamespace(key).get(current))); + }; + BiConsumer bConsumer = (key, value) -> { + Tuple abTuple = map.get(key); + if (abTuple != null) { + map.put(key, new Tuple<>(abTuple.v1(), value)); + } else { + assert settingA.getConcreteSettingForNamespace(key).get(current).equals(settingA.getConcreteSettingForNamespace + (key).get(previous)) : "expected: " + settingA.getConcreteSettingForNamespace(key).get(current) + + " but was " + settingA.getConcreteSettingForNamespace(key).get(previous); + map.put(key, new Tuple<>(settingA.getConcreteSettingForNamespace(key).get(current), value)); + } + }; + SettingUpdater, A>> affixUpdaterA = settingA.newAffixUpdater(aConsumer, logger, (a,b) ->{}); + SettingUpdater, B>> affixUpdaterB = settingB.newAffixUpdater(bConsumer, logger, (a,b) ->{}); + affixUpdaterA.apply(current, previous); + affixUpdaterB.apply(current, previous); + for (Map.Entry> entry : map.entrySet()) { + validator.accept(entry.getKey(), entry.getValue()); + } + return Collections.unmodifiableMap(map); + } + + @Override + public void apply(Map> values, Settings current, Settings previous) { + for (Map.Entry> entry : values.entrySet()) { + consumer.accept(entry.getKey(), entry.getValue()); + } + } + }); + } + + private void ensureSettingIsRegistered(Setting.AffixSetting setting) { final Setting registeredSetting = this.complexMatchers.get(setting.getKey()); if (setting != registeredSetting) { throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]"); } - addSettingsUpdater(setting.newAffixUpdater(consumer, logger, validator)); } /** @@ -285,13 +371,13 @@ public abstract class AbstractScopedSettings extends AbstractComponent { /** * Validates that all settings are registered and valid. 
* - * @param settings the settings to validate - * @param validateDependencies true if dependent settings should be validated - * @param validateInternalIndex true if internal index settings should be validated + * @param settings the settings to validate + * @param validateDependencies true if dependent settings should be validated + * @param validateInternalOrPrivateIndex true if internal index settings should be validated * @see Setting#getSettingsDependencies(String) */ - public final void validate(final Settings settings, final boolean validateDependencies, final boolean validateInternalIndex) { - validate(settings, validateDependencies, false, false, validateInternalIndex); + public final void validate(final Settings settings, final boolean validateDependencies, final boolean validateInternalOrPrivateIndex) { + validate(settings, validateDependencies, false, false, validateInternalOrPrivateIndex); } /** @@ -314,11 +400,11 @@ public abstract class AbstractScopedSettings extends AbstractComponent { /** * Validates that all settings are registered and valid. * - * @param settings the settings - * @param validateDependencies true if dependent settings should be validated - * @param ignorePrivateSettings true if private settings should be ignored during validation - * @param ignoreArchivedSettings true if archived settings should be ignored during validation - * @param validateInternalIndex true if index internal settings should be validated + * @param settings the settings + * @param validateDependencies true if dependent settings should be validated + * @param ignorePrivateSettings true if private settings should be ignored during validation + * @param ignoreArchivedSettings true if archived settings should be ignored during validation + * @param validateInternalOrPrivateIndex true if index internal settings should be validated * @see Setting#getSettingsDependencies(String) */ public final void validate( @@ -326,17 +412,18 @@ public abstract class AbstractScopedSettings extends AbstractComponent { final boolean validateDependencies, final boolean ignorePrivateSettings, final boolean ignoreArchivedSettings, - final boolean validateInternalIndex) { + final boolean validateInternalOrPrivateIndex) { final List exceptions = new ArrayList<>(); for (final String key : settings.keySet()) { // settings iterate in deterministic fashion - if (isPrivateSetting(key) && ignorePrivateSettings) { + final Setting setting = getRaw(key); + if (((isPrivateSetting(key) || (setting != null && setting.isPrivateIndex())) && ignorePrivateSettings)) { continue; } if (key.startsWith(ARCHIVED_SETTINGS_PREFIX) && ignoreArchivedSettings) { continue; } try { - validate(key, settings, validateDependencies, validateInternalIndex); + validate(key, settings, validateDependencies, validateInternalOrPrivateIndex); } catch (final RuntimeException ex) { exceptions.add(ex); } @@ -359,16 +446,17 @@ public abstract class AbstractScopedSettings extends AbstractComponent { /** * Validates that the settings is valid. 
      *
-     * @param key the key of the setting to validate
-     * @param settings the settings
-     * @param validateDependencies true if dependent settings should be validated
-     * @param validateInternalIndex true if internal index settings should be validated
+     * @param key                            the key of the setting to validate
+     * @param settings                       the settings
+     * @param validateDependencies           true if dependent settings should be validated
+     * @param validateInternalOrPrivateIndex true if internal or private index settings should be validated
      * @throws IllegalArgumentException if the setting is invalid
      */
-    void validate(final String key, final Settings settings, final boolean validateDependencies, final boolean validateInternalIndex) {
-        Setting setting = getRaw(key);
+    void validate(
+            final String key, final Settings settings, final boolean validateDependencies, final boolean validateInternalOrPrivateIndex) {
+        Setting<?> setting = getRaw(key);
         if (setting == null) {
-            LevensteinDistance ld = new LevensteinDistance();
+            LevenshteinDistance ld = new LevenshteinDistance();
             List<Tuple<Float, String>> scoredKeys = new ArrayList<>();
             for (String k : this.keySettings.keySet()) {
                 float distance = ld.getDistance(key, k);
@@ -392,23 +480,31 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
             }
             throw new IllegalArgumentException(msg);
         } else {
-            Set<String> settingsDependencies = setting.getSettingsDependencies(key);
+            Set<Setting<?>> settingsDependencies = setting.getSettingsDependencies(key);
            if (setting.hasComplexMatcher()) {
                setting = setting.getConcreteSetting(key);
            }
            if (validateDependencies && settingsDependencies.isEmpty() == false) {
-                Set<String> settingKeys = settings.keySet();
-                for (String requiredSetting : settingsDependencies) {
-                    if (settingKeys.contains(requiredSetting) == false) {
-                        throw new IllegalArgumentException("Missing required setting ["
-                            + requiredSetting + "] for setting [" + setting.getKey() + "]");
+                for (final Setting<?> settingDependency : settingsDependencies) {
+                    if (settingDependency.existsOrFallbackExists(settings) == false) {
+                        final String message = String.format(
+                                Locale.ROOT,
+                                "missing required setting [%s] for setting [%s]",
+                                settingDependency.getKey(),
+                                setting.getKey());
+                        throw new IllegalArgumentException(message);
                    }
                }
            }
-            // the only time that validateInternalIndex should be true is if this call is coming via the update settings API
-            if (validateInternalIndex && setting.getProperties().contains(Setting.Property.InternalIndex)) {
-                throw new IllegalArgumentException(
-                    "can not update internal setting [" + setting.getKey() + "]; this setting is managed via a dedicated API");
+            // the only time that validateInternalOrPrivateIndex should be true is if this call is coming via the update settings API
+            if (validateInternalOrPrivateIndex) {
+                if (setting.isInternalIndex()) {
+                    throw new IllegalArgumentException(
+                        "can not update internal setting [" + setting.getKey() + "]; this setting is managed via a dedicated API");
+                } else if (setting.isPrivateIndex()) {
+                    throw new IllegalArgumentException(
+                        "can not update private setting [" + setting.getKey() + "]; this setting is managed by Elasticsearch");
+                }
            }
        }
        setting.get(settings);
@@ -679,6 +775,32 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
         return null;
     }

+    /**
+     * Upgrade all settings eligible for upgrade in the specified settings instance.
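// Illustrative sketch (not from the patch): what the stricter check above rejects. The
// key is invented and assumed to be registered with Property.PrivateIndex on the
// "indexScopedSettings" instance in scope.
try {
    indexScopedSettings.validate(
            Settings.builder().put("index.example.private", "x").build(),
            true,   // validateDependencies
            false,  // ignorePrivateSettings
            false,  // ignoreArchivedSettings
            true);  // validateInternalOrPrivateIndex: only true on the update-settings path
} catch (IllegalArgumentException e) {
    // "can not update private setting [index.example.private]; this setting is managed by Elasticsearch"
}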
+ * + * @param settings the settings instance that might contain settings to be upgraded + * @return a new settings instance if any settings required upgrade, otherwise the same settings instance as specified + */ + public Settings upgradeSettings(final Settings settings) { + final Settings.Builder builder = Settings.builder(); + boolean changed = false; // track if any settings were upgraded + for (final String key : settings.keySet()) { + final Setting setting = getRaw(key); + final Function, Map.Entry> upgrader = settingUpgraders.get(setting); + if (upgrader == null) { + // the setting does not have an upgrader, copy the setting + builder.copy(key, settings); + } else { + // the setting has an upgrader, so mark that we have changed a setting and apply the upgrade logic + changed = true; + final Map.Entry upgrade = upgrader.apply(new Entry(key, settings)); + builder.put(upgrade.getKey(), upgrade.getValue()); + } + } + // we only return a new instance if there was an upgrade + return changed ? builder.build() : settings; + } + /** * Archives invalid or unknown settings. Any setting that is not recognized or fails validation * will be archived. This means the setting is prefixed with {@value ARCHIVED_SETTINGS_PREFIX} @@ -769,4 +891,5 @@ public abstract class AbstractScopedSettings extends AbstractComponent { public boolean isPrivateSetting(String key) { return false; } + } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index acfecdf665a..49b916ce7ea 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -65,6 +65,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.indices.IndicesQueryCache; @@ -101,6 +102,7 @@ import org.elasticsearch.watcher.ResourceWatcherService; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Set; import java.util.function.Predicate; @@ -108,8 +110,13 @@ import java.util.function.Predicate; * Encapsulates all valid cluster level settings. 
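// Illustrative sketch (not from the patch): a hypothetical upgrader that renames a
// deprecated key, and how upgradeSettings(...) applies it. OLD_SETTING and both key
// names are invented; the upgrader must belong to a registered node-scoped setting.
final SettingUpgrader<String> renameUpgrader = new SettingUpgrader<String>() {
    @Override
    public Setting<String> getSetting() {
        return OLD_SETTING; // e.g. registered under "example.old.key"
    }

    @Override
    public String getKey(final String key) {
        return "example.new.key";
    }
    // getValue(...) is inherited and carries the value over unchanged
};
final Set<SettingUpgrader<?>> upgraders = new HashSet<>();
upgraders.add(renameUpgrader);
final ClusterSettings clusterSettings =
        new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS, upgraders);
// the same instance comes back when nothing was eligible, a fresh instance otherwise
final Settings upgraded = clusterSettings.upgradeSettings(persistedSettings);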
*/ public final class ClusterSettings extends AbstractScopedSettings { - public ClusterSettings(Settings nodeSettings, Set> settingsSet) { - super(nodeSettings, settingsSet, Property.NodeScope); + public ClusterSettings(final Settings nodeSettings, final Set> settingsSet) { + this(nodeSettings, settingsSet, Collections.emptySet()); + } + + public ClusterSettings( + final Settings nodeSettings, final Set> settingsSet, final Set> settingUpgraders) { + super(nodeSettings, settingsSet, settingUpgraders, Property.NodeScope); addSettingsUpdater(new LoggingSettingUpdater(nodeSettings)); } @@ -266,17 +273,27 @@ public final class ClusterSettings extends AbstractScopedSettings { HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING, + IndexModule.NODE_STORE_ALLOW_MMAPFS, ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + ClusterService.USER_DEFINED_META_DATA, SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, + RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS, + RemoteClusterAware.REMOTE_CLUSTERS_PROXY, + RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, + RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE, RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER, + RemoteClusterService.SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, + RemoteClusterService.SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, RemoteClusterService.REMOTE_NODE_ATTRIBUTE, + RemoteClusterService.SEARCH_REMOTE_NODE_ATTRIBUTE, RemoteClusterService.ENABLE_REMOTE_CLUSTERS, + RemoteClusterService.SEARCH_ENABLE_REMOTE_CLUSTERS, TransportService.TRACE_LOG_EXCLUDE_SETTING, TransportService.TRACE_LOG_INCLUDE_SETTING, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, @@ -431,4 +448,7 @@ public final class ClusterSettings extends AbstractScopedSettings { ElectionSchedulerFactory.ELECTION_BACK_OFF_TIME_SETTING, ElectionSchedulerFactory.ELECTION_MAX_TIMEOUT_SETTING ))); + + public static List> BUILT_IN_SETTING_UPGRADERS = Collections.emptyList(); + } diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 137378f509d..ae8529af5b5 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -61,6 +61,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { MergeSchedulerConfig.AUTO_THROTTLE_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, + IndexMetaData.SETTING_INDEX_VERSION_CREATED, IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING, IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING, IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING, @@ -94,6 +95,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING, MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, + 
MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, @@ -129,6 +131,8 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.MAX_REGEX_LENGTH_SETTING, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING, IndexSettings.INDEX_GC_DELETES_SETTING, + IndexSettings.INDEX_SOFT_DELETES_SETTING, + IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING, UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING, @@ -174,7 +178,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); public IndexScopedSettings(Settings settings, Set> settingsSet) { - super(settings, settingsSet, Property.IndexScope); + super(settings, settingsSet, Collections.emptySet(), Property.IndexScope); } private IndexScopedSettings(Settings settings, IndexScopedSettings other, IndexMetaData metaData) { @@ -198,12 +202,13 @@ public final class IndexScopedSettings extends AbstractScopedSettings { switch (key) { case IndexMetaData.SETTING_CREATION_DATE: case IndexMetaData.SETTING_INDEX_UUID: - case IndexMetaData.SETTING_VERSION_CREATED: case IndexMetaData.SETTING_VERSION_UPGRADED: case IndexMetaData.SETTING_INDEX_PROVIDED_NAME: case MergePolicyConfig.INDEX_MERGE_ENABLED: - case IndexMetaData.INDEX_SHRINK_SOURCE_UUID_KEY: - case IndexMetaData.INDEX_SHRINK_SOURCE_NAME_KEY: + // we keep the shrink settings for BWC - this can be removed in 8.0 + // we can't remove in 7 since this setting might be baked into an index coming in via a full cluster restart from 6.0 + case "index.shrink.source.uuid": + case "index.shrink.source.name": case IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY: case IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY: return true; diff --git a/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java b/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java index c23a0bd42e3..33f4718aa45 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java @@ -69,7 +69,7 @@ public abstract class SecureSetting extends Setting { } @Override - public String getRaw(Settings settings) { + String innerGetRaw(final Settings settings) { throw new UnsupportedOperationException("secure settings are not strings"); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 2cb5da56c44..cb957db43c2 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -16,11 +16,13 @@ * specific language governing permissions and limitations * under the License. 
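// Illustrative sketch (not from the patch): secure settings live in the keystore, so
// the raw string accessor refuses to serve them. The key is invented, and the
// secureString(...) call with a null fallback follows the usual SecureSetting pattern.
Setting<SecureString> password = SecureSetting.secureString("example.secure.password", null);
// password.get(settings) reads the keystore entry; password.getRaw(settings) now routes
// through innerGetRaw(...) and throws UnsupportedOperationException.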
*/ + package org.elasticsearch.common.settings; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -126,7 +128,12 @@ public class Setting implements ToXContentObject { * Indicates an index-level setting that is managed internally. Such a setting can only be added to an index on index creation but * can not be updated via the update API. */ - InternalIndex + InternalIndex, + + /** + * Indicates an index-level setting that is privately managed. Such a setting can not even be set on index creation. + */ + PrivateIndex } private final Key key; @@ -160,6 +167,7 @@ public class Setting implements ToXContentObject { } checkPropertyRequiresIndexScope(propertiesAsSet, Property.NotCopyableOnResize); checkPropertyRequiresIndexScope(propertiesAsSet, Property.InternalIndex); + checkPropertyRequiresIndexScope(propertiesAsSet, Property.PrivateIndex); this.properties = propertiesAsSet; } } @@ -284,6 +292,14 @@ public class Setting implements ToXContentObject { return properties.contains(Property.Final); } + public final boolean isInternalIndex() { + return properties.contains(Property.InternalIndex); + } + + public final boolean isPrivateIndex() { + return properties.contains(Property.PrivateIndex); + } + /** * Returns the setting properties * @see Property @@ -329,6 +345,11 @@ public class Setting implements ToXContentObject { return false; } + + final boolean isListSetting() { + return this instanceof ListSetting; + } + boolean hasComplexMatcher() { return isGroupSetting(); } @@ -350,12 +371,25 @@ public class Setting implements ToXContentObject { } /** - * Returns true iff this setting is present in the given settings object. Otherwise false + * Returns true if and only if this setting is present in the given settings instance. Note that fallback settings are excluded. + * + * @param settings the settings + * @return true if the setting is present in the given settings instance, otherwise false */ - public boolean exists(Settings settings) { + public boolean exists(final Settings settings) { return settings.keySet().contains(getKey()); } + /** + * Returns true if and only if this setting including fallback settings is present in the given settings instance. + * + * @param settings the settings + * @return true if the setting including fallback settings is present in the given settings instance, otherwise false + */ + public boolean existsOrFallbackExists(final Settings settings) { + return settings.keySet().contains(getKey()) || (fallbackSetting != null && fallbackSetting.existsOrFallbackExists(settings)); + } + /** * Returns the settings value. If the setting is not present in the given settings object the default value is returned * instead. @@ -411,9 +445,20 @@ public class Setting implements ToXContentObject { * Returns the raw (string) settings value. If the setting is not present in the given settings object the default value is returned * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value. */ - public String getRaw(Settings settings) { + public final String getRaw(final Settings settings) { checkDeprecation(settings); - return settings.get(getKey(), defaultValue.apply(settings)); + return innerGetRaw(settings); + } + + /** + * The underlying implementation for {@link #getRaw(Settings)}. 
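// Illustrative sketch (not from the patch): the sharpened exists(...) versus the new
// existsOrFallbackExists(...). Keys are invented.
Setting<String> oldKey = Setting.simpleString("example.old.key", Setting.Property.NodeScope);
Setting<String> newKey = Setting.simpleString("example.new.key", oldKey, Setting.Property.NodeScope);
Settings settings = Settings.builder().put("example.old.key", "value").build();
assert newKey.exists(settings) == false;        // fallback settings are deliberately excluded
assert newKey.existsOrFallbackExists(settings); // the fallback chain is consulted
assert "value".equals(newKey.get(settings));    // get(...) already followed the fallback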
Setting specializations can override this as needed to convert the + * actual settings value to raw strings. + * + * @param settings the settings instance + * @return the raw string representation of the setting value + */ + String innerGetRaw(final Settings settings) { + return settings.get(getKey(), defaultValue.apply(settings), isListSetting()); } /** Logs a deprecation warning if the setting is deprecated and used. */ @@ -484,7 +529,7 @@ public class Setting implements ToXContentObject { * Returns a set of settings that are required at validation time. Unless all of the dependencies are present in the settings * object validation of setting must fail. */ - public Set getSettingsDependencies(String key) { + public Set> getSettingsDependencies(String key) { return Collections.emptySet(); } @@ -547,7 +592,7 @@ public class Setting implements ToXContentObject { }; } - static AbstractScopedSettings.SettingUpdater groupedSettingsUpdater(Consumer consumer, Logger logger, + static AbstractScopedSettings.SettingUpdater groupedSettingsUpdater(Consumer consumer, final List> configuredSettings) { return new AbstractScopedSettings.SettingUpdater() { @@ -607,12 +652,12 @@ public class Setting implements ToXContentObject { return settings.keySet().stream().filter(this::match).map(key::getConcreteString); } - public Set getSettingsDependencies(String settingsKey) { + public Set> getSettingsDependencies(String settingsKey) { if (dependencies.isEmpty()) { return Collections.emptySet(); } else { String namespace = key.getNamespace(settingsKey); - return dependencies.stream().map(s -> s.key.toConcreteKey(namespace).key).collect(Collectors.toSet()); + return dependencies.stream().map(s -> (Setting)s.getConcreteSettingForNamespace(namespace)).collect(Collectors.toSet()); } } @@ -698,7 +743,7 @@ public class Setting implements ToXContentObject { } @Override - public String getRaw(Settings settings) { + public String innerGetRaw(final Settings settings) { throw new UnsupportedOperationException("affix settings can't return values" + " use #getConcreteSetting to obtain a concrete setting"); } @@ -727,7 +772,7 @@ public class Setting implements ToXContentObject { /** * Returns the namespace for a concrete setting. Ie. an affix setting with prefix: {@code search.} and suffix: {@code username} - * will return {@code remote} as a namespace for the setting {@code search.remote.username} + * will return {@code remote} as a namespace for the setting {@code cluster.remote.username} */ public String getNamespace(Setting concreteSetting) { return key.getNamespace(concreteSetting.getKey()); @@ -805,7 +850,7 @@ public class Setting implements ToXContentObject { } @Override - public String getRaw(Settings settings) { + public String innerGetRaw(final Settings settings) { Settings subSettings = get(settings); try { XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -887,40 +932,6 @@ public class Setting implements ToXContentObject { } } - private static class ListSetting extends Setting> { - private final Function> defaultStringValue; - - private ListSetting(String key, Function> defaultStringValue, Function> parser, - Property... properties) { - super(new ListKey(key), (s) -> Setting.arrayToParsableString(defaultStringValue.apply(s)), parser, - properties); - this.defaultStringValue = defaultStringValue; - } - - @Override - public String getRaw(Settings settings) { - List array = settings.getAsList(getKey(), null); - return array == null ? 
defaultValue.apply(settings) : arrayToParsableString(array); - } - - @Override - boolean hasComplexMatcher() { - return true; - } - - @Override - public void diff(Settings.Builder builder, Settings source, Settings defaultSettings) { - if (exists(source) == false) { - List asList = defaultSettings.getAsList(getKey(), null); - if (asList == null) { - builder.putList(getKey(), defaultStringValue.apply(defaultSettings)); - } else { - builder.putList(getKey(), asList); - } - } - } - } - private final class Updater implements AbstractScopedSettings.SettingUpdater { private final Consumer consumer; private final Logger logger; @@ -968,6 +979,9 @@ public class Setting implements ToXContentObject { } } + public static Setting versionSetting(final String key, final Version defaultValue, Property... properties) { + return new Setting<>(key, s -> Integer.toString(defaultValue.id), s -> Version.fromId(Integer.parseInt(s)), properties); + } public static Setting floatSetting(String key, float defaultValue, Property... properties) { return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, properties); @@ -1009,8 +1023,20 @@ public class Setting implements ToXContentObject { return new Setting<>(key, s -> "", Function.identity(), properties); } + public static Setting simpleString(String key, Function parser, Property... properties) { + return new Setting<>(key, s -> "", parser, properties); + } + public static Setting simpleString(String key, Setting fallback, Property... properties) { - return new Setting<>(key, fallback, Function.identity(), properties); + return simpleString(key, fallback, Function.identity(), properties); + } + + public static Setting simpleString( + final String key, + final Setting fallback, + final Function parser, + final Property... properties) { + return new Setting<>(key, fallback, parser, properties); } public static Setting simpleString(String key, Validator validator, Property... properties) { @@ -1167,26 +1193,44 @@ public class Setting implements ToXContentObject { return new Setting<>(key, (s) -> defaultPercentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties); } - public static Setting> listSetting(String key, List defaultStringValue, Function singleValueParser, - Property... properties) { - return listSetting(key, (s) -> defaultStringValue, singleValueParser, properties); + public static Setting> listSetting( + final String key, + final List defaultStringValue, + final Function singleValueParser, + final Property... properties) { + return listSetting(key, null, singleValueParser, (s) -> defaultStringValue, properties); } // TODO this one's two argument get is still broken - public static Setting> listSetting(String key, Setting> fallbackSetting, Function singleValueParser, - Property... properties) { - return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, properties); + public static Setting> listSetting( + final String key, + final Setting> fallbackSetting, + final Function singleValueParser, + final Property... properties) { + return listSetting(key, fallbackSetting, singleValueParser, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), properties); } - public static Setting> listSetting(String key, Function> defaultStringValue, - Function singleValueParser, Property... properties) { + public static Setting> listSetting( + final String key, + final Function singleValueParser, + final Function> defaultStringValue, + final Property... 
properties) { + return listSetting(key, null, singleValueParser, defaultStringValue, properties); + } + + public static Setting> listSetting( + final String key, + final @Nullable Setting> fallbackSetting, + final Function singleValueParser, + final Function> defaultStringValue, + final Property... properties) { if (defaultStringValue.apply(Settings.EMPTY) == null) { throw new IllegalArgumentException("default value function must not return null"); } Function> parser = (s) -> parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList()); - return new ListSetting<>(key, defaultStringValue, parser, properties); + return new ListSetting<>(key, fallbackSetting, defaultStringValue, parser, properties); } private static List parseableStringToList(String parsableString) { @@ -1224,6 +1268,50 @@ public class Setting implements ToXContentObject { } } + private static class ListSetting extends Setting> { + + private final Function> defaultStringValue; + + private ListSetting( + final String key, + final @Nullable Setting> fallbackSetting, + final Function> defaultStringValue, + final Function> parser, + final Property... properties) { + super( + new ListKey(key), + fallbackSetting, + (s) -> Setting.arrayToParsableString(defaultStringValue.apply(s)), + parser, + (v,s) -> {}, + properties); + this.defaultStringValue = defaultStringValue; + } + + @Override + String innerGetRaw(final Settings settings) { + List array = settings.getAsList(getKey(), null); + return array == null ? defaultValue.apply(settings) : arrayToParsableString(array); + } + + @Override + boolean hasComplexMatcher() { + return true; + } + + @Override + public void diff(Settings.Builder builder, Settings source, Settings defaultSettings) { + if (exists(source) == false) { + List asList = defaultSettings.getAsList(getKey(), null); + if (asList == null) { + builder.putList(getKey(), defaultStringValue.apply(defaultSettings)); + } else { + builder.putList(getKey(), asList); + } + } + } + } + static void logSettingUpdate(Setting setting, Settings current, Settings previous, Logger logger) { if (logger.isInfoEnabled()) { if (setting.isFiltered()) { @@ -1242,8 +1330,31 @@ public class Setting implements ToXContentObject { return new GroupSetting(key, validator, properties); } - public static Setting timeSetting(String key, TimeValue defaultValue, TimeValue minValue, TimeValue maxValue, - Property... properties) { + public static Setting timeSetting( + final String key, + final Setting fallbackSetting, + final TimeValue minValue, + final Property... properties) { + final SimpleKey simpleKey = new SimpleKey(key); + return new Setting<>( + simpleKey, + fallbackSetting, + fallbackSetting::getRaw, + minTimeValueParser(key, minValue), + (v, s) -> {}, + properties); + } + + public static Setting timeSetting( + final String key, Function defaultValue, final TimeValue minValue, final Property... properties) { + final SimpleKey simpleKey = new SimpleKey(key); + return new Setting<>(simpleKey, s -> defaultValue.apply(s).getStringRep(), minTimeValueParser(key, minValue), properties); + } + + public static Setting timeSetting( + final String key, TimeValue defaultValue, final TimeValue minValue, final TimeValue maxValue, final Property... 
+            properties) {
+        final SimpleKey simpleKey = new SimpleKey(key);
+        return new Setting<>(simpleKey, s -> defaultValue.getStringRep(), minMaxTimeValueParser(key, minValue, maxValue), properties);
-        return new Setting<>(key, (s) -> defaultValue.getStringRep(), (s) -> {
-            TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
-            if (timeValue.millis() < minValue.millis()) {
-                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
-            }
-            if (timeValue.millis() > maxValue.millis()) {
-                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue);
-            }
-            return timeValue;
-        }, properties);
     }

@@ -1256,15 +1367,37 @@ public class Setting<T> implements ToXContentObject {
-    public static Setting<TimeValue> timeSetting(String key, Function<Settings, TimeValue> defaultValue, TimeValue minValue,
-                                                 Property... properties) {
-        return new Setting<>(key, (s) -> defaultValue.apply(s).getStringRep(), (s) -> {
-            TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
-            if (timeValue.millis() < minValue.millis()) {
-                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
+    private static Function<String, TimeValue> minTimeValueParser(final String key, final TimeValue minValue) {
+        return s -> {
+            final TimeValue value = TimeValue.parseTimeValue(s, null, key);
+            if (value.millis() < minValue.millis()) {
+                final String message = String.format(
+                        Locale.ROOT,
+                        "failed to parse value [%s] for setting [%s], must be >= [%s]",
+                        s,
+                        key,
+                        minValue.getStringRep());
+                throw new IllegalArgumentException(message);
             }
-            return timeValue;
-        }, properties);
+            return value;
+        };
+    }
+
+    private static Function<String, TimeValue> minMaxTimeValueParser(
+            final String key, final TimeValue minValue, final TimeValue maxValue) {
+        return s -> {
+            final TimeValue value = minTimeValueParser(key, minValue).apply(s);
+            if (value.millis() > maxValue.millis()) {
+                final String message = String.format(
+                        Locale.ROOT,
+                        "failed to parse value [%s] for setting [%s], must be <= [%s]",
+                        s,
+                        key,
+                        maxValue.getStringRep());
+                throw new IllegalArgumentException(message);
+            }
+            return value;
+        };
     }

     public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, TimeValue minValue, Property... properties) {
@@ -1283,12 +1416,27 @@ public class Setting<T> implements ToXContentObject {
         return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties);
     }

+    public static Setting<TimeValue> positiveTimeSetting(
+            final String key,
+            final Setting<TimeValue> fallbackSetting,
+            final TimeValue minValue,
+            final Property... properties) {
+        return timeSetting(key, fallbackSetting, minValue, properties);
+    }
+
     public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, Property... properties) {
+        return doubleSetting(key, defaultValue, minValue, Double.POSITIVE_INFINITY, properties);
+    }
+
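// Illustrative sketch (not from the patch) of the uniform range errors the parsers
// above produce. The key is invented.
Setting<TimeValue> timeout = Setting.timeSetting(
        "example.timeout", TimeValue.timeValueSeconds(30),
        TimeValue.timeValueMillis(1), TimeValue.timeValueMinutes(5),
        Setting.Property.NodeScope);
// parsing "10m" fails with:
//   failed to parse value [10m] for setting [example.timeout], must be <= [5m]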
+    public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, double maxValue, Property... properties) {
         return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> {
             final double d = Double.parseDouble(s);
             if (d < minValue) {
                 throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
             }
+            if (d > maxValue) {
+                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue);
+            }
             return d;
         }, properties);
     }
diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java b/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java
new file mode 100644
index 00000000000..91f2bead300
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings;
+
+/**
+ * Represents the logic to upgrade a setting.
+ *
+ * @param <T> the type of the underlying setting
+ */
+public interface SettingUpgrader<T> {
+
+    /**
+     * The setting upgraded by this upgrader.
+     *
+     * @return the setting
+     */
+    Setting<T> getSetting();
+
+    /**
+     * The logic to upgrade the setting key, for example by mapping the old setting key to the new setting key.
+     *
+     * @param key the setting key to upgrade
+     * @return the upgraded setting key
+     */
+    String getKey(String key);
+
+    /**
+     * The logic to upgrade the setting value.
+     *
+     * @param value the setting value to upgrade
+     * @return the upgraded setting value
+     */
+    default String getValue(final String value) {
+        return value;
+    }
+
+}
diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java
index 2eb14f7ac65..1aeed2aee51 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java
@@ -245,6 +245,30 @@ public final class Settings implements ToXContentFragment {
         return retVal == null ? defaultValue : retVal;
     }

+    /**
+     * Returns the setting value associated with the setting key. If it does not exist,
+     * returns the default value provided.
+     */
+    String get(String setting, String defaultValue, boolean isList) {
+        Object value = settings.get(setting);
+        if (value != null) {
+            if (value instanceof List) {
+                if (isList == false) {
+                    throw new IllegalArgumentException(
+                        "Found list type value for setting [" + setting + "] but did not expect a list for it."
+ ); + } + } else if (isList) { + throw new IllegalArgumentException( + "Expected list type value for setting [" + setting + "] but found [" + value.getClass() + ']' + ); + } + return toString(value); + } else { + return defaultValue; + } + } + /** * Returns the setting value (as float) associated with the setting key. If it does not exists, * returns the default value provided. diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 67037b3708b..1eca3eb415f 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -54,10 +54,14 @@ public class SettingsModule implements Module { private final SettingsFilter settingsFilter; public SettingsModule(Settings settings, Setting... additionalSettings) { - this(settings, Arrays.asList(additionalSettings), Collections.emptyList()); + this(settings, Arrays.asList(additionalSettings), Collections.emptyList(), Collections.emptySet()); } - public SettingsModule(Settings settings, List> additionalSettings, List settingsFilter) { + public SettingsModule( + Settings settings, + List> additionalSettings, + List settingsFilter, + Set> settingUpgraders) { logger = Loggers.getLogger(getClass(), settings); this.settings = settings; for (Setting setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) { @@ -70,12 +74,22 @@ public class SettingsModule implements Module { for (Setting setting : additionalSettings) { registerSetting(setting); } - for (String filter : settingsFilter) { registerSettingsFilter(filter); } + final Set> clusterSettingUpgraders = new HashSet<>(); + for (final SettingUpgrader settingUpgrader : ClusterSettings.BUILT_IN_SETTING_UPGRADERS) { + assert settingUpgrader.getSetting().hasNodeScope() : settingUpgrader.getSetting().getKey(); + final boolean added = clusterSettingUpgraders.add(settingUpgrader); + assert added : settingUpgrader.getSetting().getKey(); + } + for (final SettingUpgrader settingUpgrader : settingUpgraders) { + assert settingUpgrader.getSetting().hasNodeScope() : settingUpgrader.getSetting().getKey(); + final boolean added = clusterSettingUpgraders.add(settingUpgrader); + assert added : settingUpgrader.getSetting().getKey(); + } this.indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values())); - this.clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values())); + this.clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values()), clusterSettingUpgraders); Settings indexSettings = settings.filter((s) -> (s.startsWith("index.") && // special case - we want to get Did you mean indices.query.bool.max_clause_count // which means we need to by-pass this check for this setting @@ -205,4 +219,5 @@ public class SettingsModule implements Module { public SettingsFilter getSettingsFilter() { return settingsFilter; } + } diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index 37efff5a0be..69b8cb0c85b 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -25,10 +25,12 @@ import java.time.DateTimeException; import java.time.DayOfWeek; import java.time.Instant; import java.time.LocalDate; +import 
java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; +import java.time.format.DateTimeParseException; import java.time.format.ResolverStyle; import java.time.format.SignStyle; import java.time.temporal.ChronoField; @@ -48,6 +50,7 @@ import static java.time.temporal.ChronoField.HOUR_OF_DAY; import static java.time.temporal.ChronoField.MILLI_OF_SECOND; import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; import static java.time.temporal.ChronoField.MONTH_OF_YEAR; +import static java.time.temporal.ChronoField.NANO_OF_SECOND; import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; public class DateFormatters { @@ -81,7 +84,7 @@ public class DateFormatters { .appendFraction(MILLI_OF_SECOND, 3, 3, true) .optionalEnd() .optionalStart() - .appendOffset("+HHmm", "Z") + .appendZoneOrOffsetId() .optionalEnd() .optionalEnd() .toFormatter(Locale.ROOT); @@ -95,7 +98,7 @@ public class DateFormatters { .appendFraction(MILLI_OF_SECOND, 3, 3, true) .optionalEnd() .optionalStart() - .appendZoneOrOffsetId() + .appendOffset("+HHmm", "Z") .optionalEnd() .optionalEnd() .toFormatter(Locale.ROOT); @@ -106,6 +109,40 @@ public class DateFormatters { private static final CompoundDateTimeFormatter STRICT_DATE_OPTIONAL_TIME = new CompoundDateTimeFormatter(STRICT_DATE_OPTIONAL_TIME_FORMATTER_1, STRICT_DATE_OPTIONAL_TIME_FORMATTER_2); + private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS_1 = new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .optionalStart() + .appendLiteral('T') + .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 3, 9, true) + .optionalEnd() + .optionalStart() + .appendZoneOrOffsetId() + .optionalEnd() + .optionalEnd() + .toFormatter(Locale.ROOT); + + private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS_2 = new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .optionalStart() + .appendLiteral('T') + .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 3, 9, true) + .optionalEnd() + .optionalStart() + .appendOffset("+HHmm", "Z") + .optionalEnd() + .optionalEnd() + .toFormatter(Locale.ROOT); + + /** + * Returns a generic ISO datetime parser where the date is mandatory and the time is optional with nanosecond resolution. + */ + private static final CompoundDateTimeFormatter STRICT_DATE_OPTIONAL_TIME_NANOS = + new CompoundDateTimeFormatter(STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS_1, STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS_2); + ///////////////////////////////////////// // // BEGIN basic time formatters @@ -844,11 +881,47 @@ public class DateFormatters { /* * Returns a formatter for parsing the milliseconds since the epoch + * This one needs a custom implementation, because the standard date formatter can not parse negative values + * or anything +- 999 milliseconds around the epoch + * + * This implementation just resorts to parsing the input directly to an Instant by trying to parse a number. 
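// Illustrative sketch (not from the patch): the nanosecond-resolution variant above
// keeps up to nine fractional digits where strict_date_optional_time is limited to
// milliseconds.
TemporalAccessor parsed = DateFormatters.forPattern("strict_date_optional_time_nanos")
        .parse("2018-07-02T12:13:14.123456789Z");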
*/ - private static final CompoundDateTimeFormatter EPOCH_MILLIS = new CompoundDateTimeFormatter(new DateTimeFormatterBuilder() + private static final DateTimeFormatter EPOCH_MILLIS_FORMATTER = new DateTimeFormatterBuilder() .appendValue(ChronoField.INSTANT_SECONDS, 1, 19, SignStyle.NEVER) .appendValue(ChronoField.MILLI_OF_SECOND, 3) - .toFormatter(Locale.ROOT)); + .toFormatter(Locale.ROOT); + + private static final class EpochDateTimeFormatter extends CompoundDateTimeFormatter { + + private EpochDateTimeFormatter() { + super(EPOCH_MILLIS_FORMATTER); + } + + private EpochDateTimeFormatter(ZoneId zoneId) { + super(EPOCH_MILLIS_FORMATTER.withZone(zoneId)); + } + + @Override + public TemporalAccessor parse(String input) { + try { + return Instant.ofEpochMilli(Long.valueOf(input)).atZone(ZoneOffset.UTC); + } catch (NumberFormatException e) { + throw new DateTimeParseException("invalid number", input, 0, e); + } + } + + @Override + public CompoundDateTimeFormatter withZone(ZoneId zoneId) { + return new EpochDateTimeFormatter(zoneId); + } + + @Override + public String format(TemporalAccessor accessor) { + return String.valueOf(Instant.from(accessor).toEpochMilli()); + } + } + + private static final CompoundDateTimeFormatter EPOCH_MILLIS = new EpochDateTimeFormatter(); /* * Returns a formatter that combines a full date and two digit hour of @@ -1326,6 +1399,8 @@ public class DateFormatters { return STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS; } else if ("strictDateOptionalTime".equals(input) || "strict_date_optional_time".equals(input)) { return STRICT_DATE_OPTIONAL_TIME; + } else if ("strictDateOptionalTimeNanos".equals(input) || "strict_date_optional_time_nanos".equals(input)) { + return STRICT_DATE_OPTIONAL_TIME_NANOS; } else if ("strictDateTime".equals(input) || "strict_date_time".equals(input)) { return STRICT_DATE_TIME; } else if ("strictDateTimeNoMillis".equals(input) || "strict_date_time_no_millis".equals(input)) { diff --git a/server/src/main/java/org/elasticsearch/common/util/CachedSupplier.java b/server/src/main/java/org/elasticsearch/common/util/CachedSupplier.java new file mode 100644 index 00000000000..eb15ee13052 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/util/CachedSupplier.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.util; + +import java.util.function.Supplier; + +/** + * A {@link Supplier} that caches its return value. This may be useful to make + * a {@link Supplier} idempotent or for performance reasons if always returning + * the same instance is acceptable. 
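// Illustrative sketch (not from the patch, and assuming the usual "epoch_millis"
// lookup in forPattern): the custom parse(...) accepts values the builder-based
// formatter cannot, such as negative epoch milliseconds.
TemporalAccessor t = DateFormatters.forPattern("epoch_millis").parse("-42");
// equivalent to Instant.ofEpochMilli(-42L).atZone(ZoneOffset.UTC)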
+ */ +public final class CachedSupplier implements Supplier { + + private Supplier supplier; + private T result; + private boolean resultSet; + + public CachedSupplier(Supplier supplier) { + this.supplier = supplier; + } + + @Override + public synchronized T get() { + if (resultSet == false) { + result = supplier.get(); + resultSet = true; + } + return result; + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java b/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java deleted file mode 100644 index b709c48d8c2..00000000000 --- a/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.util; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexSettings; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; - -/** - * Renames index folders from {index.name} to {index.uuid} - */ -public class IndexFolderUpgrader { - private final NodeEnvironment nodeEnv; - private final Settings settings; - private final Logger logger = Loggers.getLogger(IndexFolderUpgrader.class); - - /** - * Creates a new upgrader instance - * @param settings node settings - * @param nodeEnv the node env to operate on - */ - IndexFolderUpgrader(Settings settings, NodeEnvironment nodeEnv) { - this.settings = settings; - this.nodeEnv = nodeEnv; - } - - /** - * Moves the index folder found in source to target - */ - void upgrade(final Index index, final Path source, final Path target) throws IOException { - boolean success = false; - try { - Files.move(source, target, StandardCopyOption.ATOMIC_MOVE); - success = true; - } catch (NoSuchFileException | FileNotFoundException exception) { - // thrown when the source is non-existent because the folder was renamed - // by another node (shared FS) after we checked if the target exists - logger.error(() -> new ParameterizedMessage("multiple nodes trying to upgrade [{}] in parallel, retry " + - "upgrading with single node", target), exception); - throw exception; - } finally { - if (success) { - logger.info("{} moved from [{}] to [{}]", index, 
source, target); - logger.trace("{} syncing directory [{}]", index, target); - IOUtils.fsync(target, true); - } - } - } - - /** - * Renames indexFolderName index folders found in node paths and custom path - * iff {@link #needsUpgrade(Index, String)} is true. - * Index folder in custom paths are renamed first followed by index folders in each node path. - */ - void upgrade(final String indexFolderName) throws IOException { - for (NodeEnvironment.NodePath nodePath : nodeEnv.nodePaths()) { - final Path indexFolderPath = nodePath.indicesPath.resolve(indexFolderName); - final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, indexFolderPath); - if (indexMetaData != null) { - final Index index = indexMetaData.getIndex(); - if (needsUpgrade(index, indexFolderName)) { - logger.info("{} upgrading [{}] to new naming convention", index, indexFolderPath); - final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); - if (indexSettings.hasCustomDataPath()) { - // we rename index folder in custom path before renaming them in any node path - // to have the index state under a not-yet-upgraded index folder, which we use to - // continue renaming after a incomplete upgrade. - final Path customLocationSource = nodeEnv.resolveBaseCustomLocation(indexSettings) - .resolve(indexFolderName); - final Path customLocationTarget = customLocationSource.resolveSibling(index.getUUID()); - // we rename the folder in custom path only the first time we encounter a state - // in a node path, which needs upgrading, it is a no-op for subsequent node paths - if (Files.exists(customLocationSource) // might not exist if no data was written for this index - && Files.exists(customLocationTarget) == false) { - upgrade(index, customLocationSource, customLocationTarget); - } else { - logger.info("[{}] no upgrade needed - already upgraded", customLocationTarget); - } - } - upgrade(index, indexFolderPath, indexFolderPath.resolveSibling(index.getUUID())); - } else { - logger.debug("[{}] no upgrade needed - already upgraded", indexFolderPath); - } - } else { - logger.warn("[{}] no index state found - ignoring", indexFolderPath); - } - } - } - - /** - * Upgrades all indices found under nodeEnv. Already upgraded indices are ignored. 
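// Illustrative sketch (not from the patch): memoizing an expensive computation with
// the CachedSupplier added above; the delegate runs once, later calls return the
// cached instance.
Supplier<BigInteger> lazy = new CachedSupplier<>(() -> BigInteger.valueOf(2).pow(100_000));
BigInteger first = lazy.get();  // computes
BigInteger second = lazy.get(); // served from the cache
assert first == second;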
- */ - public static void upgradeIndicesIfNeeded(final Settings settings, final NodeEnvironment nodeEnv) throws IOException { - final IndexFolderUpgrader upgrader = new IndexFolderUpgrader(settings, nodeEnv); - for (String indexFolderName : nodeEnv.availableIndexFolders()) { - upgrader.upgrade(indexFolderName); - } - } - - static boolean needsUpgrade(Index index, String indexFolderName) { - return indexFolderName.equals(index.getUUID()) == false; - } -} diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 057a970470b..d38eb03fae3 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -160,11 +160,13 @@ public class EsExecutors { if (Node.NODE_NAME_SETTING.exists(settings)) { return threadName(Node.NODE_NAME_SETTING.get(settings), namePrefix); } else { + // TODO this should only be allowed in tests return threadName("", namePrefix); } } public static String threadName(final String nodeName, final String namePrefix) { + // TODO missing node names should only be allowed in tests return "elasticsearch" + (nodeName.isEmpty() ? "" : "[") + nodeName + (nodeName.isEmpty() ? "" : "]") + "[" + namePrefix + "]"; } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java index 38abe90ad46..5793bcf8a0e 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java @@ -21,6 +21,8 @@ package org.elasticsearch.common.xcontent; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.time.CompoundDateTimeFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.joda.time.DateTime; @@ -33,6 +35,19 @@ import org.joda.time.format.ISODateTimeFormat; import org.joda.time.tz.CachedDateTimeZone; import org.joda.time.tz.FixedDateTimeZone; +import java.time.DayOfWeek; +import java.time.Duration; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.Month; +import java.time.MonthDay; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.Period; +import java.time.Year; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.Calendar; import java.util.Date; import java.util.GregorianCalendar; @@ -49,6 +64,9 @@ import java.util.function.Function; public class XContentElasticsearchExtension implements XContentBuilderExtension { public static final DateTimeFormatter DEFAULT_DATE_PRINTER = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC); + public static final CompoundDateTimeFormatter DEFAULT_FORMATTER = DateFormatters.forPattern("strict_date_optional_time_nanos"); + public static final CompoundDateTimeFormatter LOCAL_TIME_FORMATTER = DateFormatters.forPattern("HH:mm:ss.SSS"); + public static final CompoundDateTimeFormatter OFFSET_TIME_FORMATTER = DateFormatters.forPattern("HH:mm:ss.SSSZZZZZ"); @Override public Map, XContentBuilder.Writer> getXContentWriters() { @@ -62,6 +80,19 @@ public class 
XContentElasticsearchExtension implements XContentBuilderExtension writers.put(MutableDateTime.class, XContentBuilder::timeValue); writers.put(DateTime.class, XContentBuilder::timeValue); writers.put(TimeValue.class, (b, v) -> b.value(v.toString())); + writers.put(ZonedDateTime.class, XContentBuilder::timeValue); + writers.put(OffsetDateTime.class, XContentBuilder::timeValue); + writers.put(OffsetTime.class, XContentBuilder::timeValue); + writers.put(java.time.Instant.class, XContentBuilder::timeValue); + writers.put(LocalDateTime.class, XContentBuilder::timeValue); + writers.put(LocalDate.class, XContentBuilder::timeValue); + writers.put(LocalTime.class, XContentBuilder::timeValue); + writers.put(DayOfWeek.class, (b, v) -> b.value(v.toString())); + writers.put(Month.class, (b, v) -> b.value(v.toString())); + writers.put(MonthDay.class, (b, v) -> b.value(v.toString())); + writers.put(Year.class, (b, v) -> b.value(v.toString())); + writers.put(Duration.class, (b, v) -> b.value(v.toString())); + writers.put(Period.class, (b, v) -> b.value(v.toString())); writers.put(BytesReference.class, (b, v) -> { if (v == null) { @@ -102,6 +133,14 @@ public class XContentElasticsearchExtension implements XContentBuilderExtension transformers.put(Calendar.class, d -> DEFAULT_DATE_PRINTER.print(((Calendar) d).getTimeInMillis())); transformers.put(GregorianCalendar.class, d -> DEFAULT_DATE_PRINTER.print(((Calendar) d).getTimeInMillis())); transformers.put(Instant.class, d -> DEFAULT_DATE_PRINTER.print((Instant) d)); + transformers.put(ZonedDateTime.class, d -> DEFAULT_FORMATTER.format((ZonedDateTime) d)); + transformers.put(OffsetDateTime.class, d -> DEFAULT_FORMATTER.format((OffsetDateTime) d)); + transformers.put(OffsetTime.class, d -> OFFSET_TIME_FORMATTER.format((OffsetTime) d)); + transformers.put(LocalDateTime.class, d -> DEFAULT_FORMATTER.format((LocalDateTime) d)); + transformers.put(java.time.Instant.class, + d -> DEFAULT_FORMATTER.format(ZonedDateTime.ofInstant((java.time.Instant) d, ZoneOffset.UTC))); + transformers.put(LocalDate.class, d -> ((LocalDate) d).toString()); + transformers.put(LocalTime.class, d -> LOCAL_TIME_FORMATTER.format((LocalTime) d)); return transformers; } } diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index e47fe7a7a70..f34798605d7 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.single.SingleNodeDiscovery; +import org.elasticsearch.discovery.zen.FileBasedUnicastHostsProvider; import org.elasticsearch.discovery.zen.SettingsBasedHostsProvider; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -40,6 +41,7 @@ import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -69,10 +71,11 @@ public class DiscoveryModule { public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, NetworkService 
networkService, MasterService masterService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, - AllocationService allocationService) { + AllocationService allocationService, Path configFile) { final Collection> joinValidators = new ArrayList<>(); final Map> hostProviders = new HashMap<>(); hostProviders.put("settings", () -> new SettingsBasedHostsProvider(settings, transportService)); + hostProviders.put("file", () -> new FileBasedUnicastHostsProvider(settings, configFile)); for (DiscoveryPlugin plugin : plugins) { plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> { if (hostProviders.put(entry.getKey(), entry.getValue()) != null) { diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java new file mode 100644 index 00000000000..f339ae43a70 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.zen; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * An implementation of {@link UnicastHostsProvider} that reads hosts/ports + * from {@link #UNICAST_HOSTS_FILE}. + * + * Each unicast host/port that is part of the discovery process must be listed on + * a separate line. If the port is left off an entry, a default port of 9300 is + * assumed. 
+ * An example unicast hosts file could read:
+ *
+ * 67.81.244.10
+ * 67.81.244.11:9305
+ * 67.81.244.15:9400
+ */
+public class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {
+
+    public static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt";
+
+    private final Path unicastHostsFilePath;
+    private final Path legacyUnicastHostsFilePath;
+
+    public FileBasedUnicastHostsProvider(Settings settings, Path configFile) {
+        super(settings);
+        this.unicastHostsFilePath = configFile.resolve(UNICAST_HOSTS_FILE);
+        this.legacyUnicastHostsFilePath = configFile.resolve("discovery-file").resolve(UNICAST_HOSTS_FILE);
+    }
+
+    private List<String> getHostsList() {
+        if (Files.exists(unicastHostsFilePath)) {
+            return readFileContents(unicastHostsFilePath);
+        }
+
+        if (Files.exists(legacyUnicastHostsFilePath)) {
+            deprecationLogger.deprecated("Found dynamic hosts list at [{}] but this path is deprecated. This list should be at [{}] " +
+                "instead. Support for the deprecated path will be removed in the future.", legacyUnicastHostsFilePath, unicastHostsFilePath);
+            return readFileContents(legacyUnicastHostsFilePath);
+        }
+
+        logger.warn("expected, but did not find, a dynamic hosts list at [{}]", unicastHostsFilePath);
+
+        return Collections.emptyList();
+    }
+
+    private List<String> readFileContents(Path path) {
+        try (Stream<String> lines = Files.lines(path)) {
+            return lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments
+                .collect(Collectors.toList());
+        } catch (IOException e) {
+            logger.warn(() -> new ParameterizedMessage("failed to read file [{}]", unicastHostsFilePath), e);
+            return Collections.emptyList();
+        }
+    }
+
+    @Override
+    public List<TransportAddress> buildDynamicHosts(HostsResolver hostsResolver) {
+        final List<TransportAddress> transportAddresses = hostsResolver.resolveHosts(getHostsList(), 1);
+        logger.debug("seed addresses: {}", transportAddresses);
+        return transportAddresses;
+    }
+}
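For reference, a minimal sketch of the unicast_hosts.txt file this provider consumes, assembled from the javadoc and the `#`-comment filtering in readFileContents() above (the addresses are the javadoc's own examples; the file lives in the config directory resolved in the constructor):

    # one host[:port] entry per line; the port defaults to 9300 when omitted
    67.81.244.10
    67.81.244.11:9305
    67.81.244.15:9400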
diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
index 87874bd4500..29d3207c73a 100644
--- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
+++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
@@ -20,6 +20,7 @@ package org.elasticsearch.env;
 
 import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.SegmentInfos;
@@ -37,7 +38,6 @@ import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -53,7 +53,6 @@ import org.elasticsearch.index.store.FsDirectoryService;
 import org.elasticsearch.monitor.fs.FsInfo;
 import org.elasticsearch.monitor.fs.FsProbe;
 import org.elasticsearch.monitor.jvm.JvmInfo;
-import org.elasticsearch.node.Node;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -76,6 +75,7 @@ import java.util.Set;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Consumer;
 
 import static java.util.Collections.unmodifiableSet;
 
@@ -83,9 +83,6 @@ import static java.util.Collections.unmodifiableSet;
  * A component that holds all data paths for a single node.
  */
 public final class NodeEnvironment implements Closeable {
-
-    private final Logger logger;
-
     public static class NodePath {
         /* ${data.paths}/nodes/{node.id} */
         public final Path path;
@@ -139,6 +136,7 @@ public final class NodeEnvironment implements Closeable {
     }
 
+    private final Logger logger = LogManager.getLogger(NodeEnvironment.class);
     private final NodePath[] nodePaths;
     private final Path sharedDataPath;
     private final Lock[] locks;
@@ -173,24 +171,27 @@ public final class NodeEnvironment implements Closeable {
     public static final String INDICES_FOLDER = "indices";
     public static final String NODE_LOCK_FILENAME = "node.lock";
 
-    public NodeEnvironment(Settings settings, Environment environment) throws IOException {
-
+    /**
+     * Set up the environment.
+     * @param settings settings from elasticsearch.yml
+     * @param nodeIdConsumer called as soon as the node id is available so it can be used to set the
+     *                       node name in log messages if it wasn't loaded from
+     *                       elasticsearch.yml
+     */
+    public NodeEnvironment(Settings settings, Environment environment, Consumer<String> nodeIdConsumer) throws IOException {
         if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) {
             nodePaths = null;
             sharedDataPath = null;
             locks = null;
             nodeLockId = -1;
             nodeMetaData = new NodeMetaData(generateNodeId(settings));
-            logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
+            nodeIdConsumer.accept(nodeMetaData.nodeId());
             return;
         }
         final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length];
         final Lock[] locks = new Lock[nodePaths.length];
         boolean success = false;
 
-        // trace logger to debug issues before the default node name is derived from the node id
-        Logger startupTraceLogger = Loggers.getLogger(getClass(), settings);
-
         try {
             sharedDataPath = environment.sharedDataFile();
             int nodeLockId = -1;
@@ -203,13 +204,13 @@
                     Files.createDirectories(dir);
 
                     try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
-                        startupTraceLogger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
+                        logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
                         try {
                             locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME);
                             nodePaths[dirIndex] = new NodePath(dir);
                             nodeLockId = possibleLockId;
                         } catch (LockObtainFailedException ex) {
-                            startupTraceLogger.trace(
+                            logger.trace(
                                 new ParameterizedMessage("failed to obtain node lock on {}", dir.toAbsolutePath()), ex);
                             // release all the ones that were obtained up until now
                             releaseAndNullLocks(locks);
@@ -217,7 +218,7 @@
                         }
 
                     } catch (IOException e) {
-                        startupTraceLogger.trace(() -> new ParameterizedMessage(
+                        logger.trace(() -> new ParameterizedMessage(
                             "failed to obtain node lock on {}", dir.toAbsolutePath()), e);
                         lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
                         // release all the ones that were obtained up until now
@@ -242,8 +243,8 @@
                     maxLocalStorageNodes);
                 throw new IllegalStateException(message, lastException);
             }
-            this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths);
-            this.logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
+            this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths);
+            
nodeIdConsumer.accept(nodeMetaData.nodeId()); this.nodeLockId = nodeLockId; this.locks = locks; diff --git a/server/src/main/java/org/elasticsearch/gateway/Gateway.java b/server/src/main/java/org/elasticsearch/gateway/Gateway.java index d2261e5d1b4..77d2c553c2c 100644 --- a/server/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/server/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -137,20 +137,25 @@ public class Gateway extends AbstractComponent { } } } + final ClusterState.Builder builder = upgradeAndArchiveUnknownOrInvalidSettings(metaDataBuilder); + listener.onSuccess(builder.build()); + } + + ClusterState.Builder upgradeAndArchiveUnknownOrInvalidSettings(MetaData.Builder metaDataBuilder) { final ClusterSettings clusterSettings = clusterService.getClusterSettings(); metaDataBuilder.persistentSettings( clusterSettings.archiveUnknownOrInvalidSettings( - metaDataBuilder.persistentSettings(), + clusterSettings.upgradeSettings(metaDataBuilder.persistentSettings()), e -> logUnknownSetting("persistent", e), (e, ex) -> logInvalidSetting("persistent", e, ex))); metaDataBuilder.transientSettings( clusterSettings.archiveUnknownOrInvalidSettings( - metaDataBuilder.transientSettings(), + clusterSettings.upgradeSettings(metaDataBuilder.transientSettings()), e -> logUnknownSetting("transient", e), (e, ex) -> logInvalidSetting("transient", e, ex))); ClusterState.Builder builder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)); builder.metaData(metaDataBuilder); - listener.onSuccess(builder.build()); + return builder; } private void logUnknownSetting(String settingType, Map.Entry e) { diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 719626b7e18..46ff2f960e7 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.IndexFolderUpgrader; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.plugins.MetaDataUpgrader; @@ -84,7 +83,6 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateA if (DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings)) { try { ensureNoPre019State(); - IndexFolderUpgrader.upgradeIndicesIfNeeded(settings, nodeEnv); final MetaData metaData = metaStateService.loadFullState(); final MetaData upgradedMetaData = upgradeMetaData(metaData, metaDataIndexUpgradeService, metaDataUpgrader); // We finished global state validation and successfully checked all indices for backward compatibility diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index 95ecc418316..c3cbfea9141 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -69,15 +69,18 @@ public class TransportNodesListGatewayStartedShards extends public static final String ACTION_NAME = "internal:gateway/local/started_shards"; 
 private final NodeEnvironment nodeEnv;
     private final IndicesService indicesService;
+    private final NamedXContentRegistry namedXContentRegistry;
 
     @Inject
     public TransportNodesListGatewayStartedShards(Settings settings, ThreadPool threadPool,
                                                   ClusterService clusterService, TransportService transportService,
                                                   ActionFilters actionFilters,
-                                                  NodeEnvironment env, IndicesService indicesService) {
+                                                  NodeEnvironment env, IndicesService indicesService,
+                                                  NamedXContentRegistry namedXContentRegistry) {
         super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters,
             Request::new, NodeRequest::new, ThreadPool.Names.FETCH_SHARD_STARTED, NodeGatewayStartedShards.class);
         this.nodeEnv = env;
         this.indicesService = indicesService;
+        this.namedXContentRegistry = namedXContentRegistry;
     }
 
     @Override
@@ -112,7 +115,7 @@ public class TransportNodesListGatewayStartedShards extends
         try {
             final ShardId shardId = request.getShardId();
             logger.trace("{} loading local shard state info", shardId);
-            ShardStateMetaData shardStateMetaData = ShardStateMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY,
+            ShardStateMetaData shardStateMetaData = ShardStateMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry,
                 nodeEnv.availableShardPaths(request.shardId));
             if (shardStateMetaData != null) {
                 IndexMetaData metaData = clusterService.state().metaData().index(shardId.getIndex());
@@ -120,7 +123,7 @@
                     // we may send these requests while processing the cluster state that recovered the index
                     // sometimes the request comes in before the local node processed that cluster state
                     // in such cases we can load it from disk
-                    metaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY,
+                    metaData = IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry,
                         nodeEnv.indexPaths(shardId.getIndex()));
                 }
                 if (metaData == null) {
diff --git a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
index 25acdd06b44..7760e104140 100644
--- a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
+++ b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
@@ -33,7 +33,7 @@ public abstract class AbstractIndexComponent implements IndexComponent {
      * Constructs a new index component, with the index name and its settings.
*/ protected AbstractIndexComponent(IndexSettings indexSettings) { - this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); + this.logger = Loggers.getLogger(getClass(), indexSettings.getIndex()); this.deprecationLogger = new DeprecationLogger(logger); this.indexSettings = indexSettings; } diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index 1bdec683bfb..78cd8d462ea 100644 --- a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -51,7 +51,7 @@ final class CompositeIndexEventListener implements IndexEventListener { } } this.listeners = Collections.unmodifiableList(new ArrayList<>(listeners)); - this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); + this.logger = Loggers.getLogger(getClass(), indexSettings.getIndex()); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/EsTieredMergePolicy.java b/server/src/main/java/org/elasticsearch/index/EsTieredMergePolicy.java index 68edf3a9b18..8d1dbeb2e1d 100644 --- a/server/src/main/java/org/elasticsearch/index/EsTieredMergePolicy.java +++ b/server/src/main/java/org/elasticsearch/index/EsTieredMergePolicy.java @@ -108,4 +108,13 @@ final class EsTieredMergePolicy extends FilterMergePolicy { public double getSegmentsPerTier() { return regularMergePolicy.getSegmentsPerTier(); } + + public void setDeletesPctAllowed(double deletesPctAllowed) { + regularMergePolicy.setDeletesPctAllowed(deletesPctAllowed); + forcedMergePolicy.setDeletesPctAllowed(deletesPctAllowed); + } + + public double getDeletesPctAllowed() { + return regularMergePolicy.getDeletesPctAllowed(); + } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 715b78b14ff..7f2eae492fd 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -21,10 +21,11 @@ package org.elasticsearch.index; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.store.MMapDirectory; +import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; @@ -59,7 +60,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -84,8 +84,10 @@ import java.util.function.Function; */ public final class IndexModule { + public static final Setting NODE_STORE_ALLOW_MMAPFS = Setting.boolSetting("node.store.allow_mmapfs", true, Property.NodeScope); + public static final Setting INDEX_STORE_TYPE_SETTING = - new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope); + new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope); /** On which extensions to load data into the file-system cache upon opening of files. 
* This only works with the mmap directory, and even in that case is still
@@ -289,7 +291,7 @@ public final class IndexModule {
         }
     }
 
-    private static boolean isBuiltinType(String storeType) {
+    public static boolean isBuiltinType(String storeType) {
         for (Type type : Type.values()) {
             if (type.match(storeType)) {
                 return true;
@@ -298,21 +300,48 @@
         return false;
     }
 
+
     public enum Type {
-        NIOFS,
-        MMAPFS,
-        SIMPLEFS,
-        FS;
+        NIOFS("niofs"),
+        MMAPFS("mmapfs"),
+        SIMPLEFS("simplefs"),
+        FS("fs");
+
+        private final String settingsKey;
+
+        Type(final String settingsKey) {
+            this.settingsKey = settingsKey;
+        }
+
+        private static final Map<String, Type> TYPES;
+
+        static {
+            final Map<String, Type> types = new HashMap<>(4);
+            for (final Type type : values()) {
+                types.put(type.settingsKey, type);
+            }
+            TYPES = Collections.unmodifiableMap(types);
+        }
 
         public String getSettingsKey() {
-            return this.name().toLowerCase(Locale.ROOT);
+            return this.settingsKey;
         }
+
+        public static Type fromSettingsKey(final String key) {
+            final Type type = TYPES.get(key);
+            if (type == null) {
+                throw new IllegalArgumentException("no matching type for [" + key + "]");
+            }
+            return type;
+        }
+
         /**
          * Returns true iff this setting matches the type.
          */
         public boolean match(String setting) {
             return getSettingsKey().equals(setting);
         }
+    }
 
     /**
@@ -325,6 +354,16 @@ public final class IndexModule {
         IndexSearcherWrapper newWrapper(IndexService indexService);
     }
 
+    public static Type defaultStoreType(final boolean allowMmapfs) {
+        if (allowMmapfs && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
+            return Type.MMAPFS;
+        } else if (Constants.WINDOWS) {
+            return Type.SIMPLEFS;
+        } else {
+            return Type.NIOFS;
+        }
+    }
+
     public IndexService newIndexService(
             NodeEnvironment environment,
             NamedXContentRegistry xContentRegistry,
@@ -343,20 +382,7 @@ public final class IndexModule {
         IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null ?
(shard) -> null : indexSearcherWrapper.get();
         eventListener.beforeIndexCreated(indexSettings.getIndex(), indexSettings.getSettings());
-        final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING);
-        final IndexStore store;
-        if (Strings.isEmpty(storeType) || isBuiltinType(storeType)) {
-            store = new IndexStore(indexSettings);
-        } else {
-            Function<IndexSettings, IndexStore> factory = indexStoreFactories.get(storeType);
-            if (factory == null) {
-                throw new IllegalArgumentException("Unknown store type [" + storeType + "]");
-            }
-            store = factory.apply(indexSettings);
-            if (store == null) {
-                throw new IllegalStateException("store must not be null");
-            }
-        }
+        final IndexStore store = getIndexStore(indexSettings, indexStoreFactories);
         final QueryCache queryCache;
         if (indexSettings.getValue(INDEX_QUERY_CACHE_ENABLED_SETTING)) {
             BiFunction<IndexSettings, IndicesQueryCache, QueryCache> queryCacheProvider = forceQueryCacheProvider.get();
@@ -375,6 +401,39 @@ public final class IndexModule {
             indicesFieldDataCache, searchOperationListeners, indexOperationListeners, namedWriteableRegistry);
     }
 
+    private static IndexStore getIndexStore(
+            final IndexSettings indexSettings, final Map<String, Function<IndexSettings, IndexStore>> indexStoreFactories) {
+        final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING);
+        final Type type;
+        final Boolean allowMmapfs = NODE_STORE_ALLOW_MMAPFS.get(indexSettings.getNodeSettings());
+        if (storeType.isEmpty() || Type.FS.getSettingsKey().equals(storeType)) {
+            type = defaultStoreType(allowMmapfs);
+        } else {
+            if (isBuiltinType(storeType)) {
+                type = Type.fromSettingsKey(storeType);
+            } else {
+                type = null;
+            }
+        }
+        if (type != null && type == Type.MMAPFS && allowMmapfs == false) {
+            throw new IllegalArgumentException("store type [mmapfs] is not allowed");
+        }
+        final IndexStore store;
+        if (storeType.isEmpty() || isBuiltinType(storeType)) {
+            store = new IndexStore(indexSettings);
+        } else {
+            Function<IndexSettings, IndexStore> factory = indexStoreFactories.get(storeType);
+            if (factory == null) {
+                throw new IllegalArgumentException("Unknown store type [" + storeType + "]");
+            }
+            store = factory.apply(indexSettings);
+            if (store == null) {
+                throw new IllegalStateException("store must not be null");
+            }
+        }
+        return store;
+    }
+
     /**
      * Creates a new mapper service to do administrative work like mapping updates. This *should not* be used for document parsing.
      * Doing so will result in an exception.
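The store-type resolution above can be exercised end to end; the following is a minimal sketch and not part of the patch (nodeSettings is a hypothetical node-level Settings instance):

    // Disallow mmapfs node-wide; an explicit index.store.type=mmapfs now fails
    // inside getIndexStore with "store type [mmapfs] is not allowed".
    Settings nodeSettings = Settings.builder()
            .put(IndexModule.NODE_STORE_ALLOW_MMAPFS.getKey(), false)   // node.store.allow_mmapfs
            .build();
    // With mmapfs ruled out, the default resolves to SIMPLEFS on Windows and
    // NIOFS everywhere else, mirroring defaultStoreType(boolean) above.
    IndexModule.Type defaultType =
            IndexModule.defaultStoreType(IndexModule.NODE_STORE_ALLOW_MMAPFS.get(nodeSettings));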
diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java
index 5e9e811bc32..047b3c5cd76 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexService.java
@@ -64,6 +64,7 @@ import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.ShardNotFoundException;
 import org.elasticsearch.index.shard.ShardPath;
 import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.index.store.DirectoryService;
 import org.elasticsearch.index.store.IndexStore;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;
@@ -377,7 +378,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
                     warmer.warm(searcher, shard, IndexService.this.indexSettings);
                 }
             };
-            store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock,
+            // TODO we can remove either IndexStore or DirectoryService. All we need is a simple Supplier
+            DirectoryService directoryService = indexStore.newDirectoryService(path);
+            store = new Store(shardId, this.indexSettings, directoryService.newDirectory(), lock,
                 new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId)));
             indexShard = new IndexShard(routing, this.indexSettings, path, store, indexSortSupplier,
                 indexCache, mapperService, similarityService, engineFactory,
@@ -522,8 +525,8 @@
     }
 
     @Override
-    public boolean updateMapping(IndexMetaData indexMetaData) throws IOException {
-        return mapperService().updateMapping(indexMetaData);
+    public boolean updateMapping(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) throws IOException {
+        return mapperService().updateMapping(currentIndexMetaData, newIndexMetaData);
     }
 
     private class StoreCloseListener implements Store.OnClose {
diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java
index 44cd743bbd4..9801cc3e26b 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java
@@ -75,11 +75,10 @@ public final class IndexSettings {
             switch(s) {
                 case "false":
                 case "true":
-                case "fix":
                 case "checksum":
                     return s;
                 default:
-                    throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, fix, checksum] but was: " + s);
+                    throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, checksum] but was: " + s);
             }
         }, Property.IndexScope);
 
@@ -237,6 +236,21 @@ public final class IndexSettings {
     public static final Setting<TimeValue> INDEX_GC_DELETES_SETTING =
         Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic,
             Property.IndexScope);
+
+    /**
+     * Specifies if the index should use soft-delete instead of hard-delete for update/delete operations.
+     */
+    public static final Setting<Boolean> INDEX_SOFT_DELETES_SETTING =
+        Setting.boolSetting("index.soft_deletes.enabled", false, Property.IndexScope, Property.Final);
+
+    /**
+     * Controls how many soft-deleted documents will be kept around before being merged away. Keeping more deleted
+     * documents increases the chance of operation-based recoveries and allows querying a longer history of documents.
+     * If soft-deletes are enabled, an engine by default will retain all operations up to the global checkpoint.
+     **/
+    public static final Setting<Long> INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING =
+        Setting.longSetting("index.soft_deletes.retention.operations", 0, 0, Property.IndexScope, Property.Dynamic);
+
     /**
      * The maximum number of refresh listeners allowed on this shard.
*/ @@ -289,6 +303,8 @@ public final class IndexSettings { private final IndexSortConfig indexSortConfig; private final IndexScopedSettings scopedSettings; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); + private final boolean softDeleteEnabled; + private volatile long softDeleteRetentionOperations; private volatile boolean warmerEnabled; private volatile int maxResultWindow; private volatile int maxInnerResultWindow; @@ -380,8 +396,8 @@ public final class IndexSettings { this.nodeSettings = nodeSettings; this.settings = Settings.builder().put(nodeSettings).put(indexMetaData.getSettings()).build(); this.index = indexMetaData.getIndex(); - version = Version.indexCreated(settings); - logger = Loggers.getLogger(getClass(), settings, index); + version = IndexMetaData.SETTING_INDEX_VERSION_CREATED.get(settings); + logger = Loggers.getLogger(getClass(), index); nodeName = Node.NODE_NAME_SETTING.get(settings); this.indexMetaData = indexMetaData; numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null); @@ -400,6 +416,8 @@ public final class IndexSettings { generationThresholdSize = scopedSettings.get(INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING); mergeSchedulerConfig = new MergeSchedulerConfig(this); gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); + softDeleteEnabled = version.onOrAfter(Version.V_6_5_0) && scopedSettings.get(INDEX_SOFT_DELETES_SETTING); + softDeleteRetentionOperations = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING); maxResultWindow = scopedSettings.get(MAX_RESULT_WINDOW_SETTING); maxInnerResultWindow = scopedSettings.get(MAX_INNER_RESULT_WINDOW_SETTING); @@ -421,6 +439,7 @@ public final class IndexSettings { defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); + scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, mergePolicyConfig::setDeletesPctAllowed); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, mergePolicyConfig::setExpungeDeletesAllowed); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, mergePolicyConfig::setFloorSegmentSetting); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, mergePolicyConfig::setMaxMergesAtOnce); @@ -458,6 +477,7 @@ public final class IndexSettings { scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_IDLE_AFTER, this::setSearchIdleAfter); scopedSettings.addSettingsUpdateConsumer(MAX_REGEX_LENGTH_SETTING, this::setMaxRegexLength); scopedSettings.addSettingsUpdateConsumer(DEFAULT_PIPELINE, this::setDefaultPipeline); + scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, this::setSoftDeleteRetentionOperations); } private void setSearchIdleAfter(TimeValue searchIdleAfter) { this.searchIdleAfter = searchIdleAfter; } @@ -841,4 +861,22 @@ public final class IndexSettings { public void setDefaultPipeline(String defaultPipeline) { this.defaultPipeline = defaultPipeline; } + + /** + * Returns true if soft-delete is enabled. 
+     */
+    public boolean isSoftDeleteEnabled() {
+        return softDeleteEnabled;
+    }
+
+    private void setSoftDeleteRetentionOperations(long ops) {
+        this.softDeleteRetentionOperations = ops;
+    }
+
+    /**
+     * Returns the number of extra operations (i.e. soft-deleted documents) to be kept for recoveries and history purposes.
+     */
+    public long getSoftDeleteRetentionOperations() {
+        return this.softDeleteRetentionOperations;
+    }
 }
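Because index.soft_deletes.enabled is declared Property.Final it can only be supplied at index-creation time, while index.soft_deletes.retention.operations is Property.Dynamic and may be updated later. A minimal, illustrative sketch of settings built against the constants above (not part of the patch):

    Settings softDeleteSettings = Settings.builder()
            .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)                     // final: creation time only
            .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 1000L) // dynamic
            .build();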
diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java
index 8293f873c65..e69cfd2c7af 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java
@@ -20,6 +20,7 @@ package org.elasticsearch.index;
 
 import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
 import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.logging.Loggers;
@@ -89,7 +90,7 @@ public final class IndexingSlowLog implements IndexingOperationListener {
     }, Property.Dynamic, Property.IndexScope);
 
     IndexingSlowLog(IndexSettings indexSettings) {
-        this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings());
+        this.indexLogger = LogManager.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index");
         this.index = indexSettings.getIndex();
 
         indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat);
diff --git a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java
index 8a264cd3cb7..bc0626b9920 100644
--- a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java
+++ b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java
@@ -82,11 +82,12 @@ import org.elasticsearch.common.unit.ByteSizeValue;
 *      >= than the max_merge_at_once otherwise you'll force too many merges to
 *      occur.
 *
- * <li>index.merge.policy.reclaim_deletes_weight:
+ * <li>index.merge.policy.deletes_pct_allowed:
 *
- *     Controls how aggressively merges that reclaim more deletions are favored.
- *     Higher values favor selecting merges that reclaim deletions. A value of
- *     0.0 means deletions don't impact merge selection. Defaults to 2.0.
+ *     Controls the maximum percentage of deleted documents that is tolerated in
+ *     the index. Lower values make the index more space efficient at the
+ *     expense of increased CPU and I/O activity. Values must be between 20 and
+ *     50. Default value is 33.
 *
 *

@@ -126,6 +127,7 @@ public final class MergePolicyConfig {
     public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB);
     public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d;
     public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d;
+    public static final double DEFAULT_DELETES_PCT_ALLOWED = 33.0d;
     public static final Setting<Double> INDEX_COMPOUND_FORMAT_SETTING =
         new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO),
             MergePolicyConfig::parseNoCFSRatio, Property.Dynamic, Property.IndexScope);
@@ -151,6 +153,9 @@ public final class MergePolicyConfig {
     public static final Setting<Double> INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING =
         Setting.doubleSetting("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT, 0.0d,
             Property.Dynamic, Property.IndexScope, Property.Deprecated);
+    public static final Setting<Double> INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING =
+        Setting.doubleSetting("index.merge.policy.deletes_pct_allowed", DEFAULT_DELETES_PCT_ALLOWED, 20.0d, 50.0d,
+            Property.Dynamic, Property.IndexScope);
     public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; // don't convert to Setting<> and register... we only set this in tests and register via a plugin
 
@@ -164,6 +169,7 @@
         ByteSizeValue maxMergedSegment = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING);
         double segmentsPerTier = indexSettings.getValue(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING);
         double reclaimDeletesWeight = indexSettings.getValue(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING);
+        double deletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING);
         this.mergesEnabled = indexSettings.getSettings().getAsBoolean(INDEX_MERGE_ENABLED, true);
         if (mergesEnabled == false) {
             logger.warn("[{}] is set to false, this should only be used in tests and can cause serious problems in production environments", INDEX_MERGE_ENABLED);
@@ -176,9 +182,10 @@
         mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
         mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac());
         mergePolicy.setSegmentsPerTier(segmentsPerTier);
+        mergePolicy.setDeletesPctAllowed(deletesPctAllowed);
         if (logger.isTraceEnabled()) {
-            logger.trace("using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}], max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}], reclaim_deletes_weight[{}]",
-                forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, reclaimDeletesWeight);
+            logger.trace("using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}], max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}], deletes_pct_allowed[{}]",
+                forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, deletesPctAllowed);
         }
     }
 
@@ -210,6 +217,10 @@
         mergePolicy.setNoCFSRatio(noCFSRatio);
     }
 
+    void setDeletesPctAllowed(Double deletesPctAllowed) {
+        mergePolicy.setDeletesPctAllowed(deletesPctAllowed);
+    }
+
     private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerTier) {
         // fixing maxMergeAtOnce, see TieredMergePolicy#setMaxMergeAtOnce
         if (!(segmentsPerTier >= maxMergeAtOnce)) {
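Since the new setting is registered as Property.Dynamic with hard bounds of 20.0 and 50.0 enforced by Setting.doubleSetting, it can be changed on a live index. A minimal sketch using the Java update-settings request ("my-index" is a hypothetical index name; this is illustrative, not part of the patch):

    UpdateSettingsRequest request = new UpdateSettingsRequest("my-index")
            .settings(Settings.builder()
                    // any value outside [20.0, 50.0] is rejected at validation time
                    .put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 25.0d)
                    .build());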
diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java
index 10b4c4318a3..200d72f601d 100644
--- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java
+++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java
@@ -20,6 +20,7 @@ package org.elasticsearch.index;
 
 import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Setting;
@@ -82,8 +83,8 @@ public final class SearchSlowLog implements SearchOperationListener {
 
     public SearchSlowLog(IndexSettings indexSettings) {
-        this.queryLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query", indexSettings.getSettings());
-        this.fetchLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch", indexSettings.getSettings());
+        this.queryLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query");
+        this.fetchLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch");
 
         indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, this::setQueryWarnThreshold);
         this.queryWarnThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING).nanos();
diff --git a/server/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java b/server/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java
index f3559a65070..d78d914a5ec 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java
@@ -21,6 +21,7 @@ package org.elasticsearch.index.analysis;
 
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.en.EnglishAnalyzer;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -32,7 +33,7 @@ public class StopAnalyzerProvider extends AbstractIndexAnalyzerProvider codecs = MapBuilder.newMapBuilder();
         if (mapperService == null) {
-            codecs.put(DEFAULT_CODEC, new Lucene70Codec());
-            codecs.put(BEST_COMPRESSION_CODEC, new Lucene70Codec(Mode.BEST_COMPRESSION));
+            codecs.put(DEFAULT_CODEC, new Lucene80Codec());
+            codecs.put(BEST_COMPRESSION_CODEC, new Lucene80Codec(Mode.BEST_COMPRESSION));
         } else {
             codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger));
diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
index bf1e48e7a6b..dfbbf350dcb 100644
--- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
+++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
@@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
-import org.apache.lucene.codecs.lucene70.Lucene70Codec;
+import org.apache.lucene.codecs.lucene80.Lucene80Codec;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.index.mapper.CompletionFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@
-37,8 +37,7 @@ import org.elasticsearch.index.mapper.MapperService; * per index in real time via the mapping API. If no specific postings format is * configured for a specific field the default postings format is used. */ -// LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version -public class PerFieldMappingPostingFormatCodec extends Lucene70Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene80Codec { private final Logger logger; private final MapperService mapperService; diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index d0575c8a8c9..d10690379ed 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -46,14 +46,17 @@ import java.util.function.LongSupplier; public final class CombinedDeletionPolicy extends IndexDeletionPolicy { private final Logger logger; private final TranslogDeletionPolicy translogDeletionPolicy; + private final SoftDeletesPolicy softDeletesPolicy; private final LongSupplier globalCheckpointSupplier; private final ObjectIntHashMap snapshottedCommits; // Number of snapshots held against each commit point. private volatile IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. private volatile IndexCommit lastCommit; // the most recent commit point - CombinedDeletionPolicy(Logger logger, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) { + CombinedDeletionPolicy(Logger logger, TranslogDeletionPolicy translogDeletionPolicy, + SoftDeletesPolicy softDeletesPolicy, LongSupplier globalCheckpointSupplier) { this.logger = logger; this.translogDeletionPolicy = translogDeletionPolicy; + this.softDeletesPolicy = softDeletesPolicy; this.globalCheckpointSupplier = globalCheckpointSupplier; this.snapshottedCommits = new ObjectIntHashMap<>(); } @@ -80,7 +83,7 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { deleteCommit(commits.get(i)); } } - updateTranslogDeletionPolicy(); + updateRetentionPolicy(); } private void deleteCommit(IndexCommit commit) throws IOException { @@ -90,7 +93,7 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { assert commit.isDeleted() : "Deletion commit [" + commitDescription(commit) + "] was suppressed"; } - private void updateTranslogDeletionPolicy() throws IOException { + private void updateRetentionPolicy() throws IOException { assert Thread.holdsLock(this); logger.debug("Safe commit [{}], last commit [{}]", commitDescription(safeCommit), commitDescription(lastCommit)); assert safeCommit.isDeleted() == false : "The safe commit must not be deleted"; @@ -101,6 +104,9 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { assert minRequiredGen <= lastGen : "minRequiredGen must not be greater than lastGen"; translogDeletionPolicy.setTranslogGenerationOfLastCommit(lastGen); translogDeletionPolicy.setMinTranslogGenerationForRecovery(minRequiredGen); + + softDeletesPolicy.setLocalCheckpointOfSafeCommit( + Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY))); } /** diff --git a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index 
f4876149cac..7306b4e8cfd 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -71,7 +71,7 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler { this.config = indexSettings.getMergeSchedulerConfig(); this.shardId = shardId; this.indexSettings = indexSettings.getSettings(); - this.logger = Loggers.getLogger(getClass(), this.indexSettings, shardId); + this.logger = Loggers.getLogger(getClass(), shardId); refreshConfig(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 31da7afc51a..fe27aea805e 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -33,6 +33,7 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SegmentReader; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; @@ -58,6 +59,7 @@ import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; @@ -97,6 +99,7 @@ public abstract class Engine implements Closeable { public static final String SYNC_COMMIT_ID = "sync_id"; public static final String HISTORY_UUID_KEY = "history_uuid"; + public static final String MIN_RETAINED_SEQNO = "min_retained_seq_no"; protected final ShardId shardId; protected final String allocationId; @@ -132,7 +135,7 @@ public abstract class Engine implements Closeable { this.allocationId = engineConfig.getAllocationId(); this.store = engineConfig.getStore(); this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name - engineConfig.getIndexSettings().getSettings(), engineConfig.getShardId()); + engineConfig.getShardId()); this.eventListener = engineConfig.getEventListener(); } @@ -567,7 +570,31 @@ public abstract class Engine implements Closeable { * * @see Searcher#close() */ - public abstract Searcher acquireSearcher(String source, SearcherScope scope) throws EngineException; + public Searcher acquireSearcher(String source, SearcherScope scope) throws EngineException { + /* Acquire order here is store -> manager since we need + * to make sure that the store is not closed before + * the searcher is acquired. 
*/
+        if (store.tryIncRef() == false) {
+            throw new AlreadyClosedException(shardId + " store is closed", failedEngine.get());
+        }
+        Releasable releasable = store::decRef;
+        try {
+            EngineSearcher engineSearcher = new EngineSearcher(source, getReferenceManager(scope), store, logger);
+            releasable = null; // success - hand over the reference to the engine searcher
+            return engineSearcher;
+        } catch (AlreadyClosedException ex) {
+            throw ex;
+        } catch (Exception ex) {
+            maybeFailEngine("acquire_searcher", ex);
+            ensureOpen(ex); // throw EngineCloseException here if we are already closed
+            logger.error(() -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex);
+            throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex);
+        } finally {
+            Releasables.close(releasable);
+        }
+    }
+
+    protected abstract ReferenceManager<IndexSearcher> getReferenceManager(SearcherScope scope);
 
     public enum SearcherScope {
         EXTERNAL, INTERNAL
@@ -585,18 +612,32 @@ public abstract class Engine implements Closeable {
 
     public abstract void syncTranslog() throws IOException;
 
-    public abstract Closeable acquireTranslogRetentionLock();
+    /**
+     * Acquires a lock on the translog files and Lucene soft-deleted documents to prevent them from being trimmed
+     */
+    public abstract Closeable acquireRetentionLockForPeerRecovery();
 
     /**
-     * Creates a new translog snapshot from this engine for reading translog operations whose seq# at least the provided seq#.
-     * The caller has to close the returned snapshot after finishing the reading.
+     * Creates a new history snapshot from Lucene for reading operations whose seqno is in the requested seqno range (both inclusive)
      */
-    public abstract Translog.Snapshot newTranslogSnapshotFromMinSeqNo(long minSeqNo) throws IOException;
+    public abstract Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService,
+                                                         long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException;
 
     /**
-     * Returns the estimated number of translog operations in this engine whose seq# at least the provided seq#.
+     * Creates a new history snapshot for reading operations since {@code startingSeqNo} (inclusive).
+     * The returned snapshot can be retrieved from either the Lucene index or the translog files.
      */
-    public abstract int estimateTranslogOperationsFromMinSeq(long minSeqNo);
+    public abstract Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException;
+
+    /**
+     * Returns the estimated number of history operations whose seq# is at least {@code startingSeqNo} (inclusive) in this engine.
+     */
+    public abstract int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException;
+
+    /**
+     * Checks if this engine has every operation since {@code startingSeqNo} (inclusive) in its history (either Lucene or translog)
+     */
+    public abstract boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException;
 
     public abstract TranslogStats getTranslogStats();
 
@@ -1623,10 +1664,13 @@
     public abstract int fillSeqNoGaps(long primaryTerm) throws IOException;
 
     /**
-     * Performs recovery from the transaction log.
+     * Performs recovery from the transaction log up to {@code recoverUpToSeqNo} (inclusive).
      * This operation will close the engine if the recovery fails.
+     *
+     * @param translogRecoveryRunner the translog recovery runner
+     * @param recoverUpToSeqNo       the upper bound, inclusive, of the sequence numbers to be recovered
      */
-    public abstract Engine recoverFromTranslog() throws IOException;
+    public abstract Engine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) throws IOException;
 
     /**
      * Do not replay translog operations, but make the engine be ready.
@@ -1644,4 +1688,9 @@
      * Tries to prune buffered deletes from the version map.
      */
     public abstract void maybePruneDeletes();
+
+    @FunctionalInterface
+    public interface TranslogRecoveryRunner {
+        int run(Engine engine, Translog.Snapshot snapshot) throws IOException;
+    }
 }
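Both branches of the new history API hand back a Translog.Snapshot, so callers drain Lucene-backed and translog-backed history the same way. A minimal sketch of a consumer, not from the patch (engine, mapperService, startingSeqNo and process(...) are hypothetical):

    try (Translog.Snapshot snapshot =
                 engine.readHistoryOperations("peer-recovery", mapperService, startingSeqNo)) {
        Translog.Operation op;
        while ((op = snapshot.next()) != null) { // next() returns null once the snapshot is exhausted
            process(op);
        }
    }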
diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
index 2deae61bd52..f95ba96d343 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
@@ -34,15 +34,14 @@ import org.elasticsearch.common.unit.MemorySizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.Store;
-import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.index.translog.TranslogConfig;
 import org.elasticsearch.indices.IndexingMemoryController;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.threadpool.ThreadPool;
 
-import java.io.IOException;
 import java.util.List;
 import java.util.function.LongSupplier;
 
@@ -75,11 +74,11 @@ public final class EngineConfig {
     private final List<ReferenceManager.RefreshListener> internalRefreshListener;
     @Nullable
     private final Sort indexSort;
-    private final TranslogRecoveryRunner translogRecoveryRunner;
     @Nullable
     private final CircuitBreakerService circuitBreakerService;
     private final LongSupplier globalCheckpointSupplier;
     private final LongSupplier primaryTermSupplier;
+    private final TombstoneDocSupplier tombstoneDocSupplier;
 
     /**
      * Index setting to change the low level lucene codec used for writing new segments.
@@ -125,8 +124,8 @@ public final class EngineConfig {
                         TranslogConfig translogConfig, TimeValue flushMergesAfter,
                         List<ReferenceManager.RefreshListener> externalRefreshListener,
                         List<ReferenceManager.RefreshListener> internalRefreshListener, Sort indexSort,
-                        TranslogRecoveryRunner translogRecoveryRunner, CircuitBreakerService circuitBreakerService,
-                        LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) {
+                        CircuitBreakerService circuitBreakerService, LongSupplier globalCheckpointSupplier,
+                        LongSupplier primaryTermSupplier, TombstoneDocSupplier tombstoneDocSupplier) {
         this.shardId = shardId;
         this.allocationId = allocationId;
         this.indexSettings = indexSettings;
@@ -160,10 +159,10 @@
         this.externalRefreshListener = externalRefreshListener;
         this.internalRefreshListener = internalRefreshListener;
         this.indexSort = indexSort;
-        this.translogRecoveryRunner = translogRecoveryRunner;
         this.circuitBreakerService = circuitBreakerService;
         this.globalCheckpointSupplier = globalCheckpointSupplier;
         this.primaryTermSupplier = primaryTermSupplier;
+        this.tombstoneDocSupplier = tombstoneDocSupplier;
     }
 
     /**
@@ -320,18 +319,6 @@
      */
     public TimeValue getFlushMergesAfter() { return flushMergesAfter; }
 
-    @FunctionalInterface
-    public interface TranslogRecoveryRunner {
-        int run(Engine engine, Translog.Snapshot snapshot) throws IOException;
-    }
-
-    /**
-     * Returns a runner that implements the translog recovery from the given snapshot
-     */
-    public TranslogRecoveryRunner getTranslogRecoveryRunner() {
-        return translogRecoveryRunner;
-    }
-
     /**
      * The refresh listeners to add to Lucene for externally visible refreshes
      */
@@ -373,4 +360,25 @@
     public LongSupplier getPrimaryTermSupplier() {
         return primaryTermSupplier;
     }
+
+    /**
+     * A supplier that supplies tombstone documents to be used in soft-update methods.
+     * The returned document consists of only the _uid, _seqno, _term and _version fields; other metadata fields are excluded.
+     */
+    public interface TombstoneDocSupplier {
+        /**
+         * Creates a tombstone document for a delete operation.
+         */
+        ParsedDocument newDeleteTombstoneDoc(String type, String id);
+
+        /**
+         * Creates a tombstone document for a noop operation.
+         * @param reason the reason for the noop
+         */
+        ParsedDocument newNoopTombstoneDoc(String reason);
+    }
+
+    public TombstoneDocSupplier getTombstoneDocSupplier() {
+        return tombstoneDocSupplier;
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index a30127a24ae..d9b03777f1b 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -20,17 +20,20 @@ package org.elasticsearch.index.engine;
 
 import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LiveIndexWriterConfig;
 import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.SegmentCommitInfo;
 import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.SoftDeletesRetentionMergePolicy;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ReferenceManager;
@@ -42,12 +45,12 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.InfoStream;
+import org.elasticsearch.Assertions;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.lucene.LoggerInfoStream;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
@@ -61,7 +64,11 @@ import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.mapper.IdFieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.ParseContext;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SeqNoFieldMapper;
+import org.elasticsearch.index.mapper.SourceFieldMapper;
 import org.elasticsearch.index.merge.MergeStats;
 import org.elasticsearch.index.merge.OnGoingMerge;
 import org.elasticsearch.index.seqno.LocalCheckpointTracker;
@@ -140,6 +147,10 @@ public class InternalEngine extends Engine {
     private final CounterMetric numDocDeletes = new CounterMetric();
     private final CounterMetric numDocAppends = new CounterMetric();
     private final CounterMetric numDocUpdates = new CounterMetric();
+    private final NumericDocValuesField softDeletesField = Lucene.newSoftDeletesField();
+    private final boolean softDeleteEnabled;
+    private final SoftDeletesPolicy softDeletesPolicy;
+    private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener;
 
     /**
      * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh.
IndexingMemoryController polls this
@@ -184,8 +195,10 @@ public class InternalEngine extends Engine {
             assert translog.getGeneration() != null;
             this.translog = translog;
             this.localCheckpointTracker = createLocalCheckpointTracker(localCheckpointTrackerSupplier);
+            this.softDeleteEnabled = engineConfig.getIndexSettings().isSoftDeleteEnabled();
+            this.softDeletesPolicy = newSoftDeletesPolicy();
             this.combinedDeletionPolicy =
-                new CombinedDeletionPolicy(logger, translogDeletionPolicy, translog::getLastSyncedGlobalCheckpoint);
+                new CombinedDeletionPolicy(logger, translogDeletionPolicy, softDeletesPolicy, translog::getLastSyncedGlobalCheckpoint);
             writer = createWriter();
             bootstrapAppendOnlyInfoFromWriter(writer);
             historyUUID = loadHistoryUUID(writer);
@@ -215,6 +228,8 @@
             for (ReferenceManager.RefreshListener listener: engineConfig.getInternalRefreshListener()) {
                 this.internalSearcherManager.addListener(listener);
             }
+            this.lastRefreshedCheckpointListener = new LastRefreshedCheckpointListener(localCheckpointTracker.getCheckpoint());
+            this.internalSearcherManager.addListener(lastRefreshedCheckpointListener);
             success = true;
         } finally {
             if (success == false) {
@@ -240,6 +255,18 @@
         return localCheckpointTrackerSupplier.apply(maxSeqNo, localCheckpoint);
     }
 
+    private SoftDeletesPolicy newSoftDeletesPolicy() throws IOException {
+        final Map<String, String> commitUserData = store.readLastCommittedSegmentsInfo().userData;
+        final long lastMinRetainedSeqNo;
+        if (commitUserData.containsKey(Engine.MIN_RETAINED_SEQNO)) {
+            lastMinRetainedSeqNo = Long.parseLong(commitUserData.get(Engine.MIN_RETAINED_SEQNO));
+        } else {
+            lastMinRetainedSeqNo = Long.parseLong(commitUserData.get(SequenceNumbers.MAX_SEQ_NO)) + 1;
+        }
+        return new SoftDeletesPolicy(translog::getLastSyncedGlobalCheckpoint, lastMinRetainedSeqNo,
+            engineConfig.getIndexSettings().getSoftDeleteRetentionOperations());
+    }
+
     /**
      * This reference manager delegates all its refresh calls to another (internal) SearcherManager
      * The main purpose for this is that if we have external refreshes happening we don't issue extra
@@ -364,7 +391,7 @@
     }
 
     @Override
-    public InternalEngine recoverFromTranslog() throws IOException {
+    public InternalEngine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) throws IOException {
         flushLock.lock();
         try (ReleasableLock lock = readLock.acquire()) {
             ensureOpen();
@@ -372,7 +399,7 @@
                 throw new IllegalStateException("Engine has already been recovered");
             }
             try {
-                recoverFromTranslogInternal();
+                recoverFromTranslogInternal(translogRecoveryRunner, recoverUpToSeqNo);
             } catch (Exception e) {
                 try {
                     pendingTranslogRecovery.set(true); // just play safe and never allow commits on this see #ensureCanFlush
@@ -394,12 +421,13 @@
         pendingTranslogRecovery.set(false); // we are good - now we can commit
     }
 
-    private void recoverFromTranslogInternal() throws IOException {
+    private void recoverFromTranslogInternal(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) throws IOException {
         Translog.TranslogGeneration translogGeneration = translog.getGeneration();
         final int opsRecovered;
-        final long translogGen = Long.parseLong(lastCommittedSegmentInfos.getUserData().get(Translog.TRANSLOG_GENERATION_KEY));
-        try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGen)) {
-
opsRecovered = config().getTranslogRecoveryRunner().run(this, snapshot); + final long translogFileGen = Long.parseLong(lastCommittedSegmentInfos.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), translogFileGen), recoverUpToSeqNo)) { + opsRecovered = translogRecoveryRunner.run(this, snapshot); } catch (Exception e) { throw new EngineException(shardId, "failed to recover from translog", e); } @@ -450,19 +478,31 @@ public class InternalEngine extends Engine { revisitIndexDeletionPolicyOnTranslogSynced(); } + /** + * Creates a new history snapshot for reading operations since the provided seqno. + * The returned snapshot can be read from either the Lucene index or the translog files. + */ @Override - public Closeable acquireTranslogRetentionLock() { - return getTranslog().acquireRetentionLock(); + public Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { + if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { + return newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false); + } else { + return getTranslog().newSnapshotFromMinSeqNo(startingSeqNo); + } } + /** + * Returns the estimated number of history operations whose seq# is at least the provided seq# in this engine. + */ @Override - public Translog.Snapshot newTranslogSnapshotFromMinSeqNo(long minSeqNo) throws IOException { - return getTranslog().newSnapshotFromMinSeqNo(minSeqNo); - } - - @Override - public int estimateTranslogOperationsFromMinSeq(long minSeqNo) { - return getTranslog().estimateTotalOperationsFromMinSeq(minSeqNo); + public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { + if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { + try (Translog.Snapshot snapshot = newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false)) { + return snapshot.totalOperations(); + } + } else { + return getTranslog().estimateTotalOperationsFromMinSeq(startingSeqNo); + } } @Override @@ -478,6 +518,7 @@ public class InternalEngine extends Engine { private void revisitIndexDeletionPolicyOnTranslogSynced() throws IOException { if (combinedDeletionPolicy.hasUnreferencedCommits()) { indexWriter.deleteUnusedFiles(); + translog.trimUnreferencedReaders(); } } @@ -686,8 +727,7 @@ public class InternalEngine extends Engine { + index.getAutoGeneratedIdTimestamp(); switch (index.origin()) { case PRIMARY: - assert (index.version() == Versions.MATCH_ANY && index.versionType() == VersionType.INTERNAL) - : "version: " + index.version() + " type: " + index.versionType(); + assertPrimaryCanOptimizeAddDocument(index); return true; case PEER_RECOVERY: case REPLICA: @@ -704,9 +744,15 @@ public class InternalEngine extends Engine { return false; } + protected boolean assertPrimaryCanOptimizeAddDocument(final Index index) { + assert (index.version() == Versions.MATCH_ANY && index.versionType() == VersionType.INTERNAL) + : "version: " + index.version() + " type: " + index.versionType(); + return true; + } + private boolean assertIncomingSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) { if (origin == Operation.Origin.PRIMARY) { - assert assertOriginPrimarySequenceNumber(seqNo); + assertPrimaryIncomingSequenceNumber(origin, seqNo); } else { // sequence number should be set when operation origin is
not primary assert seqNo >= 0 : "recovery or replica ops should have an assigned seq no.; origin: " + origin; @@ -714,7 +760,7 @@ public class InternalEngine extends Engine { return true; } - protected boolean assertOriginPrimarySequenceNumber(final long seqNo) { + protected boolean assertPrimaryIncomingSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) { // sequence number should not be set when operation origin is primary assert seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO : "primary operations must never have an assigned sequence number but was [" + seqNo + "]"; @@ -732,7 +778,7 @@ public class InternalEngine extends Engine { * @param operation the operation * @return the sequence number */ - protected long doGenerateSeqNoForOperation(final Operation operation) { + long doGenerateSeqNoForOperation(final Operation operation) { return localCheckpointTracker.generateSeqNo(); } @@ -775,20 +821,13 @@ public class InternalEngine extends Engine { * if A arrives on the shard first we use addDocument since maxUnsafeAutoIdTimestamp is < 10. A` will then just be skipped or calls * updateDocument. */ - final IndexingStrategy plan; - - if (index.origin() == Operation.Origin.PRIMARY) { - plan = planIndexingAsPrimary(index); - } else { - // non-primary mode (i.e., replica or recovery) - plan = planIndexingAsNonPrimary(index); - } + final IndexingStrategy plan = indexingStrategyForOperation(index); final IndexResult indexResult; if (plan.earlyResultOnPreFlightError.isPresent()) { indexResult = plan.earlyResultOnPreFlightError.get(); assert indexResult.getResultType() == Result.Type.FAILURE : indexResult.getResultType(); - } else if (plan.indexIntoLucene) { + } else if (plan.indexIntoLucene || plan.addStaleOpToLucene) { indexResult = indexIntoLucene(index, plan); } else { indexResult = new IndexResult( @@ -799,8 +838,10 @@ public class InternalEngine extends Engine { if (indexResult.getResultType() == Result.Type.SUCCESS) { location = translog.add(new Translog.Index(index, indexResult)); } else if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - // if we have document failure, record it as a no-op in the translog with the generated seq_no - location = translog.add(new Translog.NoOp(indexResult.getSeqNo(), index.primaryTerm(), indexResult.getFailure().getMessage())); + // if we have document failure, record it as a no-op in the translog and Lucene with the generated seq_no + final NoOp noOp = new NoOp(indexResult.getSeqNo(), index.primaryTerm(), index.origin(), + index.startTime(), indexResult.getFailure().toString()); + location = innerNoOp(noOp).getTranslogLocation(); } else { location = null; } @@ -828,7 +869,8 @@ public class InternalEngine extends Engine { } } - private IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException { + protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException { + assertNonPrimaryOrigin(index); final IndexingStrategy plan; final boolean appendOnlyRequest = canOptimizeAddDocument(index); if (appendOnlyRequest && mayHaveBeenIndexedBefore(index) == false && index.seqNo() > maxSeqNoOfNonAppendOnlyOperations.get()) { @@ -852,7 +894,6 @@ public class InternalEngine extends Engine { // unlike the primary, replicas don't really care to about creation status of documents // this allows to ignore the case where a document was found in the live version maps in // a delete state and return false for the created flag in favor of code simplicity - final OpVsLuceneDocStatus opVsLucene; if (index.seqNo() 
<= localCheckpointTracker.getCheckpoint()){ // the operation seq# is lower then the current local checkpoint and thus was already put into lucene // this can happen during recovery where older operations are sent from the translog that are already @@ -861,22 +902,30 @@ public class InternalEngine extends Engine { // question may have been deleted in an out of order op that is not replayed. // See testRecoverFromStoreWithOutOfOrderDelete for an example of local recovery // See testRecoveryWithOutOfOrderDelete for an example of peer recovery - opVsLucene = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; - } else { - opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index); - } - if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { plan = IndexingStrategy.processButSkipLucene(false, index.seqNo(), index.version()); } else { - plan = IndexingStrategy.processNormally( - opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, index.seqNo(), index.version() - ); + final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index); + if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { + plan = IndexingStrategy.processAsStaleOp(softDeleteEnabled, index.seqNo(), index.version()); + } else { + plan = IndexingStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, + index.seqNo(), index.version()); + } } } return plan; } - private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { + protected IndexingStrategy indexingStrategyForOperation(final Index index) throws IOException { + if (index.origin() == Operation.Origin.PRIMARY) { + return planIndexingAsPrimary(index); + } else { + // non-primary mode (i.e., replica or recovery) + return planIndexingAsNonPrimary(index); + } + } + + protected final IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { assert index.origin() == Operation.Origin.PRIMARY : "planing as primary but origin isn't. got " + index.origin(); final IndexingStrategy plan; // resolve an external operation into an internal one which is safe to replay @@ -919,7 +968,7 @@ public class InternalEngine extends Engine { throws IOException { assert plan.seqNoForIndexing >= 0 : "ops should have an assigned seq no.; origin: " + index.origin(); assert plan.versionForIndexing >= 0 : "version must be set. got " + plan.versionForIndexing; - assert plan.indexIntoLucene; + assert plan.indexIntoLucene || plan.addStaleOpToLucene; /* Update the document's sequence number and primary term; the sequence number here is derived here from either the sequence * number service if this is on the primary, or the existing document's sequence number if this is on the replica. The * primary term here has already been set, see IndexShard#prepareIndex where the Engine$Index operation is created. 
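To make the non-primary planning above concrete: an operation at or below the local checkpoint is skipped outright, an operation older than what Lucene already holds is still indexed but pre-marked as soft-deleted (a "stale" op), and everything else is processed normally. A minimal, self-contained sketch of that decision tree, using invented names rather than the engine's actual API:

    enum ReplicaPlan { SKIP_LUCENE, ADD_AS_STALE, PROCESS_NORMALLY }

    final class ReplicaPlanner {
        /**
         * @param opSeqNo          seq# carried by the incoming replicated operation
         * @param localCheckpoint  highest seq# up to which all operations are already applied
         * @param newerDocInLucene whether Lucene already holds this doc under a higher seq#
         */
        static ReplicaPlan plan(long opSeqNo, long localCheckpoint, boolean newerDocInLucene) {
            if (opSeqNo <= localCheckpoint) {
                return ReplicaPlan.SKIP_LUCENE; // already applied during an earlier recovery/replication round
            }
            // with soft-deletes enabled, a stale op is still added (pre-marked deleted)
            // so the Lucene index keeps a complete operation history
            return newerDocInLucene ? ReplicaPlan.ADD_AS_STALE : ReplicaPlan.PROCESS_NORMALLY;
        }
    }

The same shape recurs for deletes further down, except that a stale or currently-deleted delete is materialized as an added tombstone document instead of a soft-update.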
@@ -927,7 +976,9 @@ public class InternalEngine extends Engine { index.parsedDoc().updateSeqID(plan.seqNoForIndexing, index.primaryTerm()); index.parsedDoc().version().setLongValue(plan.versionForIndexing); try { - if (plan.useLuceneUpdateDocument) { + if (plan.addStaleOpToLucene) { + addStaleDocs(index.docs(), indexWriter); + } else if (plan.useLuceneUpdateDocument) { updateDocs(index.uid(), index.docs(), indexWriter); } else { // document does not exists, we can optimize for create, but double check if assertions are running @@ -991,16 +1042,29 @@ public class InternalEngine extends Engine { numDocAppends.inc(docs.size()); } - private static final class IndexingStrategy { + private void addStaleDocs(final List<ParseContext.Document> docs, final IndexWriter indexWriter) throws IOException { + assert softDeleteEnabled : "Add history documents but soft-deletes is disabled"; + for (ParseContext.Document doc : docs) { + doc.add(softDeletesField); // soft-delete every document before adding to Lucene + } + if (docs.size() > 1) { + indexWriter.addDocuments(docs); + } else { + indexWriter.addDocument(docs.get(0)); + } + } + + protected static final class IndexingStrategy { final boolean currentNotFoundOrDeleted; final boolean useLuceneUpdateDocument; final long seqNoForIndexing; final long versionForIndexing; final boolean indexIntoLucene; + final boolean addStaleOpToLucene; final Optional<IndexResult> earlyResultOnPreFlightError; private IndexingStrategy(boolean currentNotFoundOrDeleted, boolean useLuceneUpdateDocument, - boolean indexIntoLucene, long seqNoForIndexing, + boolean indexIntoLucene, boolean addStaleOpToLucene, long seqNoForIndexing, long versionForIndexing, IndexResult earlyResultOnPreFlightError) { assert useLuceneUpdateDocument == false || indexIntoLucene : "use lucene update is set to true, but we're not indexing into lucene"; @@ -1013,37 +1077,40 @@ public class InternalEngine extends Engine { this.seqNoForIndexing = seqNoForIndexing; this.versionForIndexing = versionForIndexing; this.indexIntoLucene = indexIntoLucene; + this.addStaleOpToLucene = addStaleOpToLucene; this.earlyResultOnPreFlightError = earlyResultOnPreFlightError == null ?
Optional.empty() : Optional.of(earlyResultOnPreFlightError); } static IndexingStrategy optimizedAppendOnly(long seqNoForIndexing) { - return new IndexingStrategy(true, false, true, seqNoForIndexing, 1, null); + return new IndexingStrategy(true, false, true, false, seqNoForIndexing, 1, null); } static IndexingStrategy skipDueToVersionConflict( VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion, long term) { final IndexResult result = new IndexResult(e, currentVersion, term); return new IndexingStrategy( - currentNotFoundOrDeleted, false, false, SequenceNumbers.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result); + currentNotFoundOrDeleted, false, false, false, SequenceNumbers.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result); } static IndexingStrategy processNormally(boolean currentNotFoundOrDeleted, long seqNoForIndexing, long versionForIndexing) { return new IndexingStrategy(currentNotFoundOrDeleted, currentNotFoundOrDeleted == false, - true, seqNoForIndexing, versionForIndexing, null); + true, false, seqNoForIndexing, versionForIndexing, null); } static IndexingStrategy overrideExistingAsIfNotThere( long seqNoForIndexing, long versionForIndexing) { - return new IndexingStrategy(true, true, true, seqNoForIndexing, versionForIndexing, null); + return new IndexingStrategy(true, true, true, false, seqNoForIndexing, versionForIndexing, null); } - static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDeleted, - long seqNoForIndexing, long versionForIndexing) { - return new IndexingStrategy(currentNotFoundOrDeleted, false, - false, seqNoForIndexing, versionForIndexing, null); + static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDeleted, long seqNoForIndexing, long versionForIndexing) { + return new IndexingStrategy(currentNotFoundOrDeleted, false, false, false, seqNoForIndexing, versionForIndexing, null); + } + + static IndexingStrategy processAsStaleOp(boolean addStaleOpToLucene, long seqNoForIndexing, long versionForIndexing) { + return new IndexingStrategy(false, false, false, addStaleOpToLucene, seqNoForIndexing, versionForIndexing, null); } } @@ -1070,10 +1137,18 @@ public class InternalEngine extends Engine { } private void updateDocs(final Term uid, final List<ParseContext.Document> docs, final IndexWriter indexWriter) throws IOException { - if (docs.size() > 1) { - indexWriter.updateDocuments(uid, docs); + if (softDeleteEnabled) { + if (docs.size() > 1) { + indexWriter.softUpdateDocuments(uid, docs, softDeletesField); + } else { + indexWriter.softUpdateDocument(uid, docs.get(0), softDeletesField); + } } else { - indexWriter.updateDocument(uid, docs.get(0)); + if (docs.size() > 1) { + indexWriter.updateDocuments(uid, docs); + } else { + indexWriter.updateDocument(uid, docs.get(0)); + } } numDocUpdates.inc(docs.size()); } @@ -1088,16 +1163,11 @@ public class InternalEngine extends Engine { try (ReleasableLock ignored = readLock.acquire(); Releasable ignored2 = versionMap.acquireLock(delete.uid().bytes())) { ensureOpen(); lastWriteNanos = delete.startTime(); - final DeletionStrategy plan; - if (delete.origin() == Operation.Origin.PRIMARY) { - plan = planDeletionAsPrimary(delete); - } else { - plan = planDeletionAsNonPrimary(delete); - } + final DeletionStrategy plan = deletionStrategyForOperation(delete); if (plan.earlyResultOnPreflightError.isPresent()) { deleteResult = plan.earlyResultOnPreflightError.get(); - } else if (plan.deleteFromLucene) { + } else if (plan.deleteFromLucene || plan.addStaleOpToLucene) { deleteResult = deleteInLucene(delete,
plan); } else { deleteResult = new DeleteResult( @@ -1108,8 +1178,10 @@ public class InternalEngine extends Engine { if (deleteResult.getResultType() == Result.Type.SUCCESS) { location = translog.add(new Translog.Delete(delete, deleteResult)); } else if (deleteResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - location = translog.add(new Translog.NoOp(deleteResult.getSeqNo(), - delete.primaryTerm(), deleteResult.getFailure().getMessage())); + // if we have document failure, record it as a no-op in the translog and Lucene with the generated seq_no + final NoOp noOp = new NoOp(deleteResult.getSeqNo(), delete.primaryTerm(), delete.origin(), + delete.startTime(), deleteResult.getFailure().toString()); + location = innerNoOp(noOp).getTranslogLocation(); } else { location = null; } @@ -1132,15 +1204,24 @@ public class InternalEngine extends Engine { return deleteResult; } - private DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOException { - assert delete.origin() != Operation.Origin.PRIMARY : "planing as primary but got " + delete.origin(); + protected DeletionStrategy deletionStrategyForOperation(final Delete delete) throws IOException { + if (delete.origin() == Operation.Origin.PRIMARY) { + return planDeletionAsPrimary(delete); + } else { + // non-primary mode (i.e., replica or recovery) + return planDeletionAsNonPrimary(delete); + } + } + + protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOException { + assertNonPrimaryOrigin(delete); maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(delete.seqNo(), curr)); assert maxSeqNoOfNonAppendOnlyOperations.get() >= delete.seqNo() : "max_seqno of non-append-only was not updated;" + "max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of delete [" + delete.seqNo() + "]"; // unlike the primary, replicas don't really care to about found status of documents // this allows to ignore the case where a document was found in the live version maps in // a delete state and return true for the found flag in favor of code simplicity - final OpVsLuceneDocStatus opVsLucene; + final DeletionStrategy plan; if (delete.seqNo() <= localCheckpointTracker.getCheckpoint()) { // the operation seq# is lower then the current local checkpoint and thus was already put into lucene // this can happen during recovery where older operations are sent from the translog that are already @@ -1149,23 +1230,25 @@ public class InternalEngine extends Engine { // question may have been deleted in an out of order op that is not replayed. 
// See testRecoverFromStoreWithOutOfOrderDelete for an example of local recovery - // See testRecoveryWithOutOfOrderDelete for an example of peer recovery - opVsLucene = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; - } else { - opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete); - } - - final DeletionStrategy plan; - if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { plan = DeletionStrategy.processButSkipLucene(false, delete.seqNo(), delete.version()); } else { - plan = DeletionStrategy.processNormally( - opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, - delete.seqNo(), delete.version()); + final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete); + if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { + plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, false, delete.seqNo(), delete.version()); + } else { + plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, + delete.seqNo(), delete.version()); + } } return plan; } - private DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException { + protected boolean assertNonPrimaryOrigin(final Operation operation) { + assert operation.origin() != Operation.Origin.PRIMARY : "planning as primary but got " + operation.origin(); + return true; + } + + protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException { assert delete.origin() == Operation.Origin.PRIMARY : "planing as primary but got " + delete.origin(); // resolve operation from external to internal final VersionValue versionValue = resolveDocVersion(delete); @@ -1195,15 +1278,31 @@ public class InternalEngine extends Engine { private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) throws IOException { try { - if (plan.currentlyDeleted == false) { + if (softDeleteEnabled) { + final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newDeleteTombstoneDoc(delete.type(), delete.id()); + assert tombstone.docs().size() == 1 : "Tombstone doc should have a single doc [" + tombstone + "]"; + tombstone.updateSeqID(plan.seqNoOfDeletion, delete.primaryTerm()); + tombstone.version().setLongValue(plan.versionOfDeletion); + final ParseContext.Document doc = tombstone.docs().get(0); + assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : + "Delete tombstone document but _tombstone field is not set [" + doc + " ]"; + doc.add(softDeletesField); + if (plan.addStaleOpToLucene || plan.currentlyDeleted) { + indexWriter.addDocument(doc); + } else { + indexWriter.softUpdateDocument(delete.uid(), doc, softDeletesField); + } + } else if (plan.currentlyDeleted == false) { // any exception that comes from this is a either an ACE or a fatal exception there // can't be any document failures coming from this indexWriter.deleteDocuments(delete.uid()); - numDocDeletes.inc(); } - versionMap.putDeleteUnderLock(delete.uid().bytes(), - new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), - engineConfig.getThreadPool().relativeTimeInMillis())); + if (plan.deleteFromLucene) { + numDocDeletes.inc(); + versionMap.putDeleteUnderLock(delete.uid().bytes(), + new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), + engineConfig.getThreadPool().relativeTimeInMillis())); + } return new DeleteResult( plan.versionOfDeletion, getPrimaryTerm(), plan.seqNoOfDeletion, plan.currentlyDeleted == false); } catch (Exception ex) { @@ -1217,15 +1316,16 @@ public class InternalEngine extends Engine { } } -
private static final class DeletionStrategy { + protected static final class DeletionStrategy { // of a rare double delete final boolean deleteFromLucene; + final boolean addStaleOpToLucene; final boolean currentlyDeleted; final long seqNoOfDeletion; final long versionOfDeletion; final Optional<DeleteResult> earlyResultOnPreflightError; - private DeletionStrategy(boolean deleteFromLucene, boolean currentlyDeleted, + private DeletionStrategy(boolean deleteFromLucene, boolean addStaleOpToLucene, boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion, DeleteResult earlyResultOnPreflightError) { assert (deleteFromLucene && earlyResultOnPreflightError != null) == false : @@ -1233,6 +1333,7 @@ public class InternalEngine extends Engine { "deleteFromLucene: " + deleteFromLucene + " earlyResultOnPreFlightError:" + earlyResultOnPreflightError; this.deleteFromLucene = deleteFromLucene; + this.addStaleOpToLucene = addStaleOpToLucene; this.currentlyDeleted = currentlyDeleted; this.seqNoOfDeletion = seqNoOfDeletion; this.versionOfDeletion = versionOfDeletion; @@ -1244,16 +1345,22 @@ public class InternalEngine extends Engine { VersionConflictEngineException e, long currentVersion, long term, boolean currentlyDeleted) { final long unassignedSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; final DeleteResult deleteResult = new DeleteResult(e, currentVersion, term, unassignedSeqNo, currentlyDeleted == false); - return new DeletionStrategy(false, currentlyDeleted, unassignedSeqNo, Versions.NOT_FOUND, deleteResult); + return new DeletionStrategy(false, false, currentlyDeleted, unassignedSeqNo, Versions.NOT_FOUND, deleteResult); } static DeletionStrategy processNormally(boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion) { - return new DeletionStrategy(true, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); + return new DeletionStrategy(true, false, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); } - public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion) { - return new DeletionStrategy(false, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); + public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, + long seqNoOfDeletion, long versionOfDeletion) { + return new DeletionStrategy(false, false, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); + } + + static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, boolean currentlyDeleted, + long seqNoOfDeletion, long versionOfDeletion) { + return new DeletionStrategy(false, addStaleOpToLucene, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); } } @@ -1282,7 +1389,28 @@ public class InternalEngine extends Engine { assert noOp.seqNo() > SequenceNumbers.NO_OPS_PERFORMED; final long seqNo = noOp.seqNo(); try { - final NoOpResult noOpResult = new NoOpResult(getPrimaryTerm(), noOp.seqNo()); + Exception failure = null; + if (softDeleteEnabled) { + try { + final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newNoopTombstoneDoc(noOp.reason()); + tombstone.updateSeqID(noOp.seqNo(), noOp.primaryTerm()); + // A noop tombstone does not require a _version but it's added to have fully dense docvalues for the version field. + // 1L is selected to optimize the compression because it is likely the most common value in the version field.
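The two comments above carry the whole trick: giving every noop tombstone the constant version 1L keeps the _version docvalues fully dense and highly repetitive, which the docvalues encoding compresses to almost nothing. A standalone Lucene sketch of the same effect, using only stock Lucene classes (an illustration under those assumptions, not engine code):

    import java.nio.file.Files;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class ConstantVersionDemo {
        public static void main(String[] args) throws Exception {
            try (Directory dir = FSDirectory.open(Files.createTempDirectory("dv"));
                 IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
                for (int i = 0; i < 1000; i++) {
                    Document doc = new Document();
                    // every doc carries the same value, so the field is dense and constant
                    doc.add(new NumericDocValuesField("_version", 1L));
                    writer.addDocument(doc);
                }
                writer.commit(); // the per-segment _version docvalues encode as a constant
            }
        }
    }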
+ tombstone.version().setLongValue(1L); + assert tombstone.docs().size() == 1 : "Tombstone should have a single doc [" + tombstone + "]"; + final ParseContext.Document doc = tombstone.docs().get(0); + assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null + : "Noop tombstone document but _tombstone field is not set [" + doc + " ]"; + doc.add(softDeletesField); + indexWriter.addDocument(doc); + } catch (Exception ex) { + if (maybeFailEngine("noop", ex)) { + throw ex; + } + failure = ex; + } + } + final NoOpResult noOpResult = failure != null ? new NoOpResult(getPrimaryTerm(), noOp.seqNo(), failure) : new NoOpResult(getPrimaryTerm(), noOp.seqNo()); if (noOp.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason())); noOpResult.setTranslogLocation(location); @@ -1307,6 +1435,7 @@ public class InternalEngine extends Engine { // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) // both refresh types will result in an internal refresh but only the external will also // pass the new reader reference to the external reader manager. + final long localCheckpointBeforeRefresh = getLocalCheckpoint(); // this will also cause version map ram to be freed hence we always account for it. final long bytes = indexWriter.ramBytesUsed() + versionMap.ramBytesUsedForRefresh(); @@ -1316,22 +1445,15 @@ public class InternalEngine extends Engine { if (store.tryIncRef()) { // increment the ref just to ensure nobody closes the store during a refresh try { - switch (scope) { - case EXTERNAL: - // even though we maintain 2 managers we really do the heavy-lifting only once. - // the second refresh will only do the extra work we have to do for warming caches etc. - externalSearcherManager.maybeRefreshBlocking(); - // the break here is intentional we never refresh both internal / external together - break; - case INTERNAL: - internalSearcherManager.maybeRefreshBlocking(); - break; - default: - throw new IllegalArgumentException("unknown scope: " + scope); - } + // even though we maintain 2 managers we really do the heavy-lifting only once. + // the second refresh will only do the extra work we have to do for warming caches etc. + ReferenceManager<IndexSearcher> referenceManager = getReferenceManager(scope); + // it is intentional that we never refresh both internal / external together + referenceManager.maybeRefreshBlocking(); } finally { store.decRef(); } + lastRefreshedCheckpointListener.updateRefreshedCheckpoint(localCheckpointBeforeRefresh); } } catch (AlreadyClosedException e) { failOnTragicEvent(e); @@ -1346,7 +1468,8 @@ public class InternalEngine extends Engine { } finally { writingBytes.addAndGet(-bytes); } - + assert lastRefreshedCheckpoint() >= localCheckpointBeforeRefresh : "refresh checkpoint was not advanced; " + + "local_checkpoint=" + localCheckpointBeforeRefresh + " refresh_checkpoint=" + lastRefreshedCheckpoint(); // TODO: maybe we should just put a scheduled job in threadPool? // We check for pruning in each delete request, but we also prune here e.g. in case a delete burst comes in and then no more deletes // for a long time: @@ -1736,6 +1859,8 @@ public class InternalEngine extends Engine { // Revisit the deletion policy if we can clean up the snapshotting commit.
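The releaseCommit call that follows is the other half of the snapshot bookkeeping: acquiring an index commit for a snapshot pins it, and only when the last reference is dropped may the engine ask the writer to delete the now-unreferenced files. A toy model of that ref-counting (invented names; the real CombinedDeletionPolicy also weighs translog and soft-deletes retention):

    import java.util.HashMap;
    import java.util.Map;

    final class CommitRefCounts {
        // commit generation -> outstanding snapshot count; assumes balanced acquire/release
        private final Map<Long, Integer> refs = new HashMap<>();

        synchronized void acquire(long generation) {
            refs.merge(generation, 1, Integer::sum);
        }

        /** @return true when the last reference was released and cleanup may proceed */
        synchronized boolean release(long generation) {
            final int remaining = refs.merge(generation, -1, Integer::sum);
            if (remaining <= 0) {
                refs.remove(generation);
                return true; // the caller may now trigger e.g. IndexWriter#deleteUnusedFiles
            }
            return false;
        }
    }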
if (combinedDeletionPolicy.releaseCommit(snapshot)) { ensureOpen(); + // Here we don't have to trim translog because snapshotting an index commit + // does not lock translog or prevent unreferenced files from being trimmed. indexWriter.deleteUnusedFiles(); } } @@ -1875,35 +2000,14 @@ public class InternalEngine extends Engine { } @Override - public Searcher acquireSearcher(String source, SearcherScope scope) { - /* Acquire order here is store -> manager since we need - * to make sure that the store is not closed before - * the searcher is acquired. */ - store.incRef(); - Releasable releasable = store::decRef; - try { - final ReferenceManager<IndexSearcher> referenceManager; - switch (scope) { - case INTERNAL: - referenceManager = internalSearcherManager; - break; - case EXTERNAL: - referenceManager = externalSearcherManager; - break; - default: - throw new IllegalStateException("unknown scope: " + scope); - } - EngineSearcher engineSearcher = new EngineSearcher(source, referenceManager, store, logger); - releasable = null; // success - hand over the reference to the engine searcher - return engineSearcher; - } catch (AlreadyClosedException ex) { - throw ex; - } catch (Exception ex) { - ensureOpen(ex); // throw EngineCloseException here if we are already closed - logger.error(() -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex); - throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex); - } finally { - Releasables.close(releasable) + protected final ReferenceManager<IndexSearcher> getReferenceManager(SearcherScope scope) { + switch (scope) { + case INTERNAL: + return internalSearcherManager; + case EXTERNAL: + return externalSearcherManager; + default: + throw new IllegalStateException("unknown scope: " + scope); } } @@ -1926,7 +2030,11 @@ public class InternalEngine extends Engine { // pkg-private for testing IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException { - return new IndexWriter(directory, iwc); + if (Assertions.ENABLED) { + return new AssertingIndexWriter(directory, iwc); + } else { + return new IndexWriter(directory, iwc); + } } private IndexWriterConfig getIndexWriterConfig() { @@ -1942,11 +2050,15 @@ public class InternalEngine extends Engine { } iwc.setInfoStream(verbose ?
InfoStream.getDefault() : new LoggerInfoStream(logger)); iwc.setMergeScheduler(mergeScheduler); - MergePolicy mergePolicy = config().getMergePolicy(); // Give us the opportunity to upgrade old segments while performing // background merges - mergePolicy = new ElasticsearchMergePolicy(mergePolicy); - iwc.setMergePolicy(mergePolicy); + MergePolicy mergePolicy = config().getMergePolicy(); + if (softDeleteEnabled) { + iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD); + mergePolicy = new RecoverySourcePruneMergePolicy(SourceFieldMapper.RECOVERY_SOURCE_NAME, softDeletesPolicy::getRetentionQuery, + new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy)); + } + iwc.setMergePolicy(new ElasticsearchMergePolicy(mergePolicy)); iwc.setSimilarity(engineConfig.getSimilarity()); iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac()); iwc.setCodec(engineConfig.getCodec()); @@ -2143,6 +2255,9 @@ public class InternalEngine extends Engine { commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(localCheckpointTracker.getMaxSeqNo())); commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get())); commitData.put(HISTORY_UUID_KEY, historyUUID); + if (softDeleteEnabled) { + commitData.put(Engine.MIN_RETAINED_SEQNO, Long.toString(softDeletesPolicy.getMinRetainedSeqNo())); + } logger.trace("committing writer with commit data [{}]", commitData); return commitData.entrySet().iterator(); }); @@ -2198,6 +2313,7 @@ public class InternalEngine extends Engine { final IndexSettings indexSettings = engineConfig.getIndexSettings(); translogDeletionPolicy.setRetentionAgeInMillis(indexSettings.getTranslogRetentionAge().getMillis()); translogDeletionPolicy.setRetentionSizeInBytes(indexSettings.getTranslogRetentionSize().getBytes()); + softDeletesPolicy.setRetentionOperations(indexSettings.getSoftDeleteRetentionOperations()); } public MergeStats getMergeStats() { @@ -2292,6 +2408,69 @@ public class InternalEngine extends Engine { return numDocUpdates.count(); } + @Override + public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService, + long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException { + // TODO: Should we defer the refresh until we really need it? 
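On the TODO above: the snapshot code below already defers part of the cost by refreshing only when the last refreshed checkpoint does not yet cover the requested range. A condensed sketch of that gate, with invented names, assuming a refresh listener publishes the checkpoint the way LastRefreshedCheckpointListener does later in this file:

    import java.util.concurrent.atomic.AtomicLong;

    final class RefreshGate {
        private final AtomicLong refreshedCheckpoint = new AtomicLong(-1);

        /** Called from a refresh listener once ops up to {@code checkpoint} are searchable. */
        void markRefreshed(long checkpoint) {
            refreshedCheckpoint.updateAndGet(curr -> Math.max(curr, checkpoint));
        }

        /** @return true if a refresh must happen before reading changes up to {@code toSeqNo}. */
        boolean needsRefresh(long toSeqNo) {
            return refreshedCheckpoint.get() < toSeqNo;
        }
    }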
+ ensureOpen(); + if (lastRefreshedCheckpoint() < toSeqNo) { + refresh(source, SearcherScope.INTERNAL); + } + Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL); + try { + LuceneChangesSnapshot snapshot = new LuceneChangesSnapshot( + searcher, mapperService, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, fromSeqNo, toSeqNo, requiredFullRange); + searcher = null; + return snapshot; + } catch (Exception e) { + try { + maybeFailEngine("acquire changes snapshot", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw e; + } finally { + IOUtils.close(searcher); + } + } + + @Override + public boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException { + if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { + return getMinRetainedSeqNo() <= startingSeqNo; + } else { + final long currentLocalCheckpoint = getLocalCheckpointTracker().getCheckpoint(); + final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1); + try (Translog.Snapshot snapshot = getTranslog().newSnapshotFromMinSeqNo(startingSeqNo)) { + Translog.Operation operation; + while ((operation = snapshot.next()) != null) { + if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { + tracker.markSeqNoAsCompleted(operation.seqNo()); + } + } + } + return tracker.getCheckpoint() >= currentLocalCheckpoint; + } + } + + /** + * Returns the minimum seqno that is retained in the Lucene index. + * Operations whose seq# are at least this value should exist in the Lucene index. + */ + final long getMinRetainedSeqNo() { + assert softDeleteEnabled : Thread.currentThread().getName(); + return softDeletesPolicy.getMinRetainedSeqNo(); + } + + @Override + public Closeable acquireRetentionLockForPeerRecovery() { + if (softDeleteEnabled) { + return softDeletesPolicy.acquireRetentionLock(); + } else { + return translog.acquireRetentionLock(); + } + } + @Override public boolean isRecovering() { return pendingTranslogRecovery.get(); @@ -2307,4 +2486,69 @@ public class InternalEngine extends Engine { } return commitData; } + + private final class AssertingIndexWriter extends IndexWriter { + AssertingIndexWriter(Directory d, IndexWriterConfig conf) throws IOException { + super(d, conf); + } + @Override + public long updateDocument(Term term, Iterable<? extends IndexableField> doc) throws IOException { + assert softDeleteEnabled == false : "Call #updateDocument but soft-deletes is enabled"; + return super.updateDocument(term, doc); + } + @Override + public long updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException { + assert softDeleteEnabled == false : "Call #updateDocuments but soft-deletes is enabled"; + return super.updateDocuments(delTerm, docs); + } + @Override + public long deleteDocuments(Term... terms) throws IOException { + assert softDeleteEnabled == false : "Call #deleteDocuments but soft-deletes is enabled"; + return super.deleteDocuments(terms); + } + @Override + public long softUpdateDocument(Term term, Iterable<? extends IndexableField> doc, Field... softDeletes) throws IOException { + assert softDeleteEnabled : "Call #softUpdateDocument but soft-deletes is disabled"; + return super.softUpdateDocument(term, doc, softDeletes); + } + @Override + public long softUpdateDocuments(Term term, Iterable<? extends Iterable<? extends IndexableField>> docs, Field...
softDeletes) throws IOException { + assert softDeleteEnabled : "Call #softUpdateDocuments but soft-deletes is disabled"; + return super.softUpdateDocuments(term, docs, softDeletes); + } + } + + /** + * Returns the last local checkpoint value that has been refreshed internally. + */ + final long lastRefreshedCheckpoint() { + return lastRefreshedCheckpointListener.refreshedCheckpoint.get(); + } + + private final class LastRefreshedCheckpointListener implements ReferenceManager.RefreshListener { + final AtomicLong refreshedCheckpoint; + private long pendingCheckpoint; + + LastRefreshedCheckpointListener(long initialLocalCheckpoint) { + this.refreshedCheckpoint = new AtomicLong(initialLocalCheckpoint); + } + + @Override + public void beforeRefresh() { + // all changes until this point should be visible after refresh + pendingCheckpoint = localCheckpointTracker.getCheckpoint(); + } + + @Override + public void afterRefresh(boolean didRefresh) { + if (didRefresh) { + updateRefreshedCheckpoint(pendingCheckpoint); + } + } + + void updateRefreshedCheckpoint(long checkpoint) { + refreshedCheckpoint.updateAndGet(curr -> Math.max(curr, checkpoint)); + assert refreshedCheckpoint.get() >= checkpoint : refreshedCheckpoint.get() + " < " + checkpoint; + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java new file mode 100644 index 00000000000..a44f8a0f835 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java @@ -0,0 +1,368 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.util.ArrayUtil; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.fieldvisitor.FieldsVisitor; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.VersionFieldMapper; +import org.elasticsearch.index.translog.Translog; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * A {@link Translog.Snapshot} from changes in a Lucene index + */ +final class LuceneChangesSnapshot implements Translog.Snapshot { + static final int DEFAULT_BATCH_SIZE = 1024; + + private final int searchBatchSize; + private final long fromSeqNo, toSeqNo; + private long lastSeenSeqNo; + private int skippedOperations; + private final boolean requiredFullRange; + + private final IndexSearcher indexSearcher; + private final MapperService mapperService; + private int docIndex = 0; + private final int totalHits; + private ScoreDoc[] scoreDocs; + private final ParallelArray parallelArray; + private final Closeable onClose; + + /** + * Creates a new "translog" snapshot from Lucene for reading operations whose seq# is in the specified range.
+ * + * @param engineSearcher the internal engine searcher which will be taken over if the snapshot is opened successfully + * @param mapperService the mapper service which will be mainly used to resolve the document's type and uid + * @param searchBatchSize the number of documents that should be returned by each search + * @param fromSeqNo the minimum requested seq# - inclusive + * @param toSeqNo the maximum requested seq# - inclusive + * @param requiredFullRange if true, the snapshot will strictly check for the existence of operations between fromSeqNo and toSeqNo + */ + LuceneChangesSnapshot(Engine.Searcher engineSearcher, MapperService mapperService, int searchBatchSize, + long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException { + if (fromSeqNo < 0 || toSeqNo < 0 || fromSeqNo > toSeqNo) { + throw new IllegalArgumentException("Invalid range; from_seqno [" + fromSeqNo + "], to_seqno [" + toSeqNo + "]"); + } + if (searchBatchSize <= 0) { + throw new IllegalArgumentException("Search_batch_size must be positive [" + searchBatchSize + "]"); + } + final AtomicBoolean closed = new AtomicBoolean(); + this.onClose = () -> { + if (closed.compareAndSet(false, true)) { + IOUtils.close(engineSearcher); + } + }; + this.mapperService = mapperService; + this.searchBatchSize = searchBatchSize; + this.fromSeqNo = fromSeqNo; + this.toSeqNo = toSeqNo; + this.lastSeenSeqNo = fromSeqNo - 1; + this.requiredFullRange = requiredFullRange; + this.indexSearcher = new IndexSearcher(Lucene.wrapAllDocsLive(engineSearcher.getDirectoryReader())); + this.indexSearcher.setQueryCache(null); + this.parallelArray = new ParallelArray(searchBatchSize); + final TopDocs topDocs = searchOperations(null); + this.totalHits = Math.toIntExact(topDocs.totalHits.value); + this.scoreDocs = topDocs.scoreDocs; + fillParallelArray(scoreDocs, parallelArray); + } + + @Override + public void close() throws IOException { + onClose.close(); + } + + @Override + public int totalOperations() { + return totalHits; + } + + @Override + public int skippedOperations() { + return skippedOperations; + } + + @Override + public Translog.Operation next() throws IOException { + Translog.Operation op = null; + for (int idx = nextDocIndex(); idx != -1; idx = nextDocIndex()) { + op = readDocAsOp(idx); + if (op != null) { + break; + } + } + if (requiredFullRange) { + rangeCheck(op); + } + if (op != null) { + lastSeenSeqNo = op.seqNo(); + } + return op; + } + + private void rangeCheck(Translog.Operation op) { + if (op == null) { + if (lastSeenSeqNo < toSeqNo) { + throw new IllegalStateException("Not all operations between from_seqno [" + fromSeqNo + "] " + + "and to_seqno [" + toSeqNo + "] found; prematurely terminated last_seen_seqno [" + lastSeenSeqNo + "]"); + } + } else { + final long expectedSeqNo = lastSeenSeqNo + 1; + if (op.seqNo() != expectedSeqNo) { + throw new IllegalStateException("Not all operations between from_seqno [" + fromSeqNo + "] " + + "and to_seqno [" + toSeqNo + "] found; expected seqno [" + expectedSeqNo + "]; found [" + op + "]"); + } + } + } + + private int nextDocIndex() throws IOException { + // we have processed all docs in the current search - fetch the next batch + if (docIndex == scoreDocs.length && docIndex > 0) { + final ScoreDoc prev = scoreDocs[scoreDocs.length - 1]; + scoreDocs = searchOperations(prev).scoreDocs; + fillParallelArray(scoreDocs, parallelArray); + docIndex = 0; + } + if (docIndex < scoreDocs.length) { + int idx = docIndex; + docIndex++; + return idx; + } + return -1; + } + + private void
fillParallelArray(ScoreDoc[] scoreDocs, ParallelArray parallelArray) throws IOException { + if (scoreDocs.length > 0) { + for (int i = 0; i < scoreDocs.length; i++) { + scoreDocs[i].shardIndex = i; + } + // for better loading performance we sort the array by docID and + // then visit all leaves in order. + ArrayUtil.introSort(scoreDocs, Comparator.comparingInt(i -> i.doc)); + int docBase = -1; + int maxDoc = 0; + List<LeafReaderContext> leaves = indexSearcher.getIndexReader().leaves(); + int readerIndex = 0; + CombinedDocValues combinedDocValues = null; + LeafReaderContext leaf = null; + for (int i = 0; i < scoreDocs.length; i++) { + ScoreDoc scoreDoc = scoreDocs[i]; + if (scoreDoc.doc >= docBase + maxDoc) { + do { + leaf = leaves.get(readerIndex++); + docBase = leaf.docBase; + maxDoc = leaf.reader().maxDoc(); + } while (scoreDoc.doc >= docBase + maxDoc); + combinedDocValues = new CombinedDocValues(leaf.reader()); + } + final int segmentDocID = scoreDoc.doc - docBase; + final int index = scoreDoc.shardIndex; + parallelArray.leafReaderContexts[index] = leaf; + parallelArray.seqNo[index] = combinedDocValues.docSeqNo(segmentDocID); + parallelArray.primaryTerm[index] = combinedDocValues.docPrimaryTerm(segmentDocID); + parallelArray.version[index] = combinedDocValues.docVersion(segmentDocID); + parallelArray.isTombStone[index] = combinedDocValues.isTombstone(segmentDocID); + parallelArray.hasRecoverySource[index] = combinedDocValues.hasRecoverySource(segmentDocID); + } + // now sort back based on the shardIndex. we use this to store the previous index + ArrayUtil.introSort(scoreDocs, Comparator.comparingInt(i -> i.shardIndex)); + } + } + + private TopDocs searchOperations(ScoreDoc after) throws IOException { + final Query rangeQuery = LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, Math.max(fromSeqNo, lastSeenSeqNo), toSeqNo); + final Sort sortedBySeqNoThenByTerm = new Sort( + new SortField(SeqNoFieldMapper.NAME, SortField.Type.LONG), + new SortField(SeqNoFieldMapper.PRIMARY_TERM_NAME, SortField.Type.LONG, true) + ); + return indexSearcher.searchAfter(after, rangeQuery, searchBatchSize, sortedBySeqNoThenByTerm); + } + + private Translog.Operation readDocAsOp(int docIndex) throws IOException { + final LeafReaderContext leaf = parallelArray.leafReaderContexts[docIndex]; + final int segmentDocID = scoreDocs[docIndex].doc - leaf.docBase; + final long primaryTerm = parallelArray.primaryTerm[docIndex]; + // We don't have to read the nested child documents - those docs don't have primary terms. + if (primaryTerm == -1) { + skippedOperations++; + return null; + } + final long seqNo = parallelArray.seqNo[docIndex]; + // Only pick the first seen seq# + if (seqNo == lastSeenSeqNo) { + skippedOperations++; + return null; + } + final long version = parallelArray.version[docIndex]; + final String sourceField = parallelArray.hasRecoverySource[docIndex] ?
SourceFieldMapper.RECOVERY_SOURCE_NAME : + SourceFieldMapper.NAME; + final FieldsVisitor fields = new FieldsVisitor(true, sourceField); + leaf.reader().document(segmentDocID, fields); + fields.postProcess(mapperService); + + final Translog.Operation op; + final boolean isTombstone = parallelArray.isTombStone[docIndex]; + if (isTombstone && fields.uid() == null) { + op = new Translog.NoOp(seqNo, primaryTerm, fields.source().utf8ToString()); + assert version == 1L : "Noop tombstone should have version 1L; actual version [" + version + "]"; + assert assertDocSoftDeleted(leaf.reader(), segmentDocID) : "Noop but soft_deletes field is not set [" + op + "]"; + } else { + final String id = fields.uid().id(); + final String type = fields.uid().type(); + final Term uid = new Term(IdFieldMapper.NAME, Uid.encodeId(id)); + if (isTombstone) { + op = new Translog.Delete(type, id, uid, seqNo, primaryTerm, version); + assert assertDocSoftDeleted(leaf.reader(), segmentDocID) : "Delete op but soft_deletes field is not set [" + op + "]"; + } else { + final BytesReference source = fields.source(); + if (source == null) { + // TODO: Callers should ask for the range in which the source should be retained. Thus we should always + // check for the existence of the source once we make peer-recovery send ops after the local checkpoint. + if (requiredFullRange) { + throw new IllegalStateException("source not found for seqno=" + seqNo + + " from_seqno=" + fromSeqNo + " to_seqno=" + toSeqNo); + } else { + skippedOperations++; + return null; + } + } + // TODO: pass the latest timestamp from engine. + final long autoGeneratedIdTimestamp = -1; + op = new Translog.Index(type, id, seqNo, primaryTerm, version, + source.toBytesRef().bytes, fields.routing(), autoGeneratedIdTimestamp); + } + } + assert fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo && lastSeenSeqNo < op.seqNo() : "Unexpected operation; " + + "last_seen_seqno [" + lastSeenSeqNo + "], from_seqno [" + fromSeqNo + "], to_seqno [" + toSeqNo + "], op [" + op + "]"; + return op; + } + + private boolean assertDocSoftDeleted(LeafReader leafReader, int segmentDocId) throws IOException { + final NumericDocValues ndv = leafReader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD); + if (ndv == null || ndv.advanceExact(segmentDocId) == false) { + throw new IllegalStateException("DocValues for field [" + Lucene.SOFT_DELETES_FIELD + "] is not found"); + } + return ndv.longValue() == 1; + } + + private static final class ParallelArray { + final LeafReaderContext[] leafReaderContexts; + final long[] version; + final long[] seqNo; + final long[] primaryTerm; + final boolean[] isTombStone; + final boolean[] hasRecoverySource; + + ParallelArray(int size) { + version = new long[size]; + seqNo = new long[size]; + primaryTerm = new long[size]; + isTombStone = new boolean[size]; + hasRecoverySource = new boolean[size]; + leafReaderContexts = new LeafReaderContext[size]; + } + } + + private static final class CombinedDocValues { + private final NumericDocValues versionDV; + private final NumericDocValues seqNoDV; + private final NumericDocValues primaryTermDV; + private final NumericDocValues tombstoneDV; + private final NumericDocValues recoverySource; + + CombinedDocValues(LeafReader leafReader) throws IOException { + this.versionDV = Objects.requireNonNull(leafReader.getNumericDocValues(VersionFieldMapper.NAME), "VersionDV is missing"); + this.seqNoDV = Objects.requireNonNull(leafReader.getNumericDocValues(SeqNoFieldMapper.NAME), "SeqNoDV is missing"); + this.primaryTermDV =
Objects.requireNonNull( + leafReader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME), "PrimaryTermDV is missing"); + this.tombstoneDV = leafReader.getNumericDocValues(SeqNoFieldMapper.TOMBSTONE_NAME); + this.recoverySource = leafReader.getNumericDocValues(SourceFieldMapper.RECOVERY_SOURCE_NAME); + } + + long docVersion(int segmentDocId) throws IOException { + assert versionDV.docID() < segmentDocId; + if (versionDV.advanceExact(segmentDocId) == false) { + throw new IllegalStateException("DocValues for field [" + VersionFieldMapper.NAME + "] is not found"); + } + return versionDV.longValue(); + } + + long docSeqNo(int segmentDocId) throws IOException { + assert seqNoDV.docID() < segmentDocId; + if (seqNoDV.advanceExact(segmentDocId) == false) { + throw new IllegalStateException("DocValues for field [" + SeqNoFieldMapper.NAME + "] is not found"); + } + return seqNoDV.longValue(); + } + + long docPrimaryTerm(int segmentDocId) throws IOException { + if (primaryTermDV == null) { + return -1L; + } + assert primaryTermDV.docID() < segmentDocId; + // Use -1 for docs which don't have primary term. The caller considers those docs as nested docs. + if (primaryTermDV.advanceExact(segmentDocId) == false) { + return -1; + } + return primaryTermDV.longValue(); + } + + boolean isTombstone(int segmentDocId) throws IOException { + if (tombstoneDV == null) { + return false; + } + assert tombstoneDV.docID() < segmentDocId; + return tombstoneDV.advanceExact(segmentDocId) && tombstoneDV.longValue() > 0; + } + + boolean hasRecoverySource(int segmentDocId) throws IOException { + if (recoverySource == null) { + return false; + } + assert recoverySource.docID() < segmentDocId; + return recoverySource.advanceExact(segmentDocId); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java new file mode 100644 index 00000000000..7faed37b2fd --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java @@ -0,0 +1,293 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.StoredFieldsReader; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.CodecReader; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FilterCodecReader; +import org.apache.lucene.index.FilterNumericDocValues; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.OneMergeWrappingMergePolicy; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConjunctionDISI; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.BitSet; +import org.apache.lucene.util.BitSetIterator; + +import java.io.IOException; +import java.util.Arrays; +import java.util.function.Supplier; + +final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy { + RecoverySourcePruneMergePolicy(String recoverySourceField, Supplier<Query> retainSourceQuerySupplier, MergePolicy in) { + super(in, toWrap -> new OneMerge(toWrap.segments) { + @Override + public CodecReader wrapForMerge(CodecReader reader) throws IOException { + CodecReader wrapped = toWrap.wrapForMerge(reader); + return wrapReader(recoverySourceField, wrapped, retainSourceQuerySupplier); + } + }); + } + + // pkg private for testing + static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier<Query> retainSourceQuerySupplier) + throws IOException { + NumericDocValues recoverySource = reader.getNumericDocValues(recoverySourceField); + if (recoverySource == null || recoverySource.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { + return reader; // early terminate - nothing to do here since none of the docs has a recovery source anymore.
+ } + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + builder.add(new DocValuesFieldExistsQuery(recoverySourceField), BooleanClause.Occur.FILTER); + builder.add(retainSourceQuerySupplier.get(), BooleanClause.Occur.FILTER); + IndexSearcher s = new IndexSearcher(reader); + s.setQueryCache(null); + Weight weight = s.createWeight(s.rewrite(builder.build()), ScoreMode.COMPLETE_NO_SCORES, 1.0f); + Scorer scorer = weight.scorer(reader.getContext()); + if (scorer != null) { + return new SourcePruningFilterCodecReader(recoverySourceField, reader, BitSet.of(scorer.iterator(), reader.maxDoc())); + } else { + return new SourcePruningFilterCodecReader(recoverySourceField, reader, null); + } + } + + private static class SourcePruningFilterCodecReader extends FilterCodecReader { + private final BitSet recoverySourceToKeep; + private final String recoverySourceField; + + SourcePruningFilterCodecReader(String recoverySourceField, CodecReader reader, BitSet recoverySourceToKeep) { + super(reader); + this.recoverySourceField = recoverySourceField; + this.recoverySourceToKeep = recoverySourceToKeep; + } + + @Override + public DocValuesProducer getDocValuesReader() { + DocValuesProducer docValuesReader = super.getDocValuesReader(); + return new FilterDocValuesProducer(docValuesReader) { + @Override + public NumericDocValues getNumeric(FieldInfo field) throws IOException { + NumericDocValues numeric = super.getNumeric(field); + if (recoverySourceField.equals(field.name)) { + assert numeric != null : recoverySourceField + " must have numeric DV but was null"; + final DocIdSetIterator intersection; + if (recoverySourceToKeep == null) { + // we can't return null here; Lucene's DocIdMerger expects an instance + intersection = DocIdSetIterator.empty(); + } else { + intersection = ConjunctionDISI.intersectIterators(Arrays.asList(numeric, + new BitSetIterator(recoverySourceToKeep, recoverySourceToKeep.length()))); + } + return new FilterNumericDocValues(numeric) { + @Override + public int nextDoc() throws IOException { + return intersection.nextDoc(); + } + + @Override + public int advance(int target) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean advanceExact(int target) { + throw new UnsupportedOperationException(); + } + }; + + } + return numeric; + } + }; + } + + @Override + public StoredFieldsReader getFieldsReader() { + StoredFieldsReader fieldsReader = super.getFieldsReader(); + return new FilterStoredFieldsReader(fieldsReader) { + @Override + public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException { + if (recoverySourceToKeep != null && recoverySourceToKeep.get(docID)) { + super.visitDocument(docID, visitor); + } else { + super.visitDocument(docID, new FilterStoredFieldVisitor(visitor) { + @Override + public Status needsField(FieldInfo fieldInfo) throws IOException { + if (recoverySourceField.equals(fieldInfo.name)) { + return Status.NO; + } + return super.needsField(fieldInfo); + } + }); + } + } + }; + } + + @Override + public CacheHelper getCoreCacheHelper() { + return null; + } + + @Override + public CacheHelper getReaderCacheHelper() { + return null; + } + + private static class FilterDocValuesProducer extends DocValuesProducer { + private final DocValuesProducer in; + + FilterDocValuesProducer(DocValuesProducer in) { + this.in = in; + } + + @Override + public NumericDocValues getNumeric(FieldInfo field) throws IOException { + return in.getNumeric(field); + } + + @Override + public BinaryDocValues getBinary(FieldInfo field) throws
IOException { + return in.getBinary(field); + } + + @Override + public SortedDocValues getSorted(FieldInfo field) throws IOException { + return in.getSorted(field); + } + + @Override + public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException { + return in.getSortedNumeric(field); + } + + @Override + public SortedSetDocValues getSortedSet(FieldInfo field) throws IOException { + return in.getSortedSet(field); + } + + @Override + public void checkIntegrity() throws IOException { + in.checkIntegrity(); + } + + @Override + public void close() throws IOException { + in.close(); + } + + @Override + public long ramBytesUsed() { + return in.ramBytesUsed(); + } + } + + private static class FilterStoredFieldsReader extends StoredFieldsReader { + + private final StoredFieldsReader fieldsReader; + + FilterStoredFieldsReader(StoredFieldsReader fieldsReader) { + this.fieldsReader = fieldsReader; + } + + @Override + public long ramBytesUsed() { + return fieldsReader.ramBytesUsed(); + } + + @Override + public void close() throws IOException { + fieldsReader.close(); + } + + @Override + public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException { + fieldsReader.visitDocument(docID, visitor); + } + + @Override + public StoredFieldsReader clone() { + return fieldsReader.clone(); + } + + @Override + public void checkIntegrity() throws IOException { + fieldsReader.checkIntegrity(); + } + } + + private static class FilterStoredFieldVisitor extends StoredFieldVisitor { + private final StoredFieldVisitor visitor; + + FilterStoredFieldVisitor(StoredFieldVisitor visitor) { + this.visitor = visitor; + } + + @Override + public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { + visitor.binaryField(fieldInfo, value); + } + + @Override + public void stringField(FieldInfo fieldInfo, byte[] value) throws IOException { + visitor.stringField(fieldInfo, value); + } + + @Override + public void intField(FieldInfo fieldInfo, int value) throws IOException { + visitor.intField(fieldInfo, value); + } + + @Override + public void longField(FieldInfo fieldInfo, long value) throws IOException { + visitor.longField(fieldInfo, value); + } + + @Override + public void floatField(FieldInfo fieldInfo, float value) throws IOException { + visitor.floatField(fieldInfo, value); + } + + @Override + public void doubleField(FieldInfo fieldInfo, double value) throws IOException { + visitor.doubleField(fieldInfo, value); + } + + @Override + public Status needsField(FieldInfo fieldInfo) throws IOException { + return visitor.needsField(fieldInfo); + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java new file mode 100644 index 00000000000..af2ded8c466 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.translog.Translog; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.LongSupplier; + +/** + * A policy that controls how many soft-deleted documents should be retained for peer-recovery and for querying history changes. + */ +final class SoftDeletesPolicy { + private final LongSupplier globalCheckpointSupplier; + private long localCheckpointOfSafeCommit; + // This lock count is used to prevent `minRetainedSeqNo` from advancing. + private int retentionLockCount; + // The number of extra operations before the global checkpoint that are retained + private long retentionOperations; + // The min seq_no value that is retained - ops at or after this seq# should exist in the Lucene index. + private long minRetainedSeqNo; + + SoftDeletesPolicy(LongSupplier globalCheckpointSupplier, long minRetainedSeqNo, long retentionOperations) { + this.globalCheckpointSupplier = globalCheckpointSupplier; + this.retentionOperations = retentionOperations; + this.minRetainedSeqNo = minRetainedSeqNo; + this.localCheckpointOfSafeCommit = SequenceNumbers.NO_OPS_PERFORMED; + this.retentionLockCount = 0; + } + + /** + * Updates the number of soft-deleted documents prior to the global checkpoint to be retained. + * See {@link org.elasticsearch.index.IndexSettings#INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING} + */ + synchronized void setRetentionOperations(long retentionOperations) { + this.retentionOperations = retentionOperations; + } + + /** + * Sets the local checkpoint of the current safe commit + */ + synchronized void setLocalCheckpointOfSafeCommit(long newCheckpoint) { + if (newCheckpoint < this.localCheckpointOfSafeCommit) { + throw new IllegalArgumentException("Local checkpoint can't go backwards; " + + "new checkpoint [" + newCheckpoint + "], " + "current checkpoint [" + localCheckpointOfSafeCommit + "]"); + } + this.localCheckpointOfSafeCommit = newCheckpoint; + } + + /** + * Acquires a lock on soft-deleted documents to prevent them from being cleaned up by merges. This is necessary to + * make sure that all operations that are being retained will be retained until the lock is released.
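+ * For example (illustrative sketch; {@code softDeletesPolicy} names an instance of this class), a caller can pin
+ * the currently retained history with try-with-resources, since {@link Releasable} is closeable:
+ * <pre>
+ * try (Releasable lock = softDeletesPolicy.acquireRetentionLock()) {
+ *     // getMinRetainedSeqNo() will not advance while the lock is held
+ * }
+ * </pre>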
+ * This is analogous to the translog's retention lock; see {@link Translog#acquireRetentionLock()} + */ + synchronized Releasable acquireRetentionLock() { + assert retentionLockCount >= 0 : "Invalid number of retention locks [" + retentionLockCount + "]"; + retentionLockCount++; + final AtomicBoolean released = new AtomicBoolean(); + return () -> { + if (released.compareAndSet(false, true)) { + releaseRetentionLock(); + } + }; + } + + private synchronized void releaseRetentionLock() { + assert retentionLockCount > 0 : "Invalid number of retention locks [" + retentionLockCount + "]"; + retentionLockCount--; + } + + /** + * Returns the min seq_no that is retained in the Lucene index. + * Operations whose seq# is at least this value should exist in the Lucene index. + */ + synchronized long getMinRetainedSeqNo() { + // Do not advance if the retention lock is held + if (retentionLockCount == 0) { + // This policy retains operations for two purposes: peer-recovery and querying changes history. + // - Peer-recovery is driven by the local checkpoint of the safe commit. In peer-recovery, the primary transfers a safe commit, + // then sends ops after the local checkpoint of that commit. This requires keeping all ops after localCheckpointOfSafeCommit; + // - Changes APIs are driven by the combination of the global checkpoint and retention ops. Here we prefer using the global + // checkpoint instead of max_seqno because only operations up to the global checkpoint are exposed in the changes APIs. + final long minSeqNoForQueryingChanges = globalCheckpointSupplier.getAsLong() - retentionOperations; + final long minSeqNoToRetain = Math.min(minSeqNoForQueryingChanges, localCheckpointOfSafeCommit) + 1; + // minSeqNoToRetain can go backward as the retentionOperations value can be changed in settings, + // so we never let minRetainedSeqNo decrease. + minRetainedSeqNo = Math.max(minRetainedSeqNo, minSeqNoToRetain); + } + return minRetainedSeqNo; + } + + /** + * Returns a soft-deletes retention query that will be used in {@link org.apache.lucene.index.SoftDeletesRetentionMergePolicy}. + * Documents, including tombstones, that are soft-deleted and match this query will be retained and won't be cleaned up by merges.
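+ * For example (illustrative numbers): with a global checkpoint of 100, a retentionOperations value of 30 and a
+ * local checkpoint of the safe commit of 90, getMinRetainedSeqNo() returns min(100 - 30, 90) + 1 = 71, so this
+ * query matches every document whose _seq_no is in [71, Long.MAX_VALUE].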
+ */ + Query getRetentionQuery() { + return LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, getMinRetainedSeqNo(), Long.MAX_VALUE); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java index da3dc75f4ef..6896432bcdd 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparatorSource; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SortField; import org.apache.lucene.search.Weight; @@ -155,7 +156,7 @@ public interface IndexFieldData extends IndexCompone public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException { final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx); IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx); - Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false); + Weight weight = indexSearcher.createWeight(indexSearcher.rewrite(innerQuery), ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer s = weight.scorer(ctx); return s == null ? null : s.iterator(); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java index eaa16e9f07d..8e0a31859a1 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java @@ -25,7 +25,7 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; @@ -71,7 +71,7 @@ public class BytesRefFieldComparatorSource extends IndexFieldData.XFieldComparat return indexFieldData.load(context).getBytesValues(); } - protected void setScorer(Scorer scorer) {} + protected void setScorer(Scorable scorer) {} @Override public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) { @@ -101,7 +101,7 @@ public class BytesRefFieldComparatorSource extends IndexFieldData.XFieldComparat } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { BytesRefFieldComparatorSource.this.setScorer(scorer); } @@ -125,7 +125,7 @@ public class BytesRefFieldComparatorSource extends IndexFieldData.XFieldComparat } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { BytesRefFieldComparatorSource.this.setScorer(scorer); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java index 43bc19a12a3..1ae3fb692ec 100644 --- 
a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java @@ -23,7 +23,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.apache.lucene.util.BitSet; import org.elasticsearch.common.Nullable; @@ -57,7 +57,7 @@ public class DoubleValuesComparatorSource extends IndexFieldData.XFieldComparato return indexFieldData.load(context).getDoubleValues(); } - protected void setScorer(Scorer scorer) {} + protected void setScorer(Scorable scorer) {} @Override public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) { @@ -81,7 +81,7 @@ public class DoubleValuesComparatorSource extends IndexFieldData.XFieldComparato return selectedValues.getRawDoubleValues(); } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { DoubleValuesComparatorSource.this.setScorer(scorer); } }; diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java index 4c65635c61b..462f8ce8e68 100644 --- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java @@ -54,13 +54,19 @@ public class FieldsVisitor extends StoredFieldVisitor { RoutingFieldMapper.NAME)); private final boolean loadSource; + private final String sourceFieldName; private final Set requiredFields; protected BytesReference source; protected String type, id; protected Map> fieldsValues; public FieldsVisitor(boolean loadSource) { + this(loadSource, SourceFieldMapper.NAME); + } + + public FieldsVisitor(boolean loadSource, String sourceFieldName) { this.loadSource = loadSource; + this.sourceFieldName = sourceFieldName; requiredFields = new HashSet<>(); reset(); } @@ -103,7 +109,7 @@ public class FieldsVisitor extends StoredFieldVisitor { @Override public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { - if (SourceFieldMapper.NAME.equals(fieldInfo.name)) { + if (sourceFieldName.equals(fieldInfo.name)) { source = new BytesArray(value); } else if (IdFieldMapper.NAME.equals(fieldInfo.name)) { id = Uid.decodeId(value); @@ -175,7 +181,7 @@ public class FieldsVisitor extends StoredFieldVisitor { requiredFields.addAll(BASE_REQUIRED_FIELDS); if (loadSource) { - requiredFields.add(SourceFieldMapper.NAME); + requiredFields.add(sourceFieldName); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index 83d9a8178ca..db04e64b164 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -430,13 +430,15 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp * else adds inputs as a {@link org.apache.lucene.search.suggest.document.SuggestField} */ @Override - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) 
throws IOException { // parse XContentParser parser = context.parser(); Token token = parser.currentToken(); Map inputMap = new HashMap<>(1); + + // ignore null values if (token == Token.VALUE_NULL) { - throw new MapperParsingException("completion field [" + fieldType().name() + "] does not support null values"); + return; } else if (token == Token.START_ARRAY) { while ((token = parser.nextToken()) != Token.END_ARRAY) { parse(context, token, parser, inputMap); @@ -475,7 +477,6 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp context.doc().add(field); } multiFields.parse(this, context); - return null; } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index a0640ac68a9..fa1abe42939 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -19,11 +19,15 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.document.StoredField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; @@ -39,12 +43,15 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Stream; public class DocumentMapper implements ToXContentFragment { @@ -121,6 +128,8 @@ public class DocumentMapper implements ToXContentFragment { private final Map objectMappers; private final boolean hasNestedObjects; + private final MetadataFieldMapper[] deleteTombstoneMetadataFieldMappers; + private final MetadataFieldMapper[] noopTombstoneMetadataFieldMappers; public DocumentMapper(MapperService mapperService, Mapping mapping) { this.mapperService = mapperService; @@ -171,6 +180,15 @@ public class DocumentMapper implements ToXContentFragment { } catch (Exception e) { throw new ElasticsearchGenerationException("failed to serialize source for type [" + type + "]", e); } + + final Collection deleteTombstoneMetadataFields = Arrays.asList(VersionFieldMapper.NAME, IdFieldMapper.NAME, + TypeFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME, SeqNoFieldMapper.TOMBSTONE_NAME); + this.deleteTombstoneMetadataFieldMappers = Stream.of(mapping.metadataMappers) + .filter(field -> deleteTombstoneMetadataFields.contains(field.name())).toArray(MetadataFieldMapper[]::new); + final Collection noopTombstoneMetadataFields = Arrays.asList( + VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME, SeqNoFieldMapper.TOMBSTONE_NAME); + this.noopTombstoneMetadataFieldMappers = Stream.of(mapping.metadataMappers) + .filter(field -> noopTombstoneMetadataFields.contains(field.name())).toArray(MetadataFieldMapper[]::new); } public Mapping mapping() { @@ -242,7 +260,22 @@ public class DocumentMapper 
implements ToXContentFragment { } public ParsedDocument parse(SourceToParse source) throws MapperParsingException { - return documentParser.parseDocument(source); + return documentParser.parseDocument(source, mapping.metadataMappers); + } + + public ParsedDocument createDeleteTombstoneDoc(String index, String type, String id) throws MapperParsingException { + final SourceToParse emptySource = SourceToParse.source(index, type, id, new BytesArray("{}"), XContentType.JSON); + return documentParser.parseDocument(emptySource, deleteTombstoneMetadataFieldMappers).toTombstone(); + } + + public ParsedDocument createNoopTombstoneDoc(String index, String reason) throws MapperParsingException { + final String id = ""; // _id won't be used. + final SourceToParse sourceToParse = SourceToParse.source(index, type, id, new BytesArray("{}"), XContentType.JSON); + final ParsedDocument parsedDoc = documentParser.parseDocument(sourceToParse, noopTombstoneMetadataFieldMappers).toTombstone(); + // Store the reason of a noop as a raw string in the _source field + final BytesRef byteRef = new BytesRef(reason); + parsedDoc.rootDoc().add(new StoredField(SourceFieldMapper.NAME, byteRef.bytes, byteRef.offset, byteRef.length)); + return parsedDoc; } /** @@ -261,7 +294,7 @@ public class DocumentMapper implements ToXContentFragment { } // We can pass down 'null' as acceptedDocs, because nestedDocId is a doc to be fetched and // therefor is guaranteed to be a live doc. - final Weight nestedWeight = filter.createWeight(sc.searcher(), false, 1f); + final Weight nestedWeight = filter.createWeight(sc.searcher(), ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer scorer = nestedWeight.scorer(context); if (scorer == null) { continue; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 0fd156c0905..3f8a7cd62dd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -55,7 +55,7 @@ final class DocumentParser { this.docMapper = docMapper; } - ParsedDocument parseDocument(SourceToParse source) throws MapperParsingException { + ParsedDocument parseDocument(SourceToParse source, MetadataFieldMapper[] metadataFieldsMappers) throws MapperParsingException { validateType(source); final Mapping mapping = docMapper.mapping(); @@ -64,9 +64,9 @@ final class DocumentParser { try (XContentParser parser = XContentHelper.createParser(docMapperParser.getXContentRegistry(), LoggingDeprecationHandler.INSTANCE, source.source(), xContentType)) { - context = new ParseContext.InternalParseContext(indexSettings.getSettings(), docMapperParser, docMapper, source, parser); + context = new ParseContext.InternalParseContext(indexSettings, docMapperParser, docMapper, source, parser); validateStart(parser); - internalParseDocument(mapping, context, parser); + internalParseDocument(mapping, metadataFieldsMappers, context, parser); validateEnd(parser); } catch (Exception e) { throw wrapInMapperParsingException(source, e); @@ -81,10 +81,11 @@ final class DocumentParser { return parsedDocument(source, context, createDynamicUpdate(mapping, docMapper, context.getDynamicMappers())); } - private static void internalParseDocument(Mapping mapping, ParseContext.InternalParseContext context, XContentParser parser) throws IOException { + private static void internalParseDocument(Mapping mapping, MetadataFieldMapper[] metadataFieldsMappers, + 
ParseContext.InternalParseContext context, XContentParser parser) throws IOException { final boolean emptyDoc = isEmptyDoc(mapping, parser); - for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { + for (MetadataFieldMapper metadataMapper : metadataFieldsMappers) { metadataMapper.preParse(context); } @@ -95,7 +96,7 @@ final class DocumentParser { parseObjectOrNested(context, mapping.root); } - for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { + for (MetadataFieldMapper metadataMapper : metadataFieldsMappers) { metadataMapper.postParse(context); } } @@ -461,10 +462,7 @@ final class DocumentParser { parseObjectOrNested(context, (ObjectMapper) mapper); } else if (mapper instanceof FieldMapper) { FieldMapper fieldMapper = (FieldMapper) mapper; - Mapper update = fieldMapper.parse(context); - if (update != null) { - context.addDynamicMapper(update); - } + fieldMapper.parse(context); parseCopyFields(context, fieldMapper.copyTo().copyToFields()); } else if (mapper instanceof FieldAliasMapper) { throw new IllegalArgumentException("Cannot write to a field alias [" + mapper.name() + "]."); @@ -495,7 +493,7 @@ final class DocumentParser { if (builder == null) { builder = new ObjectMapper.Builder(currentFieldName).enabled(true); } - Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); objectMapper = builder.build(builderContext); context.addDynamicMapper(objectMapper); context.path().add(currentFieldName); @@ -538,7 +536,7 @@ final class DocumentParser { if (builder == null) { parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); } else { - Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); mapper = builder.build(builderContext); assert mapper != null; if (mapper instanceof ArrayValueMapperParser) { @@ -696,13 +694,13 @@ final class DocumentParser { if (parseableAsLong && context.root().numericDetection()) { Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.LONG); if (builder == null) { - builder = newLongBuilder(currentFieldName, Version.indexCreated(context.indexSettings())); + builder = newLongBuilder(currentFieldName, context.indexSettings().getIndexVersionCreated()); } return builder; } else if (parseableAsDouble && context.root().numericDetection()) { Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.DOUBLE); if (builder == null) { - builder = newFloatBuilder(currentFieldName, Version.indexCreated(context.indexSettings())); + builder = newFloatBuilder(currentFieldName, context.indexSettings().getIndexVersionCreated()); } return builder; } else if (parseableAsLong == false && parseableAsDouble == false && context.root().dateDetection()) { @@ -718,7 +716,7 @@ final class DocumentParser { } Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.DATE); if (builder == null) { - builder = newDateBuilder(currentFieldName, dateTimeFormatter, Version.indexCreated(context.indexSettings())); + builder = newDateBuilder(currentFieldName, dateTimeFormatter, context.indexSettings().getIndexVersionCreated()); } if (builder instanceof 
DateFieldMapper.Builder) { DateFieldMapper.Builder dateBuilder = (DateFieldMapper.Builder) builder; @@ -741,7 +739,7 @@ final class DocumentParser { if (numberType == XContentParser.NumberType.INT || numberType == XContentParser.NumberType.LONG) { Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.LONG); if (builder == null) { - builder = newLongBuilder(currentFieldName, Version.indexCreated(context.indexSettings())); + builder = newLongBuilder(currentFieldName, context.indexSettings().getIndexVersionCreated()); } return builder; } else if (numberType == XContentParser.NumberType.FLOAT || numberType == XContentParser.NumberType.DOUBLE) { @@ -750,7 +748,7 @@ final class DocumentParser { // no templates are defined, we use float by default instead of double // since this is much more space-efficient and should be enough most of // the time - builder = newFloatBuilder(currentFieldName, Version.indexCreated(context.indexSettings())); + builder = newFloatBuilder(currentFieldName, context.indexSettings().getIndexVersionCreated()); } return builder; } @@ -785,7 +783,7 @@ final class DocumentParser { return; } final String path = context.path().pathAsText(currentFieldName); - final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); final MappedFieldType existingFieldType = context.mapperService().fullName(path); final Mapper.Builder builder; if (existingFieldType != null) { @@ -883,8 +881,8 @@ final class DocumentParser { if (builder == null) { builder = new ObjectMapper.Builder(paths[i]).enabled(true); } - Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); - mapper = (ObjectMapper) builder.build(builderContext); + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), + context.path()); mapper = (ObjectMapper) builder.build(builderContext); if (mapper.nested() != ObjectMapper.Nested.NO) { throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to` or dots in field names"); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 2e949f027d1..2a12bc65d14 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -264,11 +264,9 @@ public abstract class FieldMapper extends Mapper implements Cloneable { } /** - * Parse using the provided {@link ParseContext} and return a mapping - * update if dynamic mappings modified the mappings, or {@code null} if - * mappings were not modified. + * Parse the field value using the provided {@link ParseContext}. 
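+ * Note that dynamic mapping updates triggered while parsing are no longer returned from this method; they are
+ * recorded on the parse context via {@link ParseContext#addDynamicMapper} instead, which is where
+ * {@link DocumentParser} now collects them.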
*/ - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) throws IOException { final List fields = new ArrayList<>(2); try { parseCreateField(context, fields); @@ -280,7 +278,6 @@ public abstract class FieldMapper extends Mapper implements Cloneable { fieldType().typeName()); } multiFields.parse(this, context); - return null; } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 606777392de..ce6e10ccc0f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -24,7 +24,6 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.lucene.Lucene; @@ -205,20 +204,19 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { } @Override - public void preParse(ParseContext context) throws IOException { + public void preParse(ParseContext context) { } @Override public void postParse(ParseContext context) throws IOException { - if (context.indexSettings().getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).before(Version.V_6_1_0)) { + if (context.indexSettings().getIndexVersionCreated().before(Version.V_6_1_0)) { super.parse(context); } } @Override - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) throws IOException { // Adding values to the _field_names field is handled by the mappers for each field type - return null; } static Iterable extractFieldNames(final String fullPath) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 551f7c18c1c..350dc27c615 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -284,7 +284,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper } @Override - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) throws IOException { context.path().add(simpleName()); GeoPoint sparse = context.parseExternalValue(GeoPoint.class); @@ -339,7 +339,6 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper } context.path().remove(); - return null; } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index fb9e16cbe13..275ff75f473 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -483,13 +483,13 @@ public class GeoShapeFieldMapper extends FieldMapper { return (GeoShapeFieldType) super.fieldType(); } @Override - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) throws IOException { try { Shape shape = context.parseExternalValue(Shape.class); 
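+ // the shape may have been supplied as an external value by an enclosing mapper; parseExternalValue
+ // returns null when no external value is set, in which case we parse the shape from the document below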
if (shape == null) { ShapeBuilder shapeBuilder = ShapeParser.parse(context.parser(), this); if (shapeBuilder == null) { - return null; + return; } shape = shapeBuilder.build(); } @@ -501,7 +501,7 @@ public class GeoShapeFieldMapper extends FieldMapper { for (Shape s : shapes) { indexShape(context, s); } - return null; + return; } else if (shape instanceof Point == false) { throw new MapperParsingException("[{" + fieldType().name() + "}] is configured for points only but a " + ((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + " was found"); @@ -515,7 +515,6 @@ public class GeoShapeFieldMapper extends FieldMapper { } context.addIgnoredField(fieldType.name()); } - return null; } private void indexShape(ParseContext context, Shape shape) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java index 69f1e36664e..7a3a9a8f2ae 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java @@ -129,9 +129,8 @@ public final class IgnoredFieldMapper extends MetadataFieldMapper { } @Override - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) throws IOException { // done in post-parse - return null; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 9c334f79551..20b4bb37cc7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; @@ -166,7 +167,7 @@ public final class KeywordFieldMapper extends FieldMapper { builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1)); iterator.remove(); } else if (propName.equals("norms")) { - builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode, "norms") == false); + TypeParsers.parseNorms(builder, name, propNode); iterator.remove(); } else if (propName.equals("eager_global_ordinals")) { builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(propNode, "eager_global_ordinals")); @@ -256,8 +257,10 @@ public final class KeywordFieldMapper extends FieldMapper { public Query existsQuery(QueryShardContext context) { if (hasDocValues()) { return new DocValuesFieldExistsQuery(name()); - } else { + } else if (omitNorms()) { return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name())); + } else { + return new NormsFieldExistsQuery(name()); } } @@ -366,17 +369,19 @@ public final class KeywordFieldMapper extends FieldMapper { // convert to utf8 only once before feeding postings/dv/stored fields final BytesRef binaryValue = new BytesRef(value); - if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { + if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { Field field = new Field(fieldType().name(), binaryValue, fieldType()); fields.add(field); + + if 
(fieldType().hasDocValues() == false && fieldType().omitNorms()) { + createFieldNamesField(context, fields); + } + } + + if (fieldType().hasDocValues()) { fields.add(new SortedSetDocValuesField(fieldType().name(), binaryValue)); - } else if (fieldType().stored() || fieldType().indexOptions() != IndexOptions.NONE) { - createFieldNamesField(context, fields); } } - @Override protected String contentType() { return CONTENT_TYPE; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 5f3f4a4de49..4a3fa852e7f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -35,6 +35,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.joda.DateMathParser; @@ -314,7 +315,13 @@ public abstract class MappedFieldType extends FieldType { /** Generates a query that will only match documents that contain the given value. * The default implementation returns a {@link TermQuery} over the value bytes, * boosted by {@link #boost()}. - * @throws IllegalArgumentException if {@code value} cannot be converted to the expected data type */ + * @throws IllegalArgumentException if {@code value} cannot be converted to the expected data type or if the field is not searchable + * due to the way it is configured (e.g. not indexed) + * @throws ElasticsearchParseException if {@code value} cannot be converted to the expected data type + * @throws UnsupportedOperationException if the field is not searchable regardless of options + * @throws QueryShardException if the field is not searchable regardless of options + */ + // TODO: Standardize exception types public abstract Query termQuery(Object value, @Nullable QueryShardContext context); /** Build a constant-scoring query that matches all values.
The default implementation uses a diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 9cd8ef1f6ac..5d0239f846a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -21,10 +21,12 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.apache.lucene.index.Term; +import org.elasticsearch.Assertions; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -192,8 +194,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable { /** * Update mapping by only merging the metadata that is different between received and stored entries */ - public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { - assert indexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + indexMetaData.getIndex(); + public boolean updateMapping(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) throws IOException { + assert newIndexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + newIndexMetaData.getIndex(); // go over and add the relevant mappings (or update them) Set existingMappers = new HashSet<>(); if (mapper != null) { @@ -205,7 +207,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { final Map updatedEntries; try { // only update entries if needed - updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true); + updatedEntries = internalMerge(newIndexMetaData, MergeReason.MAPPING_RECOVERY, true); } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e); throw e; @@ -213,9 +215,11 @@ public class MapperService extends AbstractIndexComponent implements Closeable { boolean requireRefresh = false; + assertMappingVersion(currentIndexMetaData, newIndexMetaData, updatedEntries); + for (DocumentMapper documentMapper : updatedEntries.values()) { String mappingType = documentMapper.type(); - CompressedXContent incomingMappingSource = indexMetaData.mapping(mappingType).source(); + CompressedXContent incomingMappingSource = newIndexMetaData.mapping(mappingType).source(); String op = existingMappers.contains(mappingType) ? 
"updated" : "added"; if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) { @@ -240,6 +244,45 @@ public class MapperService extends AbstractIndexComponent implements Closeable { return requireRefresh; } + private void assertMappingVersion( + final IndexMetaData currentIndexMetaData, + final IndexMetaData newIndexMetaData, + final Map updatedEntries) { + if (Assertions.ENABLED + && currentIndexMetaData != null + && currentIndexMetaData.getCreationVersion().onOrAfter(Version.V_6_5_0)) { + if (currentIndexMetaData.getMappingVersion() == newIndexMetaData.getMappingVersion()) { + // if the mapping version is unchanged, then there should not be any updates and all mappings should be the same + assert updatedEntries.isEmpty() : updatedEntries; + for (final ObjectCursor mapping : newIndexMetaData.getMappings().values()) { + final CompressedXContent currentSource = currentIndexMetaData.mapping(mapping.value.type()).source(); + final CompressedXContent newSource = mapping.value.source(); + assert currentSource.equals(newSource) : + "expected current mapping [" + currentSource + "] for type [" + mapping.value.type() + "] " + + "to be the same as new mapping [" + newSource + "]"; + } + } else { + // if the mapping version is changed, it should increase, there should be updates, and the mapping should be different + final long currentMappingVersion = currentIndexMetaData.getMappingVersion(); + final long newMappingVersion = newIndexMetaData.getMappingVersion(); + assert currentMappingVersion < newMappingVersion : + "expected current mapping version [" + currentMappingVersion + "] " + + "to be less than new mapping version [" + newMappingVersion + "]"; + assert updatedEntries.isEmpty() == false; + for (final DocumentMapper documentMapper : updatedEntries.values()) { + final MappingMetaData currentMapping = currentIndexMetaData.mapping(documentMapper.type()); + if (currentMapping != null) { + final CompressedXContent currentSource = currentMapping.source(); + final CompressedXContent newSource = documentMapper.mappingSource(); + assert currentSource.equals(newSource) == false : + "expected current mapping [" + currentSource + "] for type [" + documentMapper.type() + "] " + + "to be different than new mapping"; + } + } + } + } + } + public void merge(Map> mappings, MergeReason reason) { Map mappingSourcesCompressed = new LinkedHashMap<>(mappings.size()); for (Map.Entry> entry : mappings.entrySet()) { @@ -396,7 +439,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable { List objectMappers = new ArrayList<>(); List fieldMappers = new ArrayList<>(); List fieldAliasMappers = new ArrayList<>(); - Collections.addAll(fieldMappers, newMapper.mapping().metadataMappers); + MetadataFieldMapper[] metadataMappers = newMapper.mapping().metadataMappers; + Collections.addAll(fieldMappers, metadataMappers); MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers, fieldAliasMappers); MapperMergeValidator.validateMapperStructure(newMapper.type(), objectMappers, fieldMappers, @@ -430,7 +474,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable { // the master node restoring mappings from disk or data nodes // deserializing cluster state that was sent by the master node, // this check will be skipped. 
- checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size() + fieldAliasMappers.size()); + // Also, don't take metadata mappers into account for the field limit check + checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size() - metadataMappers.length + fieldAliasMappers.size() ); } results.put(newMapper.type(), newMapper); @@ -468,11 +513,11 @@ public class MapperService extends AbstractIndexComponent implements Closeable { // commit the change if (defaultMappingSource != null) { this.defaultMappingSource = defaultMappingSource; + this.defaultMapper = defaultMapper; } if (newMapper != null) { this.mapper = newMapper; } - this.defaultMapper = defaultMapper; this.fieldTypes = fieldTypes; this.hasNested = hasNested; this.fullPathObjectMappers = fullPathObjectMappers; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index b77ffee05ca..cf8cc4022fd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -24,9 +24,8 @@ import com.carrotsearch.hppc.ObjectObjectMap; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexSettings; import java.util.ArrayList; import java.util.Collection; @@ -196,7 +195,7 @@ public abstract class ParseContext implements Iterable{ } @Override - public Settings indexSettings() { + public IndexSettings indexSettings() { return in.indexSettings(); } @@ -315,8 +314,7 @@ public abstract class ParseContext implements Iterable{ private final List documents; - @Nullable - private final Settings indexSettings; + private final IndexSettings indexSettings; private final SourceToParse sourceToParse; @@ -334,8 +332,8 @@ public abstract class ParseContext implements Iterable{ private final Set ignoredFields = new HashSet<>(); - public InternalParseContext(@Nullable Settings indexSettings, DocumentMapperParser docMapperParser, DocumentMapper docMapper, - SourceToParse source, XContentParser parser) { + public InternalParseContext(IndexSettings indexSettings, DocumentMapperParser docMapperParser, DocumentMapper docMapper, + SourceToParse source, XContentParser parser) { this.indexSettings = indexSettings; this.docMapper = docMapper; this.docMapperParser = docMapperParser; @@ -347,7 +345,7 @@ public abstract class ParseContext implements Iterable{ this.version = null; this.sourceToParse = source; this.dynamicMappers = new ArrayList<>(); - this.maxAllowedNumNestedDocs = MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.get(indexSettings); + this.maxAllowedNumNestedDocs = indexSettings.getValue(MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING); this.numNestedDocs = 0L; } @@ -357,8 +355,7 @@ public abstract class ParseContext implements Iterable{ } @Override - @Nullable - public Settings indexSettings() { + public IndexSettings indexSettings() { return this.indexSettings; } @@ -565,8 +562,7 @@ public abstract class ParseContext implements Iterable{ return false; } - @Nullable - public abstract Settings indexSettings(); + public abstract IndexSettings indexSettings(); public abstract SourceToParse sourceToParse(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java 
b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index 414cb3a98ec..d2cf17ddd35 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -83,6 +83,17 @@ public class ParsedDocument { this.seqID.primaryTerm.setLongValue(primaryTerm); } + /** + * Marks the document being processed as a tombstone document rather than a regular document. + * Tombstone documents are stored in the Lucene index to represent delete operations or no-ops. + */ + ParsedDocument toTombstone() { + assert docs().size() == 1 : "Tombstone should have a single doc [" + docs() + "]"; + this.seqID.tombstoneField.setLongValue(1); + rootDoc().add(this.seqID.tombstoneField); + return this; + } + public String routing() { return this.routing; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java index 76753496f46..5411c4604ac 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java @@ -158,11 +158,10 @@ public class RoutingFieldMapper extends MetadataFieldMapper { } @Override - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) throws IOException { // no need to parse here, we either get the routing in the sourceToParse // or we don't have routing, if we get it in sourceToParse, we process it in preParse // which will always be called - return null; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java index ac3ffe46272..8c032402b50 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java @@ -69,26 +69,29 @@ public class SeqNoFieldMapper extends MetadataFieldMapper { public final Field seqNo; public final Field seqNoDocValue; public final Field primaryTerm; + public final Field tombstoneField; - public SequenceIDFields(Field seqNo, Field seqNoDocValue, Field primaryTerm) { + public SequenceIDFields(Field seqNo, Field seqNoDocValue, Field primaryTerm, Field tombstoneField) { Objects.requireNonNull(seqNo, "sequence number field cannot be null"); Objects.requireNonNull(seqNoDocValue, "sequence number dv field cannot be null"); Objects.requireNonNull(primaryTerm, "primary term field cannot be null"); this.seqNo = seqNo; this.seqNoDocValue = seqNoDocValue; this.primaryTerm = primaryTerm; + this.tombstoneField = tombstoneField; } public static SequenceIDFields emptySeqID() { return new SequenceIDFields(new LongPoint(NAME, SequenceNumbers.UNASSIGNED_SEQ_NO), new NumericDocValuesField(NAME, SequenceNumbers.UNASSIGNED_SEQ_NO), - new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); + new NumericDocValuesField(PRIMARY_TERM_NAME, 0), new NumericDocValuesField(TOMBSTONE_NAME, 0)); } } public static final String NAME = "_seq_no"; public static final String CONTENT_TYPE = "_seq_no"; public static final String PRIMARY_TERM_NAME = "_primary_term"; + public static final String TOMBSTONE_NAME = "_tombstone"; public static class SeqNoDefaults { public static final String NAME = SeqNoFieldMapper.NAME; @@ -239,9 +242,8 @@ public class SeqNoFieldMapper extends MetadataFieldMapper { } @Override - public Mapper parse(ParseContext
context) throws IOException { + public void parse(ParseContext context) throws IOException { // fields are added in parseCreateField - return null; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index f2090613c09..3c7c0dd290a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; @@ -49,6 +50,7 @@ import java.util.function.Function; public class SourceFieldMapper extends MetadataFieldMapper { public static final String NAME = "_source"; + public static final String RECOVERY_SOURCE_NAME = "_recovery_source"; public static final String CONTENT_TYPE = "_source"; private final Function, Map> filter; @@ -217,14 +219,14 @@ public class SourceFieldMapper extends MetadataFieldMapper { } @Override - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) throws IOException { // nothing to do here, we will call it in pre parse - return null; } @Override protected void parseCreateField(ParseContext context, List fields) throws IOException { - BytesReference source = context.sourceToParse().source(); + BytesReference originalSource = context.sourceToParse().source(); + BytesReference source = originalSource; if (enabled && fieldType().stored() && source != null) { // Percolate and tv APIs may not set the source and that is ok, because these APIs will not index any data if (filter != null) { @@ -240,8 +242,17 @@ public class SourceFieldMapper extends MetadataFieldMapper { } BytesRef ref = source.toBytesRef(); fields.add(new StoredField(fieldType().name(), ref.bytes, ref.offset, ref.length)); + } else { + source = null; } - } + + if (originalSource != null && source != originalSource && context.indexSettings().isSoftDeleteEnabled()) { + // if we omitted source or modified it we add the _recovery_source to ensure we have it for ops based recovery + BytesRef ref = originalSource.toBytesRef(); + fields.add(new StoredField(RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length)); + fields.add(new NumericDocValuesField(RECOVERY_SOURCE_NAME, 1)); + } + } @Override protected String contentType() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 29f1cbb721f..f7bcab21d72 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -296,7 +296,7 @@ public class TextFieldMapper extends FieldMapper { @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { - TokenFilter filter = new EdgeNGramTokenFilter(components.getTokenStream(), minChars, maxChars); + TokenFilter filter = new EdgeNGramTokenFilter(components.getTokenStream(), minChars, maxChars, false); return new TokenStreamComponents(components.getTokenizer(), filter); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index 
71bd2e93d30..162ce2a3fde 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -25,7 +25,7 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermContext; +import org.apache.lucene.index.TermStates; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; @@ -216,7 +216,7 @@ public class TypeFieldMapper extends MetadataFieldMapper { for (BytesRef type : types) { if (uniqueTypes.add(type)) { Term term = new Term(CONTENT_TYPE, type); - TermContext context = TermContext.build(reader.getContext(), term); + TermStates context = TermStates.build(reader.getContext(), term, true); if (context.docFreq() == 0) { // this _type is not present in the reader continue; @@ -287,9 +287,8 @@ public class TypeFieldMapper extends MetadataFieldMapper { } @Override - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) throws IOException { // we parse in pre parse - return null; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index a6a5fab0d04..a43aed3b08d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -122,8 +122,7 @@ public class TypeParsers { } } - public static void parseNorms(FieldMapper.Builder builder, String fieldName, Object propNode, - Mapper.TypeParser.ParserContext parserContext) { + public static void parseNorms(FieldMapper.Builder builder, String fieldName, Object propNode) { builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode, fieldName + ".norms") == false); } @@ -140,7 +139,7 @@ public class TypeParsers { final String propName = entry.getKey(); final Object propNode = entry.getValue(); if ("norms".equals(propName)) { - parseNorms(builder, name, propNode, parserContext); + parseNorms(builder, name, propNode); iterator.remove(); } } @@ -265,7 +264,10 @@ public class TypeParsers { } public static FormatDateTimeFormatter parseDateTimeFormatter(Object node) { - return Joda.forPattern(node.toString()); + if (node instanceof String) { + return Joda.forPattern((String) node); + } + throw new IllegalArgumentException("Invalid format: [" + node.toString() + "]: expected string value"); } public static void parseTermVector(String fieldName, String termVector, FieldMapper.Builder builder) throws MapperParsingException { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java index ef3c63f4889..6afac0fcf81 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java @@ -117,9 +117,8 @@ public class VersionFieldMapper extends MetadataFieldMapper { } @Override - public Mapper parse(ParseContext context) throws IOException { + public void parse(ParseContext context) throws IOException { // _version added in preparse - return null; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java 
b/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java index 35b0d18b1e8..f3e6f6c8061 100644 --- a/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.apache.lucene.queries.BoostingQuery; +import org.apache.lucene.queries.function.FunctionScoreQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; @@ -201,7 +201,7 @@ public class BoostingQueryBuilder extends AbstractQueryBuilder= " + Version.V_5_5_0.toString()); - } out.writeOptionalString(name); out.writeBoolean(ignoreUnmapped); out.writeVInt(from); @@ -252,84 +235,6 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { } } - /** - * BWC serialization for nested {@link InnerHitBuilder}. - * Should only be used to send nested inner hits to nodes pre 5.5. - */ - protected void writeToNestedBWC(StreamOutput out, QueryBuilder query, String nestedPath) throws IOException { - assert out.getVersion().before(Version.V_5_5_0) : - "invalid output version, must be < " + Version.V_5_5_0.toString(); - writeToBWC(out, query, nestedPath, null); - } - - /** - * BWC serialization for collapsing {@link InnerHitBuilder}. - * Should only be used to send collapsing inner hits to nodes pre 5.5. - */ - public void writeToCollapseBWC(StreamOutput out) throws IOException { - assert out.getVersion().before(Version.V_5_5_0) : - "invalid output version, must be < " + Version.V_5_5_0.toString(); - writeToBWC(out, new MatchAllQueryBuilder(), null, null); - } - - /** - * BWC serialization for parent/child {@link InnerHitBuilder}. - * Should only be used to send hasParent or hasChild inner hits to nodes pre 5.5. - */ - public void writeToParentChildBWC(StreamOutput out, QueryBuilder query, String parentChildPath) throws IOException { - assert(out.getVersion().before(Version.V_5_5_0)) : - "invalid output version, must be < " + Version.V_5_5_0.toString(); - writeToBWC(out, query, null, parentChildPath); - } - - private void writeToBWC(StreamOutput out, - QueryBuilder query, - String nestedPath, - String parentChildPath) throws IOException { - out.writeOptionalString(name); - if (nestedPath != null) { - out.writeOptionalString(nestedPath); - out.writeOptionalString(null); - } else { - out.writeOptionalString(null); - out.writeOptionalString(parentChildPath); - } - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeBoolean(ignoreUnmapped); - } - out.writeVInt(from); - out.writeVInt(size); - out.writeBoolean(explain); - out.writeBoolean(version); - out.writeBoolean(trackScores); - out.writeOptionalWriteable(storedFieldsContext); - out.writeGenericValue(docValueFields == null - ? 
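The import swap above reflects that Lucene removed `BoostingQuery`; the replacement is a static factory on `FunctionScoreQuery`. A sketch of the equivalent construction, assuming Lucene 7.1 or later:

// Sketch: demote documents matching `negative` by `negativeBoost`, the
// FunctionScoreQuery-based replacement for the removed BoostingQuery.
import org.apache.lucene.queries.function.FunctionScoreQuery;
import org.apache.lucene.search.Query;

final class Boosting {
    static Query boosting(Query positive, Query negative, float negativeBoost) {
        return FunctionScoreQuery.boostByQuery(positive, negative, negativeBoost);
    }
}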
null - : docValueFields.stream().map(ff -> ff.field).collect(Collectors.toList())); - boolean hasScriptFields = scriptFields != null; - out.writeBoolean(hasScriptFields); - if (hasScriptFields) { - out.writeVInt(scriptFields.size()); - Iterator iterator = scriptFields.stream() - .sorted(Comparator.comparing(ScriptField::fieldName)).iterator(); - while (iterator.hasNext()) { - iterator.next().writeTo(out); - } - } - out.writeOptionalWriteable(fetchSourceContext); - boolean hasSorts = sorts != null; - out.writeBoolean(hasSorts); - if (hasSorts) { - out.writeVInt(sorts.size()); - for (SortBuilder sort : sorts) { - out.writeNamedWriteable(sort); - } - } - out.writeOptionalWriteable(highlightBuilder); - out.writeNamedWriteable(query); - out.writeBoolean(false); - } - public String getName() { return name; } diff --git a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java index 0de474f8b99..950c9e052ad 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.termvectors.MultiTermVectorsItemResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; @@ -47,7 +46,6 @@ import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; @@ -220,11 +218,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder out.writeString(path); out.writeVInt(scoreMode.ordinal()); out.writeNamedWriteable(query); - if (out.getVersion().before(Version.V_5_5_0)) { - final boolean hasInnerHit = innerHitBuilder != null; - out.writeBoolean(hasInnerHit); - if (hasInnerHit) { - innerHitBuilder.writeToNestedBWC(out, query, path); - } - } else { - out.writeOptionalWriteable(innerHitBuilder); - } + out.writeOptionalWriteable(innerHitBuilder); out.writeBoolean(ignoreUnmapped); } @@ -374,9 +369,9 @@ public class NestedQueryBuilder extends AbstractQueryBuilder } @Override - public TopDocs[] topDocs(SearchHit[] hits) throws IOException { + public TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException { Weight innerHitQueryWeight = createInnerHitQueryWeight(); - TopDocs[] result = new TopDocs[hits.length]; + TopDocsAndMaxScore[] result = new TopDocsAndMaxScore[hits.length]; for (int i = 0; i < hits.length; i++) { SearchHit hit = hits[i]; Query rawParentFilter; @@ -394,25 +389,38 @@ public class NestedQueryBuilder extends AbstractQueryBuilder Query childFilter = childObjectMapper.nestedTypeFilter(); BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter); Query q = new ParentChildrenBlockJoinQuery(parentFilter, childFilter, parentDocId); - Weight weight = context.searcher().createNormalizedWeight(q, false); + Weight 
weight = context.searcher().createWeight(context.searcher().rewrite(q), + org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES, 1f); if (size() == 0) { TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx); - result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0); + result[i] = new TopDocsAndMaxScore(new TopDocs(new TotalHits(totalHitCountCollector.getTotalHits(), + TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN); } else { int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); TopDocsCollector topDocsCollector; + MaxScoreCollector maxScoreCollector = null; if (sort() != null) { - topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores(), true); + topDocsCollector = TopFieldCollector.create(sort().sort, topN, Integer.MAX_VALUE); + if (trackScores()) { + maxScoreCollector = new MaxScoreCollector(); + } } else { - topDocsCollector = TopScoreDocCollector.create(topN); + topDocsCollector = TopScoreDocCollector.create(topN, Integer.MAX_VALUE); + maxScoreCollector = new MaxScoreCollector(); } try { - intersect(weight, innerHitQueryWeight, topDocsCollector, ctx); + intersect(weight, innerHitQueryWeight, MultiCollector.wrap(topDocsCollector, maxScoreCollector), ctx); } finally { clearReleasables(Lifetime.COLLECTION); } - result[i] = topDocsCollector.topDocs(from(), size()); + + TopDocs td = topDocsCollector.topDocs(from(), size()); + float maxScore = Float.NaN; + if (maxScoreCollector != null) { + maxScore = maxScoreCollector.getMaxScore(); + } + result[i] = new TopDocsAndMaxScore(td, maxScore); } } return result; diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index e9d53d8e829..0289ce6f6ae 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -175,9 +175,6 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder i if (formatString != null) { format = Joda.forPattern(formatString); } - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - String relationString = in.readOptionalString(); - if (relationString != null) { - relation = ShapeRelation.getRelationByName(relationString); - if (relation != null && !isRelationAllowed(relation)) { - throw new IllegalArgumentException( - "[range] query does not support relation [" + relationString + "]"); - } + String relationString = in.readOptionalString(); + if (relationString != null) { + relation = ShapeRelation.getRelationByName(relationString); + if (relation != null && !isRelationAllowed(relation)) { + throw new IllegalArgumentException( + "[range] query does not support relation [" + relationString + "]"); } } } @@ -139,13 +136,11 @@ public class RangeQueryBuilder extends AbstractQueryBuilder i formatString = this.format.format(); } out.writeOptionalString(formatString); - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - String relationString = null; - if (this.relation != null) { - relationString = this.relation.getRelationName(); - } - out.writeOptionalString(relationString); + String relationString = null; + if (this.relation != null) { + relationString = this.relation.getRelationName(); } + out.writeOptionalString(relationString); } /** diff --git 
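Since Lucene's `TopDocs` no longer carries a max score, the inner-hits hunk above tracks it with a separate collector and multiplexes both through `MultiCollector`. A self-contained sketch of that pattern, with a hand-rolled max-score collector standing in for Elasticsearch's `MaxScoreCollector`:

// Sketch: collect top hits and the max score in one search pass.
import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorable;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopScoreDocCollector;

final class MaxScoreOnly extends SimpleCollector {
    private Scorable scorer;
    float maxScore = Float.NaN;

    @Override public void setScorer(Scorable scorer) { this.scorer = scorer; }

    @Override public void collect(int doc) throws IOException {
        float score = scorer.score();
        if (Float.isNaN(maxScore) || score > maxScore) {
            maxScore = score;
        }
    }

    @Override public ScoreMode scoreMode() { return ScoreMode.COMPLETE; } // scores required

    // Usage: one pass fills both collectors; maxScore stays NaN if nothing matched.
    static TopDocs topDocsWithMaxScore(IndexSearcher searcher, Query query, int topN) throws IOException {
        TopScoreDocCollector topDocs = TopScoreDocCollector.create(topN, Integer.MAX_VALUE);
        MaxScoreOnly maxScore = new MaxScoreOnly();
        searcher.search(query, MultiCollector.wrap(topDocs, maxScore));
        return topDocs.topDocs();
    }
}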
a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index c08f342d508..50586aa2522 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.ConstantScoreWeight; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; @@ -169,7 +170,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, boost) { @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index 46a958b58fe..473aa636caa 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -168,27 +168,11 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder topLevel, Term term, int docCount, float boost, TermContext states) { + protected void addClause(List topLevel, Term term, int docCount, float boost, TermStates states) { SpanTermQuery q = new SpanTermQuery(term, states); topLevel.add(q); } diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java index 54c25b40501..7d6dd4a59cb 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java @@ -554,7 +554,7 @@ public abstract class DecayFunctionBuilder } double value = distance.doubleValue(); return Explanation.match( - (float) score(docId, subQueryScore.getValue()), + (float) score(docId, subQueryScore.getValue().floatValue()), "Function for field " + getFieldName() + ":", func.explainFunction(getDistanceString(ctx, docId), value, scale)); } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java index 8536337bfdb..b2e6f98f126 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java @@ -44,8 +44,8 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; public abstract class AbstractBulkByScrollRequest> extends ActionRequest { public static final int SIZE_ALL_MATCHES = -1; - static final TimeValue DEFAULT_SCROLL_TIMEOUT = timeValueMinutes(5); - static final int DEFAULT_SCROLL_SIZE = 1000; + public static final TimeValue DEFAULT_SCROLL_TIMEOUT = timeValueMinutes(5); + public static final int DEFAULT_SCROLL_SIZE = 1000; public static final int AUTO_SLICES = 0; public static final String 
AUTO_SLICES_VALUE = "auto"; @@ -252,6 +252,14 @@ public abstract class AbstractBulkByScrollRequest searchFailures; private boolean timedOut; + private static final String TOOK_FIELD = "took"; + private static final String TIMED_OUT_FIELD = "timed_out"; + private static final String FAILURES_FIELD = "failures"; + + @SuppressWarnings("unchecked") + private static final ObjectParser PARSER = + new ObjectParser<>( + "bulk_by_scroll_response", + true, + BulkByScrollResponseBuilder::new + ); + static { + PARSER.declareLong(BulkByScrollResponseBuilder::setTook, new ParseField(TOOK_FIELD)); + PARSER.declareBoolean(BulkByScrollResponseBuilder::setTimedOut, new ParseField(TIMED_OUT_FIELD)); + PARSER.declareObjectArray( + BulkByScrollResponseBuilder::setFailures, (p, c) -> parseFailure(p), new ParseField(FAILURES_FIELD) + ); + // since the result of BulkByScrollResponse.Status are mixed we also parse that in this + Status.declareFields(PARSER); + } + public BulkByScrollResponse() { } @@ -87,6 +118,10 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContentFr return status.getCreated(); } + public long getTotal() { + return status.getTotal(); + } + public long getDeleted() { return status.getDeleted(); } @@ -171,8 +206,8 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContentFr @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("took", took.millis()); - builder.field("timed_out", timedOut); + builder.field(TOOK_FIELD, took.millis()); + builder.field(TIMED_OUT_FIELD, timedOut); status.innerXContent(builder, params); builder.startArray("failures"); for (Failure failure: bulkFailures) { @@ -187,6 +222,80 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContentFr return builder; } + public static BulkByScrollResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null).buildResponse(); + } + + private static Object parseFailure(XContentParser parser) throws IOException { + ensureExpectedToken(Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + Token token; + String index = null; + String type = null; + String id = null; + Integer status = null; + Integer shardId = null; + String nodeId = null; + ElasticsearchException bulkExc = null; + ElasticsearchException searchExc = null; + while ((token = parser.nextToken()) != Token.END_OBJECT) { + ensureExpectedToken(Token.FIELD_NAME, token, parser::getTokenLocation); + String name = parser.currentName(); + token = parser.nextToken(); + if (token == Token.START_ARRAY) { + parser.skipChildren(); + } else if (token == Token.START_OBJECT) { + switch (name) { + case SearchFailure.REASON_FIELD: + bulkExc = ElasticsearchException.fromXContent(parser); + break; + case Failure.CAUSE_FIELD: + searchExc = ElasticsearchException.fromXContent(parser); + break; + default: + parser.skipChildren(); + } + } else if (token == Token.VALUE_STRING) { + switch (name) { + // This field is the same as SearchFailure.index + case Failure.INDEX_FIELD: + index = parser.text(); + break; + case Failure.TYPE_FIELD: + type = parser.text(); + break; + case Failure.ID_FIELD: + id = parser.text(); + break; + case SearchFailure.NODE_FIELD: + nodeId = parser.text(); + break; + default: + // Do nothing + break; + } + } else if (token == Token.VALUE_NUMBER) { + switch (name) { + case Failure.STATUS_FIELD: + status = parser.intValue(); + break; + case SearchFailure.SHARD_FIELD: + shardId = parser.intValue(); + break; + 
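The lenient `ObjectParser` above is the declarative half of the response parsing; only the polymorphic failures need the manual `parseFailure` walk below. A stripped-down sketch of the declarative pattern, with a hypothetical `TookBuilder` standing in for `BulkByScrollResponseBuilder`:

// Minimal sketch of the ObjectParser-into-builder pattern; TookBuilder is a
// hypothetical stand-in, field names follow the response format above.
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

final class TookBuilder {
    long took;
    boolean timedOut;
    void setTook(long took) { this.took = took; }
    void setTimedOut(boolean timedOut) { this.timedOut = timedOut; }

    static final ObjectParser<TookBuilder, Void> PARSER =
        new ObjectParser<>("took_builder", true, TookBuilder::new); // true: ignore unknown fields
    static {
        PARSER.declareLong(TookBuilder::setTook, new ParseField("took"));
        PARSER.declareBoolean(TookBuilder::setTimedOut, new ParseField("timed_out"));
    }

    static TookBuilder fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }
}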
default: + // Do nothing + break; + } + } + } + if (bulkExc != null) { + return new Failure(index, type, id, bulkExc, RestStatus.fromCode(status)); + } else if (searchExc != null) { + return new SearchFailure(searchExc, index, shardId, nodeId); + } else { + throw new ElasticsearchParseException("failed to parse failures array. At least one of {reason,cause} must be present"); + } + } + @Override public String toString() { StringBuilder builder = new StringBuilder(); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponseBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponseBuilder.java new file mode 100644 index 00000000000..ad5bfd6e03c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponseBuilder.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.action.bulk.BulkItemResponse.Failure; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; +import org.elasticsearch.index.reindex.BulkByScrollTask.StatusBuilder; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Helps build a {@link BulkByScrollResponse}. Used by an instance of {@link ObjectParser} when parsing from XContent. 
+ */ +class BulkByScrollResponseBuilder extends StatusBuilder { + private TimeValue took; + private BulkByScrollTask.Status status; + private List bulkFailures = new ArrayList<>(); + private List searchFailures = new ArrayList<>(); + private boolean timedOut; + + BulkByScrollResponseBuilder() {} + + public void setTook(long took) { + setTook(new TimeValue(took, TimeUnit.MILLISECONDS)); + } + + public void setTook(TimeValue took) { + this.took = took; + } + + public void setStatus(BulkByScrollTask.Status status) { + this.status = status; + } + + public void setFailures(List failures) { + if (failures != null) { + for (Object object: failures) { + if (object instanceof Failure) { + bulkFailures.add((Failure) object); + } else if (object instanceof SearchFailure) { + searchFailures.add((SearchFailure) object); + } + } + } + } + + public void setTimedOut(boolean timedOut) { + this.timedOut = timedOut; + } + + public BulkByScrollResponse buildResponse() { + status = super.buildStatus(); + return new BulkByScrollResponse(took, status, bulkFailures, searchFailures, timedOut); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java index 9ff26b13212..7aa2c8a1b75 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java @@ -20,29 +20,41 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; import static java.lang.Math.min; import static java.util.Collections.emptyList; import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; /** * Task storing information about a currently running BulkByScroll request. @@ -189,6 +201,124 @@ public class BulkByScrollTask extends CancellableTask { return true; } + /** + * This class acts as a builder for {@link Status}. Once the {@link Status} object is built by calling + * {@link #buildStatus()} it is immutable. Used by an instance of {@link ObjectParser} when parsing from + * XContent. 
+ */ + public static class StatusBuilder { + private Integer sliceId = null; + private Long total = null; + private long updated = 0; // Not present during deleteByQuery + private long created = 0; // Not present during updateByQuery + private Long deleted = null; + private Integer batches = null; + private Long versionConflicts = null; + private Long noops = null; + private Long bulkRetries = null; + private Long searchRetries = null; + private TimeValue throttled = null; + private Float requestsPerSecond = null; + private String reasonCancelled = null; + private TimeValue throttledUntil = null; + private List sliceStatuses = new ArrayList<>(); + + public void setSliceId(Integer sliceId) { + this.sliceId = sliceId; + } + + public void setTotal(Long total) { + this.total = total; + } + + public void setUpdated(Long updated) { + this.updated = updated; + } + + public void setCreated(Long created) { + this.created = created; + } + + public void setDeleted(Long deleted) { + this.deleted = deleted; + } + + public void setBatches(Integer batches) { + this.batches = batches; + } + + public void setVersionConflicts(Long versionConflicts) { + this.versionConflicts = versionConflicts; + } + + public void setNoops(Long noops) { + this.noops = noops; + } + + public void setRetries(Tuple retries) { + if (retries != null) { + setBulkRetries(retries.v1()); + setSearchRetries(retries.v2()); + } + } + + public void setBulkRetries(Long bulkRetries) { + this.bulkRetries = bulkRetries; + } + + public void setSearchRetries(Long searchRetries) { + this.searchRetries = searchRetries; + } + + public void setThrottled(Long throttled) { + if (throttled != null) { + this.throttled = new TimeValue(throttled, TimeUnit.MILLISECONDS); + } + } + + public void setRequestsPerSecond(Float requestsPerSecond) { + if (requestsPerSecond != null) { + requestsPerSecond = requestsPerSecond == -1 ? 
Float.POSITIVE_INFINITY : requestsPerSecond; + this.requestsPerSecond = requestsPerSecond; + } + } + + public void setReasonCancelled(String reasonCancelled) { + this.reasonCancelled = reasonCancelled; + } + + public void setThrottledUntil(Long throttledUntil) { + if (throttledUntil != null) { + this.throttledUntil = new TimeValue(throttledUntil, TimeUnit.MILLISECONDS); + } + } + + public void setSliceStatuses(List sliceStatuses) { + if (sliceStatuses != null) { + this.sliceStatuses.addAll(sliceStatuses); + } + } + + public void addToSliceStatuses(StatusOrException statusOrException) { + this.sliceStatuses.add(statusOrException); + } + + public Status buildStatus() { + if (sliceStatuses.isEmpty()) { + try { + return new Status( + sliceId, total, updated, created, deleted, batches, versionConflicts, noops, bulkRetries, + searchRetries, throttled, requestsPerSecond, reasonCancelled, throttledUntil + ); + } catch (NullPointerException npe) { + throw new IllegalArgumentException("a required field is null when building Status"); + } + } else { + return new Status(sliceStatuses, reasonCancelled); + } + } + } + public static class Status implements Task.Status, SuccessfullyProcessed { public static final String NAME = "bulk-by-scroll"; @@ -204,6 +334,76 @@ public class BulkByScrollTask extends CancellableTask { */ public static final String INCLUDE_UPDATED = "include_updated"; + public static final String SLICE_ID_FIELD = "slice_id"; + public static final String TOTAL_FIELD = "total"; + public static final String UPDATED_FIELD = "updated"; + public static final String CREATED_FIELD = "created"; + public static final String DELETED_FIELD = "deleted"; + public static final String BATCHES_FIELD = "batches"; + public static final String VERSION_CONFLICTS_FIELD = "version_conflicts"; + public static final String NOOPS_FIELD = "noops"; + public static final String RETRIES_FIELD = "retries"; + public static final String RETRIES_BULK_FIELD = "bulk"; + public static final String RETRIES_SEARCH_FIELD = "search"; + public static final String THROTTLED_RAW_FIELD = "throttled_millis"; + public static final String THROTTLED_HR_FIELD = "throttled"; + public static final String REQUESTS_PER_SEC_FIELD = "requests_per_second"; + public static final String CANCELED_FIELD = "canceled"; + public static final String THROTTLED_UNTIL_RAW_FIELD = "throttled_until_millis"; + public static final String THROTTLED_UNTIL_HR_FIELD = "throttled_until"; + public static final String SLICES_FIELD = "slices"; + + public static Set FIELDS_SET = new HashSet<>(); + static { + FIELDS_SET.add(SLICE_ID_FIELD); + FIELDS_SET.add(TOTAL_FIELD); + FIELDS_SET.add(UPDATED_FIELD); + FIELDS_SET.add(CREATED_FIELD); + FIELDS_SET.add(DELETED_FIELD); + FIELDS_SET.add(BATCHES_FIELD); + FIELDS_SET.add(VERSION_CONFLICTS_FIELD); + FIELDS_SET.add(NOOPS_FIELD); + FIELDS_SET.add(RETRIES_FIELD); + // No need for inner level fields for retries in the set of outer level fields + FIELDS_SET.add(THROTTLED_RAW_FIELD); + FIELDS_SET.add(THROTTLED_HR_FIELD); + FIELDS_SET.add(REQUESTS_PER_SEC_FIELD); + FIELDS_SET.add(CANCELED_FIELD); + FIELDS_SET.add(THROTTLED_UNTIL_RAW_FIELD); + FIELDS_SET.add(THROTTLED_UNTIL_HR_FIELD); + FIELDS_SET.add(SLICES_FIELD); + } + + @SuppressWarnings("unchecked") + static ConstructingObjectParser, Void> RETRIES_PARSER = new ConstructingObjectParser<>( + "bulk_by_scroll_task_status_retries", + true, + a -> new Tuple(a[0], a[1]) + ); + static { + RETRIES_PARSER.declareLong(constructorArg(), new ParseField(RETRIES_BULK_FIELD)); + 
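One subtlety in `setRequestsPerSecond` above: the JSON and wire formats use `-1` to mean unthrottled, while the in-memory representation is `Float.POSITIVE_INFINITY` (the reverse mapping appears in `innerXContent` further down). In isolation:

// Round-trip of the "unthrottled" sentinel used by the status above.
final class RequestsPerSecond {
    static float fromWireFormat(float requestsPerSecond) {
        return requestsPerSecond == -1 ? Float.POSITIVE_INFINITY : requestsPerSecond;
    }

    static float toWireFormat(float requestsPerSecond) {
        return requestsPerSecond == Float.POSITIVE_INFINITY ? -1 : requestsPerSecond;
    }
}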
RETRIES_PARSER.declareLong(constructorArg(), new ParseField(RETRIES_SEARCH_FIELD)); + } + + public static void declareFields(ObjectParser parser) { + parser.declareInt(StatusBuilder::setSliceId, new ParseField(SLICE_ID_FIELD)); + parser.declareLong(StatusBuilder::setTotal, new ParseField(TOTAL_FIELD)); + parser.declareLong(StatusBuilder::setUpdated, new ParseField(UPDATED_FIELD)); + parser.declareLong(StatusBuilder::setCreated, new ParseField(CREATED_FIELD)); + parser.declareLong(StatusBuilder::setDeleted, new ParseField(DELETED_FIELD)); + parser.declareInt(StatusBuilder::setBatches, new ParseField(BATCHES_FIELD)); + parser.declareLong(StatusBuilder::setVersionConflicts, new ParseField(VERSION_CONFLICTS_FIELD)); + parser.declareLong(StatusBuilder::setNoops, new ParseField(NOOPS_FIELD)); + parser.declareObject(StatusBuilder::setRetries, RETRIES_PARSER, new ParseField(RETRIES_FIELD)); + parser.declareLong(StatusBuilder::setThrottled, new ParseField(THROTTLED_RAW_FIELD)); + parser.declareFloat(StatusBuilder::setRequestsPerSecond, new ParseField(REQUESTS_PER_SEC_FIELD)); + parser.declareString(StatusBuilder::setReasonCancelled, new ParseField(CANCELED_FIELD)); + parser.declareLong(StatusBuilder::setThrottledUntil, new ParseField(THROTTLED_UNTIL_RAW_FIELD)); + parser.declareObjectArray( + StatusBuilder::setSliceStatuses, (p, c) -> StatusOrException.fromXContent(p), new ParseField(SLICES_FIELD) + ); + } + private final Integer sliceId; private final long total; private final long updated; @@ -303,11 +503,7 @@ public class BulkByScrollTask extends CancellableTask { } public Status(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - sliceId = in.readOptionalVInt(); - } else { - sliceId = null; - } + sliceId = in.readOptionalVInt(); total = in.readVLong(); updated = in.readVLong(); created = in.readVLong(); @@ -321,18 +517,12 @@ public class BulkByScrollTask extends CancellableTask { requestsPerSecond = in.readFloat(); reasonCancelled = in.readOptionalString(); throttledUntil = in.readTimeValue(); - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - sliceStatuses = in.readList(stream -> stream.readOptionalWriteable(StatusOrException::new)); - } else { - sliceStatuses = emptyList(); - } + sliceStatuses = in.readList(stream -> stream.readOptionalWriteable(StatusOrException::new)); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeOptionalVInt(sliceId); - } + out.writeOptionalVInt(sliceId); out.writeVLong(total); out.writeVLong(updated); out.writeVLong(created); @@ -346,11 +536,9 @@ public class BulkByScrollTask extends CancellableTask { out.writeFloat(requestsPerSecond); out.writeOptionalString(reasonCancelled); out.writeTimeValue(throttledUntil); - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeVInt(sliceStatuses.size()); - for (StatusOrException sliceStatus : sliceStatuses) { - out.writeOptionalWriteable(sliceStatus); - } + out.writeVInt(sliceStatuses.size()); + for (StatusOrException sliceStatus : sliceStatuses) { + out.writeOptionalWriteable(sliceStatus); } } @@ -366,35 +554,40 @@ public class BulkByScrollTask extends CancellableTask { return builder.endObject(); } + /** + * We need to write a manual parser for this because of {@link StatusOrException}. Since + * {@link StatusOrException#fromXContent(XContentParser)} tries to peek at a field first before deciding + * what needs to be it cannot use an {@link ObjectParser}. 
+ */ public XContentBuilder innerXContent(XContentBuilder builder, Params params) throws IOException { if (sliceId != null) { - builder.field("slice_id", sliceId); + builder.field(SLICE_ID_FIELD, sliceId); } - builder.field("total", total); + builder.field(TOTAL_FIELD, total); if (params.paramAsBoolean(INCLUDE_UPDATED, true)) { - builder.field("updated", updated); + builder.field(UPDATED_FIELD, updated); } if (params.paramAsBoolean(INCLUDE_CREATED, true)) { - builder.field("created", created); + builder.field(CREATED_FIELD, created); } - builder.field("deleted", deleted); - builder.field("batches", batches); - builder.field("version_conflicts", versionConflicts); - builder.field("noops", noops); - builder.startObject("retries"); { - builder.field("bulk", bulkRetries); - builder.field("search", searchRetries); + builder.field(DELETED_FIELD, deleted); + builder.field(BATCHES_FIELD, batches); + builder.field(VERSION_CONFLICTS_FIELD, versionConflicts); + builder.field(NOOPS_FIELD, noops); + builder.startObject(RETRIES_FIELD); { + builder.field(RETRIES_BULK_FIELD, bulkRetries); + builder.field(RETRIES_SEARCH_FIELD, searchRetries); } builder.endObject(); - builder.humanReadableField("throttled_millis", "throttled", throttled); - builder.field("requests_per_second", requestsPerSecond == Float.POSITIVE_INFINITY ? -1 : requestsPerSecond); + builder.humanReadableField(THROTTLED_RAW_FIELD, THROTTLED_HR_FIELD, throttled); + builder.field(REQUESTS_PER_SEC_FIELD, requestsPerSecond == Float.POSITIVE_INFINITY ? -1 : requestsPerSecond); if (reasonCancelled != null) { - builder.field("canceled", reasonCancelled); + builder.field(CANCELED_FIELD, reasonCancelled); } - builder.humanReadableField("throttled_until_millis", "throttled_until", throttledUntil); + builder.humanReadableField(THROTTLED_UNTIL_RAW_FIELD, THROTTLED_UNTIL_HR_FIELD, throttledUntil); if (false == sliceStatuses.isEmpty()) { - builder.startArray("slices"); + builder.startArray(SLICES_FIELD); for (StatusOrException slice : sliceStatuses) { if (slice == null) { builder.nullValue(); @@ -407,6 +600,87 @@ public class BulkByScrollTask extends CancellableTask { return builder; } + public static Status fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + if (parser.currentToken() == Token.START_OBJECT) { + token = parser.nextToken(); + } else { + token = parser.nextToken(); + } + ensureExpectedToken(Token.START_OBJECT, token, parser::getTokenLocation); + token = parser.nextToken(); + ensureExpectedToken(Token.FIELD_NAME, token, parser::getTokenLocation); + return innerFromXContent(parser); + } + + public static Status innerFromXContent(XContentParser parser) throws IOException { + Token token = parser.currentToken(); + String fieldName = parser.currentName(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); + StatusBuilder builder = new StatusBuilder(); + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token == Token.START_OBJECT) { + if (fieldName.equals(Status.RETRIES_FIELD)) { + builder.setRetries(Status.RETRIES_PARSER.parse(parser, null)); + } else { + parser.skipChildren(); + } + } else if (token == Token.START_ARRAY) { + if (fieldName.equals(Status.SLICES_FIELD)) { + while ((token = parser.nextToken()) != Token.END_ARRAY) { + builder.addToSliceStatuses(StatusOrException.fromXContent(parser)); + } + } else { + parser.skipChildren(); + } + } else { // else if it is a value + 
switch (fieldName) { + case Status.SLICE_ID_FIELD: + builder.setSliceId(parser.intValue()); + break; + case Status.TOTAL_FIELD: + builder.setTotal(parser.longValue()); + break; + case Status.UPDATED_FIELD: + builder.setUpdated(parser.longValue()); + break; + case Status.CREATED_FIELD: + builder.setCreated(parser.longValue()); + break; + case Status.DELETED_FIELD: + builder.setDeleted(parser.longValue()); + break; + case Status.BATCHES_FIELD: + builder.setBatches(parser.intValue()); + break; + case Status.VERSION_CONFLICTS_FIELD: + builder.setVersionConflicts(parser.longValue()); + break; + case Status.NOOPS_FIELD: + builder.setNoops(parser.longValue()); + break; + case Status.THROTTLED_RAW_FIELD: + builder.setThrottled(parser.longValue()); + break; + case Status.REQUESTS_PER_SEC_FIELD: + builder.setRequestsPerSecond(parser.floatValue()); + break; + case Status.CANCELED_FIELD: + builder.setReasonCancelled(parser.text()); + break; + case Status.THROTTLED_UNTIL_RAW_FIELD: + builder.setThrottledUntil(parser.longValue()); + break; + default: + break; + } + } + } + return builder.buildStatus(); + } + @Override public String toString() { StringBuilder builder = new StringBuilder(); @@ -533,6 +807,44 @@ public class BulkByScrollTask extends CancellableTask { return sliceStatuses; } + @Override + public int hashCode() { + return Objects.hash( + sliceId, total, updated, created, deleted, batches, versionConflicts, noops, searchRetries, + bulkRetries, throttled, requestsPerSecond, reasonCancelled, throttledUntil, sliceStatuses + ); + } + + public boolean equalsWithoutSliceStatus(Object o, boolean includeUpdated, boolean includeCreated) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Status other = (Status) o; + return + Objects.equals(sliceId, other.sliceId) && + total == other.total && + (!includeUpdated || updated == other.updated) && + (!includeCreated || created == other.created) && + deleted == other.deleted && + batches == other.batches && + versionConflicts == other.versionConflicts && + noops == other.noops && + searchRetries == other.searchRetries && + bulkRetries == other.bulkRetries && + Objects.equals(throttled, other.throttled) && + requestsPerSecond == other.requestsPerSecond && + Objects.equals(reasonCancelled, other.reasonCancelled) && + Objects.equals(throttledUntil, other.throttledUntil); + } + + @Override + public boolean equals(Object o) { + if (equalsWithoutSliceStatus(o, true, true)) { + return Objects.equals(sliceStatuses, ((Status) o).sliceStatuses); + } else { + return false; + } + } + private int checkPositive(int value, String name) { if (value < 0) { throw new IllegalArgumentException(name + " must be greater than 0 but was [" + value + "]"); @@ -556,6 +868,19 @@ public class BulkByScrollTask extends CancellableTask { private final Status status; private final Exception exception; + public static Set EXPECTED_EXCEPTION_FIELDS = new HashSet<>(); + static { + EXPECTED_EXCEPTION_FIELDS.add("type"); + EXPECTED_EXCEPTION_FIELDS.add("reason"); + EXPECTED_EXCEPTION_FIELDS.add("caused_by"); + EXPECTED_EXCEPTION_FIELDS.add("suppressed"); + EXPECTED_EXCEPTION_FIELDS.add("stack_trace"); + EXPECTED_EXCEPTION_FIELDS.add("header"); + EXPECTED_EXCEPTION_FIELDS.add("error"); + EXPECTED_EXCEPTION_FIELDS.add("root_cause"); + } + + public StatusOrException(Status status) { this.status = status; exception = null; @@ -610,6 +935,48 @@ public class BulkByScrollTask extends CancellableTask { return builder; } + /** + * Since {@link 
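The `innerFromXContent` loop above is the manual counterpart to `ObjectParser`: it tracks the current field name, recurses into the few known objects and arrays, and skips everything else. Reduced to its skeleton (field name `total` kept for illustration):

// Skeleton of the manual pull-parsing loop used by innerFromXContent above.
import java.io.IOException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;

final class PullParsing {
    static long readTotal(XContentParser parser) throws IOException {
        long total = -1;
        String fieldName = null;
        Token token;
        while ((token = parser.nextToken()) != Token.END_OBJECT) {
            if (token == Token.FIELD_NAME) {
                fieldName = parser.currentName();
            } else if (token == Token.START_OBJECT || token == Token.START_ARRAY) {
                parser.skipChildren();          // ignore nested structures
            } else if ("total".equals(fieldName)) {
                total = parser.longValue();     // plain value for a known field
            }
        }
        return total;
    }
}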
StatusOrException} can contain either an {@link Exception} or a {@link Status} we need to peek + * at a field first before deciding what needs to be parsed since the same object could contains either. + * The {@link #EXPECTED_EXCEPTION_FIELDS} contains the fields that are expected when the serialised object + * was an instance of exception and the {@link Status#FIELDS_SET} is the set of fields expected when the + * serialized object was an instance of Status. + */ + public static StatusOrException fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token == null) { + token = parser.nextToken(); + } + if (token == Token.VALUE_NULL) { + return null; + } else { + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation); + token = parser.nextToken(); + // This loop is present only to ignore unknown tokens. It breaks as soon as we find a field + // that is allowed. + while (token != Token.END_OBJECT) { + ensureExpectedToken(Token.FIELD_NAME, token, parser::getTokenLocation); + String fieldName = parser.currentName(); + // weird way to ignore unknown tokens + if (Status.FIELDS_SET.contains(fieldName)) { + return new StatusOrException( + Status.innerFromXContent(parser) + ); + } else if (EXPECTED_EXCEPTION_FIELDS.contains(fieldName)){ + return new StatusOrException(ElasticsearchException.innerFromXContent(parser, false)); + } else { + // Ignore unknown tokens + token = parser.nextToken(); + if (token == Token.START_OBJECT || token == Token.START_ARRAY) { + parser.skipChildren(); + } + token = parser.nextToken(); + } + } + throw new XContentParseException("Unable to parse StatusFromException. Expected fields not found."); + } + } + @Override public boolean equals(Object obj) { if (obj == null) { diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java index f848e8722c7..2713e5e2661 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java @@ -24,6 +24,9 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.tasks.TaskId; import java.io.IOException; @@ -47,12 +50,18 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; *
  • its results won't be visible until the index is refreshed.
  • * */ -public class DeleteByQueryRequest extends AbstractBulkByScrollRequest implements IndicesRequest.Replaceable { +public class DeleteByQueryRequest extends AbstractBulkByScrollRequest + implements IndicesRequest.Replaceable, ToXContentObject { public DeleteByQueryRequest() { + this(new SearchRequest()); } - public DeleteByQueryRequest(SearchRequest search) { + public DeleteByQueryRequest(String... indices) { + this(new SearchRequest(indices)); + } + + DeleteByQueryRequest(SearchRequest search) { this(search, true); } @@ -68,6 +77,78 @@ public class DeleteByQueryRequest extends AbstractBulkByScrollRequest implements CompositeIndicesRequest { +public class ReindexRequest extends AbstractBulkIndexByScrollRequest + implements CompositeIndicesRequest, ToXContentObject { /** * Prototype for index requests. */ @@ -48,9 +54,10 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest 0) { + builder.field("size", getSize()); + } + if (getScript() != null) { + builder.field("script", getScript()); + } + if (isAbortOnVersionConflict() == false) { + builder.field("conflicts", "proceed"); + } + } + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java b/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java index 70f79a9def6..e255b4db34e 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java @@ -26,6 +26,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.HashMap; @@ -35,7 +37,7 @@ import static java.util.Collections.unmodifiableMap; import static java.util.Objects.requireNonNull; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; -public class RemoteInfo implements Writeable { +public class RemoteInfo implements Writeable, ToXContentObject { /** * Default {@link #socketTimeout} for requests that don't have one set. 
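With the new varargs constructor above, callers no longer need to build a `SearchRequest` themselves. A hypothetical usage sketch (the index name is invented for illustration):

// Hypothetical usage of the convenience constructor added above.
DeleteByQueryRequest request = new DeleteByQueryRequest("my-index");
request.setAbortOnVersionConflict(false); // rendered as "conflicts": "proceed" by toXContent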
*/ @@ -92,13 +94,8 @@ public class RemoteInfo implements Writeable { headers.put(in.readString(), in.readString()); } this.headers = unmodifiableMap(headers); - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - socketTimeout = in.readTimeValue(); - connectTimeout = in.readTimeValue(); - } else { - socketTimeout = DEFAULT_SOCKET_TIMEOUT; - connectTimeout = DEFAULT_CONNECT_TIMEOUT; - } + socketTimeout = in.readTimeValue(); + connectTimeout = in.readTimeValue(); if (in.getVersion().onOrAfter(Version.V_6_4_0)) { pathPrefix = in.readOptionalString(); } else { @@ -119,10 +116,8 @@ public class RemoteInfo implements Writeable { out.writeString(header.getKey()); out.writeString(header.getValue()); } - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeTimeValue(socketTimeout); - out.writeTimeValue(connectTimeout); - } + out.writeTimeValue(socketTimeout); + out.writeTimeValue(connectTimeout); if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeOptionalString(pathPrefix); } @@ -197,4 +192,25 @@ public class RemoteInfo implements Writeable { } return b.toString(); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (username != null) { + builder.field("username", username); + } + if (password != null) { + builder.field("password", password); + } + builder.field("host", scheme + "://" + host + ":" + port + + (pathPrefix == null ? "" : "/" + pathPrefix)); + if (headers.size() >0 ) { + builder.field("headers", headers); + } + builder.field("socket_timeout", socketTimeout.getStringRep()); + builder.field("connect_timeout", connectTimeout.getStringRep()); + builder.field("query", query); + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java b/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java index 917b57a9c97..a3901bb7a56 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java @@ -284,6 +284,11 @@ public abstract class ScrollableHitSource { @Nullable private final String nodeId; + public static final String INDEX_FIELD = "index"; + public static final String SHARD_FIELD = "shard"; + public static final String NODE_FIELD = "node"; + public static final String REASON_FIELD = "reason"; + public SearchFailure(Throwable reason, @Nullable String index, @Nullable Integer shardId, @Nullable String nodeId) { this.index = index; this.shardId = shardId; @@ -337,15 +342,15 @@ public abstract class ScrollableHitSource { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); if (index != null) { - builder.field("index", index); + builder.field(INDEX_FIELD, index); } if (shardId != null) { - builder.field("shard", shardId); + builder.field(SHARD_FIELD, shardId); } if (nodeId != null) { - builder.field("node", nodeId); + builder.field(NODE_FIELD, nodeId); } - builder.field("reason"); + builder.field(REASON_FIELD); { builder.startObject(); ElasticsearchException.generateThrowableXContent(builder, params, reason); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java index eb4fd59a7bc..71ffadc9303 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java +++ 
b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java @@ -24,6 +24,9 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.tasks.TaskId; import java.io.IOException; @@ -34,16 +37,22 @@ import java.io.IOException; * representative set of subrequests. This is best-effort but better than {@linkplain ReindexRequest} because scripts can't change the * destination index and things. */ -public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest implements IndicesRequest.Replaceable { +public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest + implements IndicesRequest.Replaceable, ToXContentObject { /** * Ingest pipeline to set on index requests made by this action. */ private String pipeline; public UpdateByQueryRequest() { + this(new SearchRequest()); } - public UpdateByQueryRequest(SearchRequest search) { + public UpdateByQueryRequest(String... indices) { + this(new SearchRequest(indices)); + } + + UpdateByQueryRequest(SearchRequest search) { this(search, true); } @@ -59,8 +68,81 @@ public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest groupQuery) { if (groupQuery == null || groupQuery.isEmpty()) { - return new MatchNoDocsQuery("[multi_match] list of group queries was empty"); + return zeroTermsQuery(); } if (groupQuery.size() == 1) { return groupQuery.get(0); diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java index df96ff87ec2..d3bac583eac 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java @@ -19,47 +19,21 @@ package org.elasticsearch.index.search; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.IpFieldMapper; -import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MetadataFieldMapper; -import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.QueryShardException; import java.util.Collection; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; /** * Helpers to extract and expand field names and boosts */ public final class QueryParserHelper { - // Mapping types the "all-ish" query can be executed against - // TODO: Fix the API so that we don't need a hardcoded list of types - private static final Set ALLOWED_QUERY_MAPPER_TYPES; - - static { - ALLOWED_QUERY_MAPPER_TYPES = new HashSet<>(); - ALLOWED_QUERY_MAPPER_TYPES.add(DateFieldMapper.CONTENT_TYPE); - 
ALLOWED_QUERY_MAPPER_TYPES.add(IpFieldMapper.CONTENT_TYPE); - ALLOWED_QUERY_MAPPER_TYPES.add(KeywordFieldMapper.CONTENT_TYPE); - for (NumberFieldMapper.NumberType nt : NumberFieldMapper.NumberType.values()) { - ALLOWED_QUERY_MAPPER_TYPES.add(nt.typeName()); - } - ALLOWED_QUERY_MAPPER_TYPES.add("scaled_float"); - ALLOWED_QUERY_MAPPER_TYPES.add(TextFieldMapper.CONTENT_TYPE); - } - private QueryParserHelper() {} /** @@ -85,22 +59,6 @@ public final class QueryParserHelper { return fieldsAndWeights; } - /** - * Get a {@link FieldMapper} associated with a field name or null. - * @param mapperService The mapper service where to find the mapping. - * @param field The field name to search. - */ - public static Mapper getFieldMapper(MapperService mapperService, String field) { - DocumentMapper mapper = mapperService.documentMapper(); - if (mapper != null) { - Mapper fieldMapper = mapper.mappers().getMapper(field); - if (fieldMapper != null) { - return fieldMapper; - } - } - return null; - } - public static Map resolveMappingFields(QueryShardContext context, Map fieldsAndWeights) { return resolveMappingFields(context, fieldsAndWeights, null); @@ -138,8 +96,7 @@ public final class QueryParserHelper { * @param fieldOrPattern The field name or the pattern to resolve * @param weight The weight for the field * @param acceptAllTypes Whether all field type should be added when a pattern is expanded. - * If false, only {@link #ALLOWED_QUERY_MAPPER_TYPES} are accepted and other field types - * are discarded from the query. + * If false, only searchable field types are added. * @param acceptMetadataField Whether metadata fields should be added when a pattern is expanded. */ public static Map resolveMappingField(QueryShardContext context, String fieldOrPattern, float weight, @@ -154,8 +111,7 @@ public final class QueryParserHelper { * @param fieldOrPattern The field name or the pattern to resolve * @param weight The weight for the field * @param acceptAllTypes Whether all field type should be added when a pattern is expanded. - * If false, only {@link #ALLOWED_QUERY_MAPPER_TYPES} are accepted and other field types - * are discarded from the query. + * If false, only searchable field types are added. * @param acceptMetadataField Whether metadata fields should be added when a pattern is expanded. * @param fieldSuffix The suffix name to add to the expanded field names if a mapping exists for that name. * The original name of the field is kept if adding the suffix to the field name does not point to a valid field @@ -177,18 +133,20 @@ public final class QueryParserHelper { continue; } - // Ignore fields that are not in the allowed mapper types. Some - // types do not support term queries, and thus we cannot generate - // a special query for them. - String mappingType = fieldType.typeName(); - if (acceptAllTypes == false && ALLOWED_QUERY_MAPPER_TYPES.contains(mappingType) == false) { + if (acceptMetadataField == false && fieldType.name().startsWith("_")) { + // Ignore metadata fields continue; } - // Ignore metadata fields. - Mapper mapper = getFieldMapper(context.getMapperService(), fieldName); - if (acceptMetadataField == false && mapper instanceof MetadataFieldMapper) { - continue; + if (acceptAllTypes == false) { + try { + fieldType.termQuery("", context); + } catch (QueryShardException |UnsupportedOperationException e) { + // field type is never searchable with term queries (eg. 
geo point): ignore + continue; + } catch (IllegalArgumentException |ElasticsearchParseException e) { + // other exceptions are parsing errors or not indexed fields: keep + } } fields.put(fieldName, weight); } diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 50406ed5834..fa2fd033bee 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -280,14 +280,14 @@ public class QueryStringQueryParser extends XQueryParser { @Override public Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException { - if (quoted) { - return getFieldQuery(field, queryText, getPhraseSlop()); - } - if (field != null && EXISTS_FIELD.equals(field)) { return existsQuery(queryText); } + if (quoted) { + return getFieldQuery(field, queryText, getPhraseSlop()); + } + // Detects additional operators '<', '<=', '>', '>=' to handle range query with one side unbounded. // It is required to use a prefix field operator to enable the detection since they are not treated // as logical operator by the query parser (e.g. age:>=10). @@ -333,6 +333,10 @@ public class QueryStringQueryParser extends XQueryParser { @Override protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException { + if (field != null && EXISTS_FIELD.equals(field)) { + return existsQuery(queryText); + } + Map fields = extractMultiFields(field, true); if (fields.isEmpty()) { return newUnmappedFieldQuery(field); @@ -347,6 +351,9 @@ public class QueryStringQueryParser extends XQueryParser { } queryBuilder.setPhraseSlop(slop); Query query = queryBuilder.parse(MultiMatchQueryBuilder.Type.PHRASE, fields, queryText, null); + if (query == null) { + return null; + } return applySlop(query, slop); } catch (IOException e) { throw new ParseException(e.getMessage()); diff --git a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java index 0e46a562488..c56b0d740e7 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java +++ b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java @@ -34,7 +34,7 @@ public abstract class AbstractIndexShardComponent implements IndexShardComponent protected AbstractIndexShardComponent(ShardId shardId, IndexSettings indexSettings) { this.shardId = shardId; this.indexSettings = indexSettings; - this.logger = Loggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId); + this.logger = Loggers.getLogger(getClass(), shardId); this.deprecationLogger = new DeprecationLogger(logger); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java b/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java index a2e738128e3..a6949c05597 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java @@ -89,7 +89,7 @@ public class IndexSearcherWrapper { final IndexSearcher innerIndexSearcher = new IndexSearcher(reader); innerIndexSearcher.setQueryCache(origIndexSearcher.getQueryCache()); innerIndexSearcher.setQueryCachingPolicy(origIndexSearcher.getQueryCachingPolicy()); - 
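The replacement logic above drops the hardcoded type whitelist in favor of probing the field with an empty term query: the exception type, not the mapper type, now decides whether a field participates in "all"-style expansion. Its essence, as a standalone predicate under the same assumptions:

// Sketch of the probe above: only exceptions that mean "this field can never
// be term-queried" exclude the field; parse errors keep it.
final class Searchability {
    static boolean isSearchable(MappedFieldType fieldType, QueryShardContext context) {
        try {
            fieldType.termQuery("", context);
            return true;
        } catch (QueryShardException | UnsupportedOperationException e) {
            return false; // never searchable with term queries (e.g. geo_point)
        } catch (IllegalArgumentException | ElasticsearchParseException e) {
            return true;  // parsing error or unindexed value: still a searchable field
        }
    }
}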
innerIndexSearcher.setSimilarity(origIndexSearcher.getSimilarity(true)); + innerIndexSearcher.setSimilarity(origIndexSearcher.getSimilarity()); // TODO: Right now IndexSearcher isn't wrapper friendly, when it becomes wrapper friendly we should revise this extension point // For example if IndexSearcher#rewrite() is overwritten than also IndexSearcher#createNormalizedWeight needs to be overwritten // This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped multiple times diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index ffce0e6ea8b..bceb106aeef 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -92,12 +92,14 @@ import org.elasticsearch.index.fielddata.ShardFieldData; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.ShardGetService; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.RootObjectMapper; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; @@ -384,6 +386,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return this.pendingPrimaryTerm; } + /** Returns the primary term that is currently being used to assign to operations */ + public long getOperationPrimaryTerm() { + return this.operationPrimaryTerm; + } + /** * Returns the latest cluster routing entry received with this shard. */ @@ -1238,6 +1245,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine.Operation.Origin origin) throws IOException { + // If a translog op is replayed on the primary (eg. ccr), we need to use external instead of null for its version type. + final VersionType versionType = (origin == Engine.Operation.Origin.PRIMARY) ? VersionType.EXTERNAL : null; final Engine.Result result; switch (operation.opType()) { case INDEX: @@ -1245,14 +1254,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl // we set canHaveDuplicates to true all the time such that we de-optimze the translog case and ensure that all // autoGeneratedID docs that are coming from the primary are updated correctly. 
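The version type chosen for replayed operations above is worth spelling out: when a translog operation is replayed on a primary (as cross-cluster replication does), the operation must carry its original version, so VersionType.EXTERNAL is used, while all other origins keep passing null. A one-line sketch of the rule, assuming the enums from the surrounding code:

    // replay on a primary must honor the recorded version; elsewhere the engine decides
    VersionType versionType = (origin == Engine.Operation.Origin.PRIMARY) ? VersionType.EXTERNAL : null;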
result = applyIndexOperation(index.seqNo(), index.primaryTerm(), index.version(), - null, index.getAutoGeneratedIdTimestamp(), true, origin, + versionType, index.getAutoGeneratedIdTimestamp(), true, origin, source(shardId.getIndexName(), index.type(), index.id(), index.source(), XContentHelper.xContentType(index.source())).routing(index.routing())); break; case DELETE: final Translog.Delete delete = (Translog.Delete) operation; result = applyDeleteOperation(delete.seqNo(), delete.primaryTerm(), delete.version(), delete.type(), delete.id(), - null, origin); + versionType, origin); break; case NO_OP: final Translog.NoOp noOp = (Translog.NoOp) operation; @@ -1305,7 +1314,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl **/ public void openEngineAndRecoverFromTranslog() throws IOException { innerOpenEngineAndTranslog(); - getEngine().recoverFromTranslog(); + getEngine().recoverFromTranslog(this::runTranslogRecovery, Long.MAX_VALUE); } /** @@ -1323,7 +1332,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } recoveryState.setStage(RecoveryState.Stage.VERIFY_INDEX); // also check here, before we apply the translog - if (Booleans.isTrue(checkIndexOnStartup)) { + if (Booleans.isTrue(checkIndexOnStartup) || "checksum".equals(checkIndexOnStartup)) { try { checkIndex(); } catch (IOException ex) { @@ -1620,25 +1629,48 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } /** - * Acquires a lock on the translog files, preventing them from being trimmed. + * Acquires a lock on the translog files and Lucene soft-deleted documents to prevent them from being trimmed */ - public Closeable acquireTranslogRetentionLock() { - return getEngine().acquireTranslogRetentionLock(); + public Closeable acquireRetentionLockForPeerRecovery() { + return getEngine().acquireRetentionLockForPeerRecovery(); } /** - * Creates a new translog snapshot for reading translog operations whose seq# at least the provided seq#. - * The caller has to close the returned snapshot after finishing the reading. + * Returns the estimated number of history operations whose seq# at least the provided seq# in this shard. */ - public Translog.Snapshot newTranslogSnapshotFromMinSeqNo(long minSeqNo) throws IOException { - return getEngine().newTranslogSnapshotFromMinSeqNo(minSeqNo); + public int estimateNumberOfHistoryOperations(String source, long startingSeqNo) throws IOException { + return getEngine().estimateNumberOfHistoryOperations(source, mapperService, startingSeqNo); } /** - * Returns the estimated number of operations in translog whose seq# at least the provided seq#. + * Creates a new history snapshot for reading operations since the provided starting seqno (inclusive). + * The returned snapshot can be retrieved from either Lucene index or translog files. */ - public int estimateTranslogOperationsFromMinSeq(long minSeqNo) { - return getEngine().estimateTranslogOperationsFromMinSeq(minSeqNo); + public Translog.Snapshot getHistoryOperations(String source, long startingSeqNo) throws IOException { + return getEngine().readHistoryOperations(source, mapperService, startingSeqNo); + } + + /** + * Checks if we have a completed history of operations since the given starting seqno (inclusive). 
+ * This method should be called after acquiring the retention lock; See {@link #acquireRetentionLockForPeerRecovery()} + */ + public boolean hasCompleteHistoryOperations(String source, long startingSeqNo) throws IOException { + return getEngine().hasCompleteOperationHistory(source, mapperService, startingSeqNo); + } + + /** + * Creates a new changes snapshot for reading operations whose seq_no are between {@code fromSeqNo}(inclusive) + * and {@code toSeqNo}(inclusive). The caller has to close the returned snapshot after finishing the reading. + * + * @param source the source of the request + * @param fromSeqNo the from seq_no (inclusive) to read + * @param toSeqNo the to seq_no (inclusive) to read + * @param requiredFullRange if {@code true} then {@link Translog.Snapshot#next()} will throw {@link IllegalStateException} + * if any operation between {@code fromSeqNo} and {@code toSeqNo} is missing. + * This parameter should be only enabled when the entire requesting range is below the global checkpoint. + */ + public Translog.Snapshot newChangesSnapshot(String source, long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException { + return getEngine().newChangesSnapshot(source, mapperService, fromSeqNo, toSeqNo, requiredFullRange); } public List segments(boolean verbose) { @@ -1923,6 +1955,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl if (store.tryIncRef()) { try { doCheckIndex(); + } catch (IOException e) { + store.markStoreCorrupted(e); + throw e; } finally { store.decRef(); } @@ -1966,18 +2001,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return; } logger.warn("check index [failure]\n{}", os.bytes().utf8ToString()); - if ("fix".equals(checkIndexOnStartup)) { - if (logger.isDebugEnabled()) { - logger.debug("fixing index, writing new segments file ..."); - } - store.exorciseIndex(status); - if (logger.isDebugEnabled()) { - logger.debug("index fixed, wrote new segments file \"{}\"", status.segmentsFileName); - } - } else { - // only throw a failure if we are not going to fix the index - throw new IllegalStateException("index check failure but can't fix it"); - } + throw new IOException("index check failure"); } } @@ -2209,7 +2233,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), Collections.singletonList(refreshListeners), Collections.singletonList(new RefreshMetricUpdater(refreshMetric)), - indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker, () -> operationPrimaryTerm); + indexSort, circuitBreakerService, replicationTracker, () -> operationPrimaryTerm, tombstoneDocSupplier()); } /** @@ -2648,4 +2672,19 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl refreshMetric.inc(System.nanoTime() - currentRefreshStartTime); } } + + private EngineConfig.TombstoneDocSupplier tombstoneDocSupplier() { + final RootObjectMapper.Builder noopRootMapper = new RootObjectMapper.Builder("__noop"); + final DocumentMapper noopDocumentMapper = new DocumentMapper.Builder(noopRootMapper, mapperService).build(mapperService); + return new EngineConfig.TombstoneDocSupplier() { + @Override + public ParsedDocument newDeleteTombstoneDoc(String type, String id) { + return docMapper(type).getDocumentMapper().createDeleteTombstoneDoc(shardId.getIndexName(), type, id); + } + @Override + public ParsedDocument newNoopTombstoneDoc(String reason) { + 
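Taken together, the new shard-level history methods above are meant to be used under the retention lock. A hypothetical caller sketch (assuming a long startingSeqNo in scope; error handling elided):

    try (Closeable retentionLock = shard.acquireRetentionLockForPeerRecovery()) {
        // the lock keeps translog files and Lucene soft-deleted documents from being trimmed
        if (shard.hasCompleteHistoryOperations("peer-recovery", startingSeqNo)) {
            try (Translog.Snapshot snapshot = shard.getHistoryOperations("peer-recovery", startingSeqNo)) {
                Translog.Operation op;
                while ((op = snapshot.next()) != null) {
                    // replay op on the recovering target
                }
            }
        }
    }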
return noopDocumentMapper.createNoopTombstoneDoc(shardId.getIndexName(), reason); + } + }; + } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index 1edc0eb5dca..016a8afff69 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -89,7 +89,7 @@ public class PrimaryReplicaSyncer extends AbstractComponent { // Wrap translog snapshot to make it synchronized as it is accessed by different threads through SnapshotSender. // Even though those calls are not concurrent, snapshot.next() uses non-synchronized state and is not multi-thread-compatible // Also fail the resync early if the shard is shutting down - snapshot = indexShard.newTranslogSnapshotFromMinSeqNo(startingSeqNo); + snapshot = indexShard.getHistoryOperations("resync", startingSeqNo); final Translog.Snapshot originalSnapshot = snapshot; final Translog.Snapshot wrappedSnapshot = new Translog.Snapshot() { @Override diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java index e27c68c7570..a2219397427 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -32,6 +32,7 @@ import org.apache.lucene.search.ConstantScoreWeight; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; @@ -73,7 +74,7 @@ final class ShardSplittingQuery extends Query { this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer(indexMetaData.getCreationVersion()) : null; } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) { return new ConstantScoreWeight(this, boost) { @Override public String toString() { @@ -348,7 +349,7 @@ final class ShardSplittingQuery extends Query { final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); final IndexSearcher searcher = new IndexSearcher(topLevelContext); searcher.setQueryCache(null); - final Weight weight = searcher.createNormalizedWeight(query, false); + final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer s = weight.scorer(context); return s == null ? 
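The createWeight change above is one instance of the recurring Lucene 8 migration in this diff: the boolean needsScores flag became a ScoreMode, and createNormalizedWeight was removed in favor of an explicit rewrite. The new call shape, as a sketch (assuming an IndexSearcher searcher and a Query query):

    // Lucene 7: Weight weight = searcher.createNormalizedWeight(query, false);
    // Lucene 8: rewrite first, then ask for a weight that never computes scores
    Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);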
null : BitSet.of(s.iterator(), context.reader().maxDoc()); }; diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index e9acfe3d8b0..c4b971e470d 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -156,6 +156,7 @@ final class StoreRecovery { final Directory hardLinkOrCopyTarget = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); IndexWriterConfig iwc = new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) // we don't want merges to happen here - we call maybe merge on the engine // later once we stared it up otherwise we would need to wait for it here @@ -397,6 +398,9 @@ final class StoreRecovery { indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPendingPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); } else if (indexShouldExists) { + if (recoveryState.getRecoverySource().shouldBootstrapNewHistoryUUID()) { + store.bootstrapNewHistory(); + } // since we recover from local, just fill the files and size try { final RecoveryState.Index index = recoveryState.getIndex(); diff --git a/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarity.java b/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarity.java index aea18c30a69..7e3efacfa20 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarity.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarity.java @@ -20,19 +20,14 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.index.FieldInvertState; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.Similarity; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.SmallFloat; import org.elasticsearch.script.SimilarityScript; import org.elasticsearch.script.SimilarityWeightScript; -import java.io.IOException; - /** * A {@link Similarity} implementation that allows scores to be scripted. */ @@ -65,8 +60,18 @@ public final class ScriptedSimilarity extends Similarity { return SmallFloat.intToByte4(numTerms); } + /** Compute the part of the score that does not depend on the current document using the init_script. */ + private double computeWeight(Query query, Field field, Term term) { + if (weightScriptFactory == null) { + return 1d; + } + SimilarityWeightScript weightScript = weightScriptFactory.newInstance(); + return weightScript.execute(query, field, term); + } + @Override - public SimWeight computeWeight(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { + public SimScorer scorer(float boost, + CollectionStatistics collectionStats, TermStatistics... 
termStats) { Query query = new Query(boost); long docCount = collectionStats.docCount(); if (docCount == -1) { @@ -77,58 +82,32 @@ public final class ScriptedSimilarity extends Similarity { for (int i = 0; i < termStats.length; ++i) { terms[i] = new Term(termStats[i].docFreq(), termStats[i].totalTermFreq()); } - return new Weight(collectionStats.field(), query, field, terms); - } - /** Compute the part of the score that does not depend on the current document using the init_script. */ - private double computeWeight(Query query, Field field, Term term) throws IOException { - if (weightScriptFactory == null) { - return 1d; - } - SimilarityWeightScript weightScript = weightScriptFactory.newInstance(); - return weightScript.execute(query, field, term); - } - - @Override - public SimScorer simScorer(SimWeight w, LeafReaderContext context) throws IOException { - Weight weight = (Weight) w; - SimScorer[] scorers = new SimScorer[weight.terms.length]; - for (int i = 0; i < weight.terms.length; ++i) { - final Term term = weight.terms[i]; + SimScorer[] scorers = new SimScorer[terms.length]; + for (int i = 0; i < terms.length; ++i) { + final Term term = terms[i]; final SimilarityScript script = scriptFactory.newInstance(); - final NumericDocValues norms = context.reader().getNormValues(weight.fieldName); - final Doc doc = new Doc(norms); - final double scoreWeight = computeWeight(weight.query, weight.field, term); + final Doc doc = new Doc(); + final double scoreWeight = computeWeight(query, field, term); scorers[i] = new SimScorer() { @Override - public float score(int docID, float freq) throws IOException { - doc.docID = docID; + public float score(float freq, long norm) { doc.freq = freq; - return (float) script.execute(scoreWeight, weight.query, weight.field, term, doc); + doc.norm = norm; + return (float) script.execute(scoreWeight, query, field, term, doc); } @Override - public float computeSlopFactor(int distance) { - return 1.0f / (distance + 1); - } - - @Override - public float computePayloadFactor(int doc, int start, int end, BytesRef payload) { - return 1f; - } - - @Override - public Explanation explain(int docID, Explanation freq) throws IOException { - doc.docID = docID; - float score = score(docID, freq.getValue()); + public Explanation explain(Explanation freq, long norm) { + float score = score(freq.getValue().floatValue(), norm); return Explanation.match(score, "score from " + ScriptedSimilarity.this.toString() + " computed from:", Explanation.match((float) scoreWeight, "weight"), - Explanation.match(weight.query.boost, "query.boost"), - Explanation.match(weight.field.docCount, "field.docCount"), - Explanation.match(weight.field.sumDocFreq, "field.sumDocFreq"), - Explanation.match(weight.field.sumTotalTermFreq, "field.sumTotalTermFreq"), + Explanation.match(query.boost, "query.boost"), + Explanation.match(field.docCount, "field.docCount"), + Explanation.match(field.sumDocFreq, "field.sumDocFreq"), + Explanation.match(field.sumTotalTermFreq, "field.sumTotalTermFreq"), Explanation.match(term.docFreq, "term.docFreq"), Explanation.match(term.totalTermFreq, "term.totalTermFreq"), Explanation.match(freq.getValue(), "doc.freq", freq.getDetails()), @@ -143,50 +122,26 @@ public final class ScriptedSimilarity extends Similarity { return new SimScorer() { @Override - public float score(int doc, float freq) throws IOException { + public float score(float freq, long norm) { double sum = 0; for (SimScorer scorer : scorers) { - sum += scorer.score(doc, freq); + sum += scorer.score(freq, norm); } 
return (float) sum; } @Override - public float computeSlopFactor(int distance) { - return 1.0f / (distance + 1); - } - - @Override - public float computePayloadFactor(int doc, int start, int end, BytesRef payload) { - return 1f; - } - - @Override - public Explanation explain(int doc, Explanation freq) throws IOException { + public Explanation explain(Explanation freq, long norm) { Explanation[] subs = new Explanation[scorers.length]; for (int i = 0; i < subs.length; ++i) { - subs[i] = scorers[i].explain(doc, freq); + subs[i] = scorers[i].explain(freq, norm); } - return Explanation.match(score(doc, freq.getValue()), "Sum of:", subs); + return Explanation.match(score(freq.getValue().floatValue(), norm), "Sum of:", subs); } }; } } - private static class Weight extends SimWeight { - private final String fieldName; - private final Query query; - private final Field field; - private final Term[] terms; - - Weight(String fieldName, Query query, Field field, Term[] terms) { - this.fieldName = fieldName; - this.query = query; - this.field = field; - this.terms = terms; - } - } - /** Scoring factors that come from the query. */ public static class Query { private final float boost; @@ -254,25 +209,16 @@ public final class ScriptedSimilarity extends Similarity { /** Statistics that are specific to a document. */ public static class Doc { - private final NumericDocValues norms; - private int docID; private float freq; + private long norm; - private Doc(NumericDocValues norms) { - this.norms = norms; - } + private Doc() {} /** Return the number of tokens that the current document has in the considered field. */ - public int getLength() throws IOException { + public int getLength() { // the length is computed lazily so that similarities that do not use the length are // not penalized - if (norms == null) { - return 1; - } else if (norms.advanceExact(docID)) { - return SmallFloat.byte4ToInt((byte) norms.longValue()); - } else { - return 0; - } + return SmallFloat.byte4ToInt((byte) norm); } /** Return the number of occurrences of the term in the current document for the considered field. 
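In the rewritten ScriptedSimilarity above, the per-document norm is no longer read from NumericDocValues; Lucene now passes it straight into SimScorer.score(freq, norm), and the document length is decoded from its lowest byte. The round trip, as a sketch (assuming an int numTerms; SmallFloat as imported above):

    long norm = SmallFloat.intToByte4(numTerms);      // encoded at index time in computeNorm
    int length = SmallFloat.byte4ToInt((byte) norm);  // decoded at search time in Doc.getLength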
*/ diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java index 18c6d6a3fc0..9aab1260b6b 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java @@ -24,13 +24,10 @@ import org.apache.lucene.search.similarities.AfterEffectB; import org.apache.lucene.search.similarities.AfterEffectL; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BasicModel; -import org.apache.lucene.search.similarities.BasicModelBE; -import org.apache.lucene.search.similarities.BasicModelD; import org.apache.lucene.search.similarities.BasicModelG; import org.apache.lucene.search.similarities.BasicModelIF; import org.apache.lucene.search.similarities.BasicModelIn; import org.apache.lucene.search.similarities.BasicModelIne; -import org.apache.lucene.search.similarities.BasicModelP; import org.apache.lucene.search.similarities.BooleanSimilarity; import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.DFISimilarity; @@ -74,24 +71,35 @@ final class SimilarityProviders { static final String DISCOUNT_OVERLAPS = "discount_overlaps"; private static final Map<String, BasicModel> BASIC_MODELS; + private static final Map<String, String> LEGACY_BASIC_MODELS; private static final Map<String, AfterEffect> AFTER_EFFECTS; + private static final Map<String, String> LEGACY_AFTER_EFFECTS; static { Map<String, BasicModel> models = new HashMap<>(); - models.put("be", new BasicModelBE()); - models.put("d", new BasicModelD()); models.put("g", new BasicModelG()); models.put("if", new BasicModelIF()); models.put("in", new BasicModelIn()); models.put("ine", new BasicModelIne()); - models.put("p", new BasicModelP()); BASIC_MODELS = unmodifiableMap(models); + Map<String, String> legacyModels = new HashMap<>(); + // TODO: be and g are both based on the Bose-Einstein model. + // Is there a better replacement for d and p which use the binomial model?
+ legacyModels.put("be", "g"); + legacyModels.put("d", "ine"); + legacyModels.put("p", "ine"); + LEGACY_BASIC_MODELS = unmodifiableMap(legacyModels); + Map<String, AfterEffect> effects = new HashMap<>(); - effects.put("no", new AfterEffect.NoAfterEffect()); effects.put("b", new AfterEffectB()); effects.put("l", new AfterEffectL()); AFTER_EFFECTS = unmodifiableMap(effects); + + Map<String, String> legacyEffects = new HashMap<>(); + // l is simpler than b, so this should be a better replacement for "no" + legacyEffects.put("no", "l"); + LEGACY_AFTER_EFFECTS = unmodifiableMap(legacyEffects); } private static final Map<String, Independence> INDEPENDENCE_MEASURES; @@ -124,9 +132,25 @@ final class SimilarityProviders { * @param settings Settings to parse * @return {@link BasicModel} referred to in the Settings */ - private static BasicModel parseBasicModel(Settings settings) { + private static BasicModel parseBasicModel(Version indexCreatedVersion, Settings settings) { String basicModel = settings.get("basic_model"); BasicModel model = BASIC_MODELS.get(basicModel); + + if (model == null) { + String replacement = LEGACY_BASIC_MODELS.get(basicModel); + if (replacement != null) { + if (indexCreatedVersion.onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException("Basic model [" + basicModel + "] isn't supported anymore, " + + "please use another model."); + } else { + DEPRECATION_LOGGER.deprecated("Basic model [" + basicModel + + "] isn't supported anymore and has arbitrarily been replaced with [" + replacement + "]."); + model = BASIC_MODELS.get(replacement); + assert model != null; + } + } + } + if (model == null) { throw new IllegalArgumentException("Unsupported BasicModel [" + basicModel + "], expected one of " + BASIC_MODELS.keySet()); } @@ -139,9 +163,25 @@ final class SimilarityProviders { * @param settings Settings to parse * @return {@link AfterEffect} referred to in the Settings */ - private static AfterEffect parseAfterEffect(Settings settings) { + private static AfterEffect parseAfterEffect(Version indexCreatedVersion, Settings settings) { String afterEffect = settings.get("after_effect"); AfterEffect effect = AFTER_EFFECTS.get(afterEffect); + + if (effect == null) { + String replacement = LEGACY_AFTER_EFFECTS.get(afterEffect); + if (replacement != null) { + if (indexCreatedVersion.onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException("After effect [" + afterEffect + + "] isn't supported anymore, please use another effect."); + } else { + DEPRECATION_LOGGER.deprecated("After effect [" + afterEffect + + "] isn't supported anymore and has arbitrarily been replaced with [" + replacement + "]."); + effect = AFTER_EFFECTS.get(replacement); + assert effect != null; + } + } + } + if (effect == null) { throw new IllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "], expected one of " + AFTER_EFFECTS.keySet()); } @@ -263,8 +303,8 @@ final class SimilarityProviders { return new DFRSimilarity( - parseBasicModel(settings), - parseAfterEffect(settings), + parseBasicModel(indexCreatedVersion, settings), + parseAfterEffect(indexCreatedVersion, settings), parseNormalization(settings)); } diff --git a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index fc605430066..f95cdb3a9f6 100644 --- a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.store;
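parseBasicModel and parseAfterEffect above apply the same version-gated fallback; condensed into one generic step it reads as follows (a sketch only, no such helper exists in this change):

    static <T> T resolveOrFallback(String name, Map<String, T> supported, Map<String, String> legacy, Version created) {
        T value = supported.get(name);
        if (value == null) {
            String replacement = legacy.get(name);
            if (replacement != null) {
                if (created.onOrAfter(Version.V_7_0_0_alpha1)) {
                    throw new IllegalArgumentException("[" + name + "] isn't supported anymore, please use another one.");
                }
                DEPRECATION_LOGGER.deprecated("[" + name + "] isn't supported anymore and has arbitrarily been replaced with ["
                    + replacement + "].");
                value = supported.get(replacement); // non-null by construction of the legacy maps
            }
        }
        return value; // still null means an unknown name: the caller throws
    }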
import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.FileSwitchDirectory; import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.MMapDirectory; @@ -77,10 +76,21 @@ public class FsDirectoryService extends DirectoryService { } protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), - IndexModule.Type.FS.getSettingsKey()); + final String storeType = + indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()); if (IndexModule.Type.FS.match(storeType)) { - return FSDirectory.open(location, lockFactory); // use lucene defaults + final IndexModule.Type type = + IndexModule.defaultStoreType(IndexModule.NODE_STORE_ALLOW_MMAPFS.get(indexSettings.getNodeSettings())); + switch (type) { + case MMAPFS: + return new MMapDirectory(location, lockFactory); + case SIMPLEFS: + return new SimpleFSDirectory(location, lockFactory); + case NIOFS: + return new NIOFSDirectory(location, lockFactory); + default: + throw new AssertionError("unexpected built-in store type [" + type + "]"); + } } else if (IndexModule.Type.SIMPLEFS.match(storeType)) { return new SimpleFSDirectory(location, lockFactory); } else if (IndexModule.Type.NIOFS.match(storeType)) { diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 001e263ea8f..b892c5c01fe 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -64,7 +64,6 @@ import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.RefCounted; @@ -134,7 +133,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref static final int VERSION_STACK_TRACE = 1; // we write the stack trace too since 1.4.0 static final int VERSION_START = 0; static final int VERSION = VERSION_WRITE_THROWABLE; - static final String CORRUPTED = "corrupted_"; + // public is for test purposes + public static final String CORRUPTED = "corrupted_"; public static final Setting INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.store.stats_refresh_interval", TimeValue.timeValueSeconds(10), Property.IndexScope); @@ -152,19 +152,17 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } }; - public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock) throws IOException { - this(shardId, indexSettings, directoryService, shardLock, OnClose.EMPTY); + public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardLock shardLock) { + this(shardId, indexSettings, directory, shardLock, OnClose.EMPTY); } - public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, - OnClose onClose) throws IOException { + public Store(ShardId shardId, IndexSettings indexSettings, 
Directory directory, ShardLock shardLock, + OnClose onClose) { super(shardId, indexSettings); - final Settings settings = indexSettings.getSettings(); - Directory dir = directoryService.newDirectory(); final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING); logger.debug("store stats are refreshed with refresh_interval [{}]", refreshInterval); - ByteSizeCachingDirectory sizeCachingDir = new ByteSizeCachingDirectory(dir, refreshInterval); - this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", settings, shardId)); + ByteSizeCachingDirectory sizeCachingDir = new ByteSizeCachingDirectory(directory, refreshInterval); + this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", shardId)); this.shardLock = shardLock; this.onClose = onClose; @@ -360,18 +358,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } } - /** - * Repairs the index using the previous returned status from {@link #checkIndex(PrintStream)}. - */ - public void exorciseIndex(CheckIndex.Status status) throws IOException { - metadataLock.writeLock().lock(); - try (CheckIndex checkIndex = new CheckIndex(directory)) { - checkIndex.exorciseIndex(status); - } finally { - metadataLock.writeLock().unlock(); - } - } - public StoreStats stats() throws IOException { ensureOpen(); return new StoreStats(directory.estimateSize()); @@ -1009,7 +995,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } final String segmentId = IndexFileNames.parseSegmentName(meta.name()); final String extension = IndexFileNames.getExtension(meta.name()); - assert FIELD_INFOS_FILE_EXTENSION.equals(extension) == false || IndexFileNames.stripExtension(IndexFileNames.stripSegmentName(meta.name())).isEmpty() : "FieldInfos are generational but updateable DV are not supported in elasticsearch"; if (IndexFileNames.SEGMENTS.equals(segmentId) || DEL_FILE_EXTENSION.equals(extension) || LIV_FILE_EXTENSION.equals(extension)) { // only treat del files as per-commit files fnm files are generational but only for upgradable DV perCommitStoreFiles.add(meta); @@ -1595,6 +1580,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref throws IOException { assert openMode == IndexWriterConfig.OpenMode.APPEND || commit == null : "can't specify create flag with a commit"; IndexWriterConfig iwc = new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) .setIndexCommit(commit) // we don't want merges to happen here - we call maybe merge on the engine diff --git a/server/src/main/java/org/elasticsearch/index/translog/TragicExceptionHolder.java b/server/src/main/java/org/elasticsearch/index/translog/TragicExceptionHolder.java new file mode 100644 index 00000000000..b823a920039 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/translog/TragicExceptionHolder.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.translog; + +import java.util.concurrent.atomic.AtomicReference; + +public class TragicExceptionHolder { + private final AtomicReference<Exception> tragedy = new AtomicReference<>(); + + /** + * Sets the tragic exception, or if one is already set, adds the given exception to it as a suppressed exception. + * @param ex tragic exception to set + */ + public void setTragicException(Exception ex) { + assert ex != null; + if (tragedy.compareAndSet(null, ex) == false) { + if (tragedy.get() != ex) { // to ensure there is no self-suppression + tragedy.get().addSuppressed(ex); + } + } + } + + public Exception get() { + return tragedy.get(); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index e426b3a7253..f17acac3789 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -66,6 +66,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.LongSupplier; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; import java.util.stream.Stream; /** @@ -117,6 +118,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final Path location; private TranslogWriter current; + protected final TragicExceptionHolder tragedy = new TragicExceptionHolder(); private final AtomicBoolean closed = new AtomicBoolean(); private final TranslogConfig config; private final LongSupplier globalCheckpointSupplier; @@ -310,8 +312,28 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return closed.get() == false; } + private static boolean calledFromOutsideOrViaTragedyClose() { + List<StackTraceElement> frames = Stream.of(Thread.currentThread().getStackTrace()). + skip(3). //skip getStackTrace, current method and close method frames + limit(10). //limit depth of analysis to 10 frames, it should be enough to catch closing with, e.g. IOUtils + filter(f -> + { + try { + return Translog.class.isAssignableFrom(Class.forName(f.getClassName())); + } catch (Exception ignored) { + return false; + } + } + ).
//find all inner callers including Translog subclasses + collect(Collectors.toList()); + //the list of inner callers should be either empty or should contain closeOnTragicEvent method + return frames.isEmpty() || frames.stream().anyMatch(f -> f.getMethodName().equals("closeOnTragicEvent")); + } + @Override public void close() throws IOException { + assert calledFromOutsideOrViaTragedyClose() : + "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method"; if (closed.compareAndSet(false, true)) { try (ReleasableLock lock = writeLock.acquire()) { try { @@ -462,7 +484,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC getChannelFactory(), config.getBufferSize(), initialMinTranslogGen, initialGlobalCheckpoint, - globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong()); + globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong(), tragedy); } catch (final IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); } @@ -555,21 +577,27 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC */ public Snapshot newSnapshot() throws IOException { try (ReleasableLock ignored = readLock.acquire()) { - return newSnapshotFromGen(getMinFileGeneration()); + return newSnapshotFromGen(new TranslogGeneration(translogUUID, getMinFileGeneration()), Long.MAX_VALUE); } } - public Snapshot newSnapshotFromGen(long minGeneration) throws IOException { + public Snapshot newSnapshotFromGen(TranslogGeneration fromGeneration, long upToSeqNo) throws IOException { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); - if (minGeneration < getMinFileGeneration()) { - throw new IllegalArgumentException("requested snapshot generation [" + minGeneration + "] is not available. " + + final long fromFileGen = fromGeneration.translogFileGeneration; + if (fromFileGen < getMinFileGeneration()) { + throw new IllegalArgumentException("requested snapshot generation [" + fromFileGen + "] is not available. 
" + "Min referenced generation is [" + getMinFileGeneration() + "]"); } TranslogSnapshot[] snapshots = Stream.concat(readers.stream(), Stream.of(current)) - .filter(reader -> reader.getGeneration() >= minGeneration) + .filter(reader -> reader.getGeneration() >= fromFileGen && reader.getCheckpoint().minSeqNo <= upToSeqNo) .map(BaseTranslogReader::newSnapshot).toArray(TranslogSnapshot[]::new); - return newMultiSnapshot(snapshots); + final Snapshot snapshot = newMultiSnapshot(snapshots); + if (upToSeqNo == Long.MAX_VALUE) { + return snapshot; + } else { + return new SeqNoFilterSnapshot(snapshot, Long.MIN_VALUE, upToSeqNo); + } } } @@ -726,7 +754,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } catch (IOException e) { IOUtils.closeWhileHandlingException(newReaders); - close(); + tragedy.setTragicException(e); + closeOnTragicEvent(e); throw e; } @@ -779,10 +808,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * * @param ex if an exception occurs closing the translog, it will be suppressed into the provided exception */ - private void closeOnTragicEvent(final Exception ex) { + protected void closeOnTragicEvent(final Exception ex) { // we can not hold a read lock here because closing will attempt to obtain a write lock and that would result in self-deadlock assert readLock.isHeldByCurrentThread() == false : Thread.currentThread().getName(); - if (current.getTragicException() != null) { + if (tragedy.get() != null) { try { close(); } catch (final AlreadyClosedException inner) { @@ -903,7 +932,59 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * Returns the next operation in the snapshot or null if we reached the end. */ Translog.Operation next() throws IOException; + } + /** + * A filtered snapshot consisting of only operations whose sequence numbers are in the given range + * between {@code fromSeqNo} (inclusive) and {@code toSeqNo} (inclusive). This filtered snapshot + * shares the same underlying resources with the {@code delegate} snapshot, therefore we should not + * use the {@code delegate} after passing it to this filtered snapshot. 
+ */ + static final class SeqNoFilterSnapshot implements Snapshot { + private final Snapshot delegate; + private int filteredOpsCount; + private final long fromSeqNo; // inclusive + private final long toSeqNo; // inclusive + + SeqNoFilterSnapshot(Snapshot delegate, long fromSeqNo, long toSeqNo) { + assert fromSeqNo <= toSeqNo : "from_seq_no[" + fromSeqNo + "] > to_seq_no[" + toSeqNo + "]"; + this.delegate = delegate; + this.fromSeqNo = fromSeqNo; + this.toSeqNo = toSeqNo; + } + + @Override + public int totalOperations() { + return delegate.totalOperations(); + } + + @Override + public int skippedOperations() { + return filteredOpsCount + delegate.skippedOperations(); + } + + @Override + public int overriddenOperations() { + return delegate.overriddenOperations(); + } + + @Override + public Operation next() throws IOException { + Translog.Operation op; + while ((op = delegate.next()) != null) { + if (fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo) { + return op; + } else { + filteredOpsCount++; + } + } + return null; + } + + @Override + public void close() throws IOException { + delegate.close(); + } } /** @@ -1180,6 +1261,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC ", type='" + type + '\'' + ", seqNo=" + seqNo + ", primaryTerm=" + primaryTerm + + ", version=" + version + + ", autoGeneratedIdTimestamp=" + autoGeneratedIdTimestamp + '}'; } @@ -1322,6 +1405,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC "uid=" + uid + ", seqNo=" + seqNo + ", primaryTerm=" + primaryTerm + + ", version=" + version + '}'; } } @@ -1556,7 +1640,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC current = createWriter(current.getGeneration() + 1); logger.trace("current translog set to [{}]", current.getGeneration()); } catch (final Exception e) { - IOUtils.closeWhileHandlingException(this); // tragic event + tragedy.setTragicException(e); + closeOnTragicEvent(e); throw e; } } @@ -1669,7 +1754,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private void ensureOpen() { if (closed.get()) { - throw new AlreadyClosedException("translog is already closed", current.getTragicException()); + throw new AlreadyClosedException("translog is already closed", tragedy.get()); } } @@ -1683,7 +1768,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * Otherwise (no tragic exception has occurred) it returns null. 
*/ public Exception getTragicException() { - return current.getTragicException(); + return tragedy.get(); } /** Reads and returns the current checkpoint */ @@ -1766,8 +1851,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC final String translogUUID = UUIDs.randomBase64UUID(); TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, location.resolve(getFilename(1)), channelFactory, new ByteSizeValue(10), 1, initialGlobalCheckpoint, - () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm - ); + () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm, + new TragicExceptionHolder()); writer.close(); return translogUUID; } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index b779644cd5c..e0cfe9eaaff 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -40,6 +40,7 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.HashMap; import java.util.Map; +import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.LongSupplier; @@ -51,7 +52,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { /* the number of translog operations written to this file */ private volatile int operationCounter; /* if we hit an exception that we can't recover from we assign it to this var and ship it with every AlreadyClosedException we throw */ - private volatile Exception tragedy; + private final TragicExceptionHolder tragedy; /* A buffered outputstream what writes to the writers channel */ private final OutputStream outputStream; /* the total offset of this file including the bytes written to the file as well as into the buffer */ @@ -76,7 +77,10 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { final FileChannel channel, final Path path, final ByteSizeValue bufferSize, - final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier, TranslogHeader header) throws IOException { + final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier, TranslogHeader header, + TragicExceptionHolder tragedy) + throws + IOException { super(initialCheckpoint.generation, channel, path, header); assert initialCheckpoint.offset == channel.position() : "initial checkpoint offset [" + initialCheckpoint.offset + "] is different than current channel position [" @@ -94,12 +98,13 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { assert initialCheckpoint.trimmedAboveSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO : initialCheckpoint.trimmedAboveSeqNo; this.globalCheckpointSupplier = globalCheckpointSupplier; this.seenSequenceNumbers = Assertions.ENABLED ? 
new HashMap<>() : null; + this.tragedy = tragedy; } public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory, ByteSizeValue bufferSize, final long initialMinTranslogGen, long initialGlobalCheckpoint, final LongSupplier globalCheckpointSupplier, final LongSupplier minTranslogGenerationSupplier, - final long primaryTerm) + final long primaryTerm, TragicExceptionHolder tragedy) throws IOException { final FileChannel channel = channelFactory.open(file); try { @@ -120,7 +125,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { writerGlobalCheckpointSupplier = globalCheckpointSupplier; } return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize, - writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header); + writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header, tragedy); } catch (Exception exception) { // if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that // file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition @@ -129,24 +134,8 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { } } - /** - * If this {@code TranslogWriter} was closed as a side-effect of a tragic exception, - * e.g. disk full while flushing a new segment, this returns the root cause exception. - * Otherwise (no tragic exception has occurred) it returns null. - */ - public Exception getTragicException() { - return tragedy; - } - private synchronized void closeWithTragicEvent(final Exception ex) { - assert ex != null; - if (tragedy == null) { - tragedy = ex; - } else if (tragedy != ex) { - // it should be safe to call closeWithTragicEvents on multiple layers without - // worrying about self suppression. - tragedy.addSuppressed(ex); - } + tragedy.setTragicException(ex); try { close(); } catch (final IOException | RuntimeException e) { @@ -204,7 +193,24 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { new BufferedChecksumStreamInput(data.streamInput(), "assertion")); Translog.Operation prvOp = Translog.readOperation( new BufferedChecksumStreamInput(previous.v1().streamInput(), "assertion")); - if (newOp.equals(prvOp) == false) { + // TODO: We haven't had timestamp for Index operations in Lucene yet, we need to loosen this check without timestamp. 
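Because the holder is now shared between the translog and its current writer, whichever layer fails first owns the root cause and later failures are folded in as suppressed exceptions. A small usage sketch of the TragicExceptionHolder introduced above (java.io.IOException assumed):

    TragicExceptionHolder tragedy = new TragicExceptionHolder();
    tragedy.setTragicException(new IOException("disk full"));    // becomes the root cause
    tragedy.setTragicException(new IOException("fsync failed")); // attached as suppressed
    assert tragedy.get().getMessage().equals("disk full");
    assert tragedy.get().getSuppressed().length == 1;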
+ final boolean sameOp; + if (newOp instanceof Translog.Index && prvOp instanceof Translog.Index) { + final Translog.Index o1 = (Translog.Index) prvOp; + final Translog.Index o2 = (Translog.Index) newOp; + sameOp = Objects.equals(o1.id(), o2.id()) && Objects.equals(o1.type(), o2.type()) + && Objects.equals(o1.source(), o2.source()) && Objects.equals(o1.routing(), o2.routing()) + && o1.primaryTerm() == o2.primaryTerm() && o1.seqNo() == o2.seqNo() + && o1.version() == o2.version(); + } else if (newOp instanceof Translog.Delete && prvOp instanceof Translog.Delete) { + final Translog.Delete o1 = (Translog.Delete) newOp; + final Translog.Delete o2 = (Translog.Delete) prvOp; + sameOp = Objects.equals(o1.id(), o2.id()) && Objects.equals(o1.type(), o2.type()) + && o1.primaryTerm() == o2.primaryTerm() && o1.seqNo() == o2.seqNo() && o1.version() == o2.version(); + } else { + sameOp = false; + } + if (sameOp == false) { throw new AssertionError( "seqNo [" + seqNo + "] was processed twice in generation [" + generation + "], with different data. " + "prvOp [" + prvOp + "], newOp [" + newOp + "]", previous.v2()); @@ -296,7 +302,8 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { if (closed.compareAndSet(false, true)) { return new TranslogReader(getLastSyncedCheckpoint(), channel, path, header); } else { - throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", tragedy); + throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", + tragedy.get()); } } } @@ -406,7 +413,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { protected final void ensureOpen() { if (isClosed()) { - throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragedy); + throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragedy.get()); } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java index 86995ae7c5a..a90f8af0af4 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java @@ -32,6 +32,7 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.Lock; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.NativeFSLockFactory; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cli.EnvironmentAwareCommand; @@ -177,6 +178,7 @@ public class TruncateTranslogCommand extends EnvironmentAwareCommand { terminal.println("Marking index with the new history uuid"); // commit the new history id IndexWriterConfig iwc = new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) // we don't want merges to happen here - we call maybe merge on the engine // later once we started it up otherwise we would need to wait for it here diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 39346fecbef..1c83a880511 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ 
b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -228,12 +228,20 @@ public class IndicesService extends AbstractLifecycleComponent this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, logger, threadPool, this.cleanInterval); this.metaStateService = metaStateService; this.engineFactoryProviders = engineFactoryProviders; + + // do not allow any plugin-provided index store type to conflict with a built-in type + for (final String indexStoreType : indexStoreFactories.keySet()) { + if (IndexModule.isBuiltinType(indexStoreType)) { + throw new IllegalStateException("registered index store type [" + indexStoreType + "] conflicts with a built-in type"); + } + } + this.indexStoreFactories = indexStoreFactories; } @Override protected void doStop() { - ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory("indices_shutdown")); + ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory(settings, "indices_shutdown")); // Copy indices because we modify it asynchronously in the body of the loop final Set indices = this.indices.values().stream().map(s -> s.index()).collect(Collectors.toSet()); diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index 1ecdc797073..a22ada87d77 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -19,14 +19,17 @@ package org.elasticsearch.indices.analysis; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.analysis.LowerCaseFilter; -import org.apache.lucene.analysis.standard.StandardFilter; +import org.apache.lucene.analysis.TokenStream; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.NamedRegistry; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.CharFilterFactory; @@ -39,7 +42,6 @@ import org.elasticsearch.index.analysis.PreConfiguredTokenizer; import org.elasticsearch.index.analysis.ShingleTokenFilterFactory; import org.elasticsearch.index.analysis.SimpleAnalyzerProvider; import org.elasticsearch.index.analysis.StandardAnalyzerProvider; -import org.elasticsearch.index.analysis.StandardTokenFilterFactory; import org.elasticsearch.index.analysis.StandardTokenizerFactory; import org.elasticsearch.index.analysis.StopAnalyzerProvider; import org.elasticsearch.index.analysis.StopTokenFilterFactory; @@ -69,6 +71,8 @@ public final class AnalysisModule { private static final IndexSettings NA_INDEX_SETTINGS; + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(AnalysisModule.class)); + private final HunspellService hunspellService; private final AnalysisRegistry analysisRegistry; @@ -116,7 +120,29 @@ public final class AnalysisModule { hunspellService) { NamedRegistry> tokenFilters = new NamedRegistry<>("token_filter"); tokenFilters.register("stop", StopTokenFilterFactory::new); - 
tokenFilters.register("standard", StandardTokenFilterFactory::new); + // Add "standard" for old indices (bwc) + tokenFilters.register("standard", new AnalysisProvider() { + @Override + public TokenFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + if (indexSettings.getIndexVersionCreated().before(Version.V_7_0_0_alpha1)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("standard_deprecation", + "The [standard] token filter name is deprecated and will be removed in a future version."); + } else { + throw new IllegalArgumentException("The [standard] token filter has been removed."); + } + return new AbstractTokenFilterFactory(indexSettings, name, settings) { + @Override + public TokenStream create(TokenStream tokenStream) { + return tokenStream; + } + }; + } + + @Override + public boolean requiresAnalysisSettings() { + return false; + } + }); tokenFilters.register("shingle", ShingleTokenFilterFactory::new); tokenFilters.register("hunspell", requiresAnalysisSettings((indexSettings, env, name, settings) -> new HunspellTokenFilterFactory (indexSettings, name, settings, hunspellService))); @@ -153,7 +179,17 @@ public final class AnalysisModule { // Add filters available in lucene-core preConfiguredTokenFilters.register("lowercase", PreConfiguredTokenFilter.singleton("lowercase", true, LowerCaseFilter::new)); - preConfiguredTokenFilters.register("standard", PreConfiguredTokenFilter.singleton("standard", false, StandardFilter::new)); + // Add "standard" for old indices (bwc) + preConfiguredTokenFilters.register( "standard", + PreConfiguredTokenFilter.singletonWithVersion("standard", true, (reader, version) -> { + if (version.before(Version.V_7_0_0_alpha1)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("standard_deprecation", + "The [standard] token filter is deprecated and will be removed in a future version."); + } else { + throw new IllegalArgumentException("The [standard] token filter has been removed."); + } + return reader; + })); /* Note that "stop" is available in lucene-core but it's pre-built * version uses a set of English stop words that are in * lucene-analyzers-common so "stop" is defined in the analysis-common diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java index 0f31a8a46f1..1b4772b3e51 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.core.SimpleAnalyzer; import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.standard.ClassicAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.Version; @@ -61,7 +62,7 @@ public enum PreBuiltAnalyzers { STOP { @Override protected Analyzer create(Version version) { - Analyzer a = new StopAnalyzer(); + Analyzer a = new StopAnalyzer(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); a.setVersion(version.luceneVersion); return a; } diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 7e6a9c29a83..3d05293f7b7 100644 --- 
a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -251,7 +251,16 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { //package private to allow overriding it in tests long currentMemoryUsage() { - return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed(); + try { + return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed(); + } catch (IllegalArgumentException ex) { + // This exception can happen (rarely) due to a race condition in the JVM when determining usage of memory pools. We do not want + // to fail requests because of this and thus return zero memory usage in this case. While we could also return the most + // recently determined memory usage, we would overestimate memory usage immediately after a garbage collection event. + assert ex.getMessage().matches("committed = \\d+ should be < max = \\d+"); + logger.info("Cannot determine current memory usage due to JDK-8207200.", ex); + return 0; + } } /** diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index e6a86d47f55..692010119dc 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -456,7 +456,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple AllocatedIndex indexService = null; try { indexService = indicesService.createIndex(indexMetaData, buildInIndexListener); - if (indexService.updateMapping(indexMetaData) && sendRefreshMapping) { + if (indexService.updateMapping(null, indexMetaData) && sendRefreshMapping) { nodeMappingRefreshAction.nodeMappingRefresh(state.nodes().getMasterNode(), new NodeMappingRefreshAction.NodeMappingRefreshRequest(indexMetaData.getIndex().getName(), indexMetaData.getIndexUUID(), state.nodes().getLocalNodeId()) @@ -490,7 +490,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple if (ClusterChangedEvent.indexMetaDataChanged(currentIndexMetaData, newIndexMetaData)) { indexService.updateMetaData(newIndexMetaData); try { - if (indexService.updateMapping(newIndexMetaData) && sendRefreshMapping) { + if (indexService.updateMapping(currentIndexMetaData, newIndexMetaData) && sendRefreshMapping) { nodeMappingRefreshAction.nodeMappingRefresh(state.nodes().getMasterNode(), new NodeMappingRefreshAction.NodeMappingRefreshRequest(newIndexMetaData.getIndex().getName(), newIndexMetaData.getIndexUUID(), state.nodes().getLocalNodeId()) @@ -778,7 +778,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple /** * Checks if index requires refresh from master. */ - boolean updateMapping(IndexMetaData indexMetaData) throws IOException; + boolean updateMapping(IndexMetaData currentIndexMetaData, IndexMetaData newIndexMetaData) throws IOException; /** * Returns shard with given id. 
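Aside on the HierarchyCircuitBreakerService hunk above: JDK-8207200 is a race in the JVM's memory-pool accounting that can make MemoryMXBean.getHeapMemoryUsage() throw an IllegalArgumentException. The patch prefers a conservative zero over failing the in-flight request or reusing a stale reading. A minimal, self-contained sketch of the same defensive read follows; the class name and main method are illustrative only and not part of the patch.

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;

// Sketch: treat a failed heap-usage read as zero rather than failing the caller.
final class SafeHeapUsageSketch {

    private static final MemoryMXBean MEMORY_MX_BEAN = ManagementFactory.getMemoryMXBean();

    static long currentMemoryUsage() {
        try {
            return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed();
        } catch (IllegalArgumentException ex) {
            // Rare JVM race (JDK-8207200). Returning the most recent reading would
            // overestimate usage right after a garbage collection, so zero is the
            // conservative fallback.
            return 0;
        }
    }

    public static void main(String[] args) {
        System.out.println("heap used: " + currentMemoryUsage() + " bytes");
    }
}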
diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index f01b4bb3121..fb7885a217e 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -560,9 +560,6 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } boolean includeNumDocs(Version version) { - if (version.major == Version.V_5_6_8.major) { - return version.onOrAfter(Version.V_5_6_8); - } return version.onOrAfter(Version.V_6_2_2); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index 06e8a5734f6..ec05f0e30b0 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -173,10 +173,9 @@ public class PeerRecoverySourceService extends AbstractComponent implements Inde final RemoteRecoveryTargetHandler recoveryTarget = new RemoteRecoveryTargetHandler(request.recoveryId(), request.shardId(), transportService, request.targetNode(), recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime)); - handler = new RecoverySourceHandler(shard, recoveryTarget, request, recoverySettings.getChunkSize().bytesAsInt(), settings); + handler = new RecoverySourceHandler(shard, recoveryTarget, request, recoverySettings.getChunkSize().bytesAsInt()); return handler; } } } } - diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 352f07d5764..220abf43124 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -40,7 +40,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -116,13 +115,12 @@ public class RecoverySourceHandler { public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recoveryTarget, final StartRecoveryRequest request, - final int fileChunkSizeInBytes, - final Settings nodeSettings) { + final int fileChunkSizeInBytes) { this.shard = shard; this.recoveryTarget = recoveryTarget; this.request = request; this.shardId = this.request.shardId().id(); - this.logger = Loggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName()); + this.logger = Loggers.getLogger(getClass(), request.shardId(), "recover to " + request.targetNode().getName()); this.chunkSizeInBytes = fileChunkSizeInBytes; this.response = new RecoveryResponse(); } @@ -146,11 +144,11 @@ public class RecoverySourceHandler { assert targetShardRouting.initializing() : "expected recovery target to be initializing but was " + targetShardRouting; }, shardId + " validating recovery target ["+ request.targetAllocationId() + "] 
registered ", shard, cancellableThreads, logger); - try (Closeable ignored = shard.acquireTranslogRetentionLock()) { + try (Closeable ignored = shard.acquireRetentionLockForPeerRecovery()) { final long startingSeqNo; final long requiredSeqNoRangeStart; final boolean isSequenceNumberBasedRecovery = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && - isTargetSameHistory() && isTranslogReadyForSequenceNumberBasedRecovery(); + isTargetSameHistory() && shard.hasCompleteHistoryOperations("peer-recovery", request.startingSeqNo()); if (isSequenceNumberBasedRecovery) { logger.trace("performing sequence numbers based recovery. starting at [{}]", request.startingSeqNo()); startingSeqNo = request.startingSeqNo(); @@ -162,14 +160,16 @@ public class RecoverySourceHandler { } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e); } - // we set this to 0 to create a translog roughly according to the retention policy - // on the target. Note that it will still filter out legacy operations with no sequence numbers - startingSeqNo = 0; - // but we must have everything above the local checkpoint in the commit + // We must have everything above the local checkpoint in the commit requiredSeqNoRangeStart = Long.parseLong(phase1Snapshot.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1; + // If soft-deletes enabled, we need to transfer only operations after the local_checkpoint of the commit to have + // the same history on the target. However, with translog, we need to set this to 0 to create a translog roughly + // according to the retention policy on the target. Note that it will still filter out legacy operations without seqNo. + startingSeqNo = shard.indexSettings().isSoftDeleteEnabled() ? 
requiredSeqNoRangeStart : 0; try { - phase1(phase1Snapshot.getIndexCommit(), () -> shard.estimateTranslogOperationsFromMinSeq(startingSeqNo)); + final int estimateNumOps = shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo); + phase1(phase1Snapshot.getIndexCommit(), () -> estimateNumOps); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "phase1 failed", e); } finally { @@ -186,7 +186,8 @@ public class RecoverySourceHandler { try { // For a sequence based recovery, the target can keep its local translog - prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, shard.estimateTranslogOperationsFromMinSeq(startingSeqNo)); + prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, + shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo)); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e); } @@ -207,11 +208,13 @@ public class RecoverySourceHandler { */ cancellableThreads.execute(() -> shard.waitForOpsToComplete(endingSeqNo)); - logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo); - - logger.trace("snapshot translog for recovery; current size is [{}]", shard.estimateTranslogOperationsFromMinSeq(startingSeqNo)); + if (logger.isTraceEnabled()) { + logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo); + logger.trace("snapshot translog for recovery; current size is [{}]", + shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo)); + } final long targetLocalCheckpoint; - try(Translog.Snapshot snapshot = shard.newTranslogSnapshotFromMinSeqNo(startingSeqNo)) { + try (Translog.Snapshot snapshot = shard.getHistoryOperations("peer-recovery", startingSeqNo)) { targetLocalCheckpoint = phase2(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, snapshot); } catch (Exception e) { throw new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e); @@ -268,36 +271,6 @@ public class RecoverySourceHandler { }); } - /** - * Determines if the source translog is ready for a sequence-number-based peer recovery. The main condition here is that the source - * translog contains all operations above the local checkpoint on the target. We already know the that translog contains or will contain - * all ops above the source local checkpoint, so we can stop check there. 
- * - * @return {@code true} if the source is ready for a sequence-number-based recovery - * @throws IOException if an I/O exception occurred reading the translog snapshot - */ - boolean isTranslogReadyForSequenceNumberBasedRecovery() throws IOException { - final long startingSeqNo = request.startingSeqNo(); - assert startingSeqNo >= 0; - final long localCheckpoint = shard.getLocalCheckpoint(); - logger.trace("testing sequence numbers in range: [{}, {}]", startingSeqNo, localCheckpoint); - // the start recovery request is initialized with the starting sequence number set to the target shard's local checkpoint plus one - if (startingSeqNo - 1 <= localCheckpoint) { - final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1); - try (Translog.Snapshot snapshot = shard.newTranslogSnapshotFromMinSeqNo(startingSeqNo)) { - Translog.Operation operation; - while ((operation = snapshot.next()) != null) { - if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - tracker.markSeqNoAsCompleted(operation.seqNo()); - } - } - } - return tracker.getCheckpoint() >= localCheckpoint; - } else { - return false; - } - } - /** * Perform phase1 of the recovery operations. Once this {@link IndexCommit} * snapshot has been performed no commit operations (files being fsync'd) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 1a772f0c3f8..e28b01c8a61 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -117,7 +117,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget this.cancellableThreads = new CancellableThreads(); this.recoveryId = idGenerator.incrementAndGet(); this.listener = listener; - this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + this.logger = Loggers.getLogger(getClass(), indexShard.shardId()); this.indexShard = indexShard; this.sourceNode = sourceNode; this.shardId = indexShard.shardId(); diff --git a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index 3dee58febbd..373edfc3b46 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -67,17 +67,19 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction) raw); + } else if (raw instanceof List) { + return new UnmodifiableIngestList((List) raw); + } else if (raw instanceof byte[]) { + return ((byte[]) raw).clone(); + } + return raw; + } + + private static UnsupportedOperationException unmodifiableException() { + return new UnsupportedOperationException("Mutating ingest documents in conditionals is not supported"); + } + + private static final class UnmodifiableIngestData implements Map { + + private final Map data; + + UnmodifiableIngestData(Map data) { + this.data = data; + } + + @Override + public int size() { + return data.size(); + } + + @Override + public boolean isEmpty() { + return data.isEmpty(); + } + + @Override + public boolean containsKey(final Object key) { + return data.containsKey(key); + } + + @Override + public boolean containsValue(final 
Object value) { + return data.containsValue(value); + } + + @Override + public Object get(final Object key) { + return wrapUnmodifiable(data.get(key)); + } + + @Override + public Object put(final String key, final Object value) { + throw unmodifiableException(); + } + + @Override + public Object remove(final Object key) { + throw unmodifiableException(); + } + + @Override + public void putAll(final Map m) { + throw unmodifiableException(); + } + + @Override + public void clear() { + throw unmodifiableException(); + } + + @Override + public Set keySet() { + return Collections.unmodifiableSet(data.keySet()); + } + + @Override + public Collection values() { + return new UnmodifiableIngestList(new ArrayList<>(data.values())); + } + + @Override + public Set> entrySet() { + return data.entrySet().stream().map(entry -> + new Entry() { + @Override + public String getKey() { + return entry.getKey(); + } + + @Override + public Object getValue() { + return wrapUnmodifiable(entry.getValue()); + } + + @Override + public Object setValue(final Object value) { + throw unmodifiableException(); + } + + @Override + public boolean equals(final Object o) { + return entry.equals(o); + } + + @Override + public int hashCode() { + return entry.hashCode(); + } + }).collect(Collectors.toSet()); + } + } + + private static final class UnmodifiableIngestList implements List { + + private final List data; + + UnmodifiableIngestList(List data) { + this.data = data; + } + + @Override + public int size() { + return data.size(); + } + + @Override + public boolean isEmpty() { + return data.isEmpty(); + } + + @Override + public boolean contains(final Object o) { + return data.contains(o); + } + + @Override + public Iterator iterator() { + Iterator wrapped = data.iterator(); + return new Iterator() { + @Override + public boolean hasNext() { + return wrapped.hasNext(); + } + + @Override + public Object next() { + // wrap like listIterator() does, so nested containers stay read-only + return wrapUnmodifiable(wrapped.next()); + } + + @Override + public void remove() { + throw unmodifiableException(); + } + }; + } + + @Override + public Object[] toArray() { + Object[] wrapped = data.toArray(new Object[0]); + for (int i = 0; i < wrapped.length; i++) { + wrapped[i] = wrapUnmodifiable(wrapped[i]); + } + return wrapped; + } + + @Override + public T[] toArray(final T[] a) { + Object[] raw = data.toArray(new Object[0]); + T[] wrapped = (T[]) Arrays.copyOf(raw, raw.length, a.getClass()); + for (int i = 0; i < wrapped.length; i++) { + wrapped[i] = (T) wrapUnmodifiable(wrapped[i]); + } + return wrapped; + } + + @Override + public boolean add(final Object o) { + throw unmodifiableException(); + } + + @Override + public boolean remove(final Object o) { + throw unmodifiableException(); + } + + @Override + public boolean containsAll(final Collection c) { + return data.containsAll(c); + } + + @Override + public boolean addAll(final Collection c) { + throw unmodifiableException(); + } + + @Override + public boolean addAll(final int index, final Collection c) { + throw unmodifiableException(); + } + + @Override + public boolean removeAll(final Collection c) { + throw unmodifiableException(); + } + + @Override + public boolean retainAll(final Collection c) { + throw unmodifiableException(); + } + + @Override + public void clear() { + throw unmodifiableException(); + } + + @Override + public Object get(final int index) { + return wrapUnmodifiable(data.get(index)); + } + + @Override + public Object set(final int index, final Object element) { + throw unmodifiableException(); + } + + @Override + public void add(final int index, final Object
element) { + throw unmodifiableException(); + } + + @Override + public Object remove(final int index) { + throw unmodifiableException(); + } + + @Override + public int indexOf(final Object o) { + return data.indexOf(o); + } + + @Override + public int lastIndexOf(final Object o) { + return data.lastIndexOf(o); + } + + @Override + public ListIterator listIterator() { + return new UnmodifiableListIterator(data.listIterator()); + } + + @Override + public ListIterator listIterator(final int index) { + return new UnmodifiableListIterator(data.listIterator(index)); + } + + @Override + public List subList(final int fromIndex, final int toIndex) { + return new UnmodifiableIngestList(data.subList(fromIndex, toIndex)); + } + + private static final class UnmodifiableListIterator implements ListIterator { + + private final ListIterator data; + + UnmodifiableListIterator(ListIterator data) { + this.data = data; + } + + @Override + public boolean hasNext() { + return data.hasNext(); + } + + @Override + public Object next() { + return wrapUnmodifiable(data.next()); + } + + @Override + public boolean hasPrevious() { + return data.hasPrevious(); + } + + @Override + public Object previous() { + return wrapUnmodifiable(data.previous()); + } + + @Override + public int nextIndex() { + return data.nextIndex(); + } + + @Override + public int previousIndex() { + return data.previousIndex(); + } + + @Override + public void remove() { + throw unmodifiableException(); + } + + @Override + public void set(final Object o) { + throw unmodifiableException(); + } + + @Override + public void add(final Object o) { + throw unmodifiableException(); + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java index 54d06d11655..d4f27f47eb8 100644 --- a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java +++ b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java @@ -19,9 +19,18 @@ package org.elasticsearch.ingest; +import java.io.IOException; +import java.io.InputStream; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; @@ -296,6 +305,7 @@ public final class ConfigurationUtils { } public static List readProcessorConfigs(List> processorConfigs, + ScriptService scriptService, Map processorFactories) throws Exception { Exception exception = null; List processors = new ArrayList<>(); @@ -303,7 +313,7 @@ public final class ConfigurationUtils { for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { try { - processors.add(readProcessor(processorFactories, entry.getKey(), entry.getValue())); + processors.add(readProcessor(processorFactories, scriptService, entry.getKey(), entry.getValue())); } catch (Exception e) { exception = ExceptionsHelper.useOrSuppress(exception, e); } @@ -356,13 +366,14 @@ 
public final class ConfigurationUtils { @SuppressWarnings("unchecked") public static Processor readProcessor(Map processorFactories, + ScriptService scriptService, String type, Object config) throws Exception { if (config instanceof Map) { - return readProcessor(processorFactories, type, (Map) config); + return readProcessor(processorFactories, scriptService, type, (Map) config); } else if (config instanceof String && "script".equals(type)) { Map normalizedScript = new HashMap<>(1); normalizedScript.put(ScriptType.INLINE.getParseField().getPreferredName(), config); - return readProcessor(processorFactories, type, normalizedScript); + return readProcessor(processorFactories, scriptService, type, normalizedScript); } else { throw newConfigurationException(type, null, null, "property isn't a map, but of type [" + config.getClass().getName() + "]"); @@ -370,15 +381,17 @@ public final class ConfigurationUtils { } public static Processor readProcessor(Map processorFactories, + ScriptService scriptService, String type, Map config) throws Exception { String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY); + Script conditionalScript = extractConditional(config); Processor.Factory factory = processorFactories.get(type); if (factory != null) { boolean ignoreFailure = ConfigurationUtils.readBooleanProperty(null, null, config, "ignore_failure", false); List> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, Pipeline.ON_FAILURE_KEY); - List onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorFactories); + List onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, scriptService, processorFactories); if (onFailureProcessorConfigs != null && onFailureProcessors.isEmpty()) { throw newConfigurationException(type, tag, Pipeline.ON_FAILURE_KEY, @@ -392,14 +405,42 @@ public final class ConfigurationUtils { type, Arrays.toString(config.keySet().toArray())); } if (onFailureProcessors.size() > 0 || ignoreFailure) { - return new CompoundProcessor(ignoreFailure, Collections.singletonList(processor), onFailureProcessors); - } else { - return processor; + processor = new CompoundProcessor(ignoreFailure, Collections.singletonList(processor), onFailureProcessors); } + if (conditionalScript != null) { + processor = new ConditionalProcessor(tag, conditionalScript, scriptService, processor); + } + return processor; } catch (Exception e) { throw newConfigurationException(type, tag, null, e); } } throw newConfigurationException(type, tag, null, "No processor type exists with name [" + type + "]"); } + + private static Script extractConditional(Map config) throws IOException { + Object scriptSource = config.remove("if"); + if (scriptSource != null) { + try (XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent) + .map(normalizeScript(scriptSource)); + InputStream stream = BytesReference.bytes(builder).streamInput(); + XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, stream)) { + return Script.parse(parser); + } + } + return null; + } + + @SuppressWarnings("unchecked") + private static Map normalizeScript(Object scriptConfig) { + if (scriptConfig instanceof Map) { + return (Map) scriptConfig; + } else if (scriptConfig instanceof String) { + return Collections.singletonMap("source", scriptConfig); + } else { + throw newConfigurationException("conditional", null, "script", + "property isn't a map or string, but of 
type [" + scriptConfig.getClass().getName() + "]"); + } + } } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java index aad55e12cef..10cb2fd17fe 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -19,6 +19,9 @@ package org.elasticsearch.ingest; +import java.util.Collections; +import java.util.IdentityHashMap; +import java.util.Set; import org.elasticsearch.common.Strings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -55,6 +58,9 @@ public final class IngestDocument { private final Map sourceAndMetadata; private final Map ingestMetadata; + // Contains all pipelines that have been executed for this document + private final Set executedPipelines = Collections.newSetFromMap(new IdentityHashMap<>()); + public IngestDocument(String index, String type, String id, String routing, Long version, VersionType versionType, Map source) { this.sourceAndMetadata = new HashMap<>(); @@ -632,6 +638,23 @@ public final class IngestDocument { } } + /** + * Executes the given pipeline with for this document unless the pipeline has already been executed + * for this document. + * @param pipeline Pipeline to execute + * @throws Exception On exception in pipeline execution + */ + public IngestDocument executePipeline(Pipeline pipeline) throws Exception { + try { + if (this.executedPipelines.add(pipeline) == false) { + throw new IllegalStateException("Recursive invocation of pipeline [" + pipeline.getId() + "] detected."); + } + return pipeline.execute(this); + } finally { + executedPipelines.remove(pipeline); + } + } + @Override public boolean equals(Object obj) { if (obj == this) { return true; } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 01bc402e43b..5623cf30f36 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -22,14 +22,42 @@ package org.elasticsearch.ingest; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.concurrent.ScheduledFuture; -import java.util.function.BiFunction; - -import org.elasticsearch.common.settings.Settings; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateApplier; +import 
org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.metrics.MeanMetric; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.env.Environment; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.script.ScriptService; @@ -38,20 +66,42 @@ import org.elasticsearch.threadpool.ThreadPool; /** * Holder class for several ingest related services. */ -public class IngestService { +public class IngestService implements ClusterStateApplier { public static final String NOOP_PIPELINE_NAME = "_none"; - private final PipelineStore pipelineStore; - private final PipelineExecutionService pipelineExecutionService; + private final ClusterService clusterService; + private final ScriptService scriptService; + private final Map processorFactories; + // Ideally this should be in IngestMetadata class, but we don't have the processor factories around there. + // We know of all the processor factories when a node with all its plugins has been initialized. Also some + // processor factories rely on other node services. Custom metadata is statically registered when classes + // are loaded, so in the cluster state we just save the pipeline config and here we keep the actual pipelines around. + private volatile Map pipelines = new HashMap<>(); + private final ThreadPool threadPool; + private final StatsHolder totalStats = new StatsHolder(); + private volatile Map statsHolderPerPipeline = Collections.emptyMap(); - public IngestService(Settings settings, ThreadPool threadPool, + public IngestService(ClusterService clusterService, ThreadPool threadPool, Environment env, ScriptService scriptService, AnalysisRegistry analysisRegistry, List ingestPlugins) { - BiFunction> scheduler = - (delay, command) -> threadPool.schedule(TimeValue.timeValueMillis(delay), ThreadPool.Names.GENERIC, command); - Processor.Parameters parameters = new Processor.Parameters(env, scriptService, analysisRegistry, - threadPool.getThreadContext(), threadPool::relativeTimeInMillis, scheduler); + this.clusterService = clusterService; + this.scriptService = scriptService; + this.processorFactories = processorFactories( + ingestPlugins, + new Processor.Parameters( + env, scriptService, analysisRegistry, + threadPool.getThreadContext(), threadPool::relativeTimeInMillis, + (delay, command) -> threadPool.schedule( + TimeValue.timeValueMillis(delay), ThreadPool.Names.GENERIC, command + ), this + ) + ); + this.threadPool = threadPool; + } + + private static Map processorFactories(List ingestPlugins, + Processor.Parameters parameters) { Map processorFactories = new HashMap<>(); for (IngestPlugin ingestPlugin : ingestPlugins) { Map newProcessors = ingestPlugin.getProcessors(parameters); @@ -61,24 +111,396 @@ public class IngestService { } } } - this.pipelineStore = new PipelineStore(settings, Collections.unmodifiableMap(processorFactories)); - this.pipelineExecutionService = new PipelineExecutionService(pipelineStore, threadPool); + return Collections.unmodifiableMap(processorFactories); } - public PipelineStore
getPipelineStore() { - return pipelineStore; + public ClusterService getClusterService() { + return clusterService; } - public PipelineExecutionService getPipelineExecutionService() { - return pipelineExecutionService; + public ScriptService getScriptService() { + return scriptService; + } + + /** + * Deletes the pipeline specified by id in the request. + */ + public void delete(DeletePipelineRequest request, ActionListener listener) { + clusterService.submitStateUpdateTask("delete-pipeline-" + request.getId(), + new AckedClusterStateUpdateTask(request, listener) { + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) { + return innerDelete(request, currentState); + } + }); + } + + static ClusterState innerDelete(DeletePipelineRequest request, ClusterState currentState) { + IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); + if (currentIngestMetadata == null) { + return currentState; + } + Map pipelines = currentIngestMetadata.getPipelines(); + Set toRemove = new HashSet<>(); + for (String pipelineKey : pipelines.keySet()) { + if (Regex.simpleMatch(request.getId(), pipelineKey)) { + toRemove.add(pipelineKey); + } + } + if (toRemove.isEmpty() && Regex.isMatchAllPattern(request.getId()) == false) { + throw new ResourceNotFoundException("pipeline [{}] is missing", request.getId()); + } else if (toRemove.isEmpty()) { + return currentState; + } + final Map pipelinesCopy = new HashMap<>(pipelines); + for (String key : toRemove) { + pipelinesCopy.remove(key); + } + ClusterState.Builder newState = ClusterState.builder(currentState); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelinesCopy)) + .build()); + return newState.build(); + } + + /** + * @return pipeline configuration specified by id. If multiple ids or wildcards are specified multiple pipelines + * may be returned + */ + // Returning PipelineConfiguration instead of Pipeline, because Pipeline and Processor interface don't + // know how to serialize themselves. + public static List getPipelines(ClusterState clusterState, String... ids) { + IngestMetadata ingestMetadata = clusterState.getMetaData().custom(IngestMetadata.TYPE); + return innerGetPipelines(ingestMetadata, ids); + } + + static List innerGetPipelines(IngestMetadata ingestMetadata, String... ids) { + if (ingestMetadata == null) { + return Collections.emptyList(); + } + + // if we didn't ask for _any_ ID, then we get them all (this is the same as if they ask for '*') + if (ids.length == 0) { + return new ArrayList<>(ingestMetadata.getPipelines().values()); + } + + List result = new ArrayList<>(ids.length); + for (String id : ids) { + if (Regex.isSimpleMatchPattern(id)) { + for (Map.Entry entry : ingestMetadata.getPipelines().entrySet()) { + if (Regex.simpleMatch(id, entry.getKey())) { + result.add(entry.getValue()); + } + } + } else { + PipelineConfiguration pipeline = ingestMetadata.getPipelines().get(id); + if (pipeline != null) { + result.add(pipeline); + } + } + } + return result; + } + + /** + * Stores the specified pipeline definition in the request. 
+ */ + public void putPipeline(Map ingestInfos, PutPipelineRequest request, + ActionListener listener) throws Exception { + // validates the pipeline and processor configuration before submitting a cluster update task: + validatePipeline(ingestInfos, request); + clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), + new AckedClusterStateUpdateTask(request, listener) { + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) { + return innerPut(request, currentState); + } + }); + } + + /** + * Returns the pipeline by the specified id + */ + public Pipeline getPipeline(String id) { + return pipelines.get(id); + } + + public Map getProcessorFactories() { + return processorFactories; } public IngestInfo info() { - Map processorFactories = pipelineStore.getProcessorFactories(); + Map processorFactories = getProcessorFactories(); List processorInfoList = new ArrayList<>(processorFactories.size()); for (Map.Entry entry : processorFactories.entrySet()) { processorInfoList.add(new ProcessorInfo(entry.getKey())); } return new IngestInfo(processorInfoList); } + + Map pipelines() { + return pipelines; + } + + @Override + public void applyClusterState(final ClusterChangedEvent event) { + ClusterState state = event.state(); + innerUpdatePipelines(event.previousState(), state); + IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); + if (ingestMetadata != null) { + updatePipelineStats(ingestMetadata); + } + } + + private static Pipeline substitutePipeline(String id, ElasticsearchParseException e) { + String tag = e.getHeaderKeys().contains("processor_tag") ? e.getHeader("processor_tag").get(0) : null; + String type = e.getHeaderKeys().contains("processor_type") ? 
e.getHeader("processor_type").get(0) : "unknown"; + String errorMessage = "pipeline with id [" + id + "] could not be loaded, caused by [" + e.getDetailedMessage() + "]"; + Processor failureProcessor = new AbstractProcessor(tag) { + @Override + public IngestDocument execute(IngestDocument ingestDocument) { + throw new IllegalStateException(errorMessage); + } + + @Override + public String getType() { + return type; + } + }; + String description = "this is a place holder pipeline, because pipeline with id [" + id + "] could not be loaded"; + return new Pipeline(id, description, null, new CompoundProcessor(failureProcessor)); + } + + static ClusterState innerPut(PutPipelineRequest request, ClusterState currentState) { + IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); + Map pipelines; + if (currentIngestMetadata != null) { + pipelines = new HashMap<>(currentIngestMetadata.getPipelines()); + } else { + pipelines = new HashMap<>(); + } + + pipelines.put(request.getId(), new PipelineConfiguration(request.getId(), request.getSource(), request.getXContentType())); + ClusterState.Builder newState = ClusterState.builder(currentState); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelines)) + .build()); + return newState.build(); + } + + void validatePipeline(Map ingestInfos, PutPipelineRequest request) throws Exception { + if (ingestInfos.isEmpty()) { + throw new IllegalStateException("Ingest info is empty"); + } + + Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); + Pipeline pipeline = Pipeline.create(request.getId(), pipelineConfig, processorFactories, scriptService); + List exceptions = new ArrayList<>(); + for (Processor processor : pipeline.flattenAllProcessors()) { + for (Map.Entry entry : ingestInfos.entrySet()) { + String type = processor.getType(); + if (entry.getValue().containsProcessor(type) == false && ConditionalProcessor.TYPE.equals(type) == false) { + String message = "Processor type [" + processor.getType() + "] is not installed on node [" + entry.getKey() + "]"; + exceptions.add( + ConfigurationUtils.newConfigurationException(processor.getType(), processor.getTag(), null, message) + ); + } + } + } + ExceptionsHelper.rethrowAndSuppress(exceptions); + } + + public void executeBulkRequest(Iterable> actionRequests, + BiConsumer itemFailureHandler, Consumer completionHandler, + Consumer itemDroppedHandler) { + threadPool.executor(ThreadPool.Names.WRITE).execute(new AbstractRunnable() { + + @Override + public void onFailure(Exception e) { + completionHandler.accept(e); + } + + @Override + protected void doRun() { + for (DocWriteRequest actionRequest : actionRequests) { + IndexRequest indexRequest = null; + if (actionRequest instanceof IndexRequest) { + indexRequest = (IndexRequest) actionRequest; + } else if (actionRequest instanceof UpdateRequest) { + UpdateRequest updateRequest = (UpdateRequest) actionRequest; + indexRequest = updateRequest.docAsUpsert() ? 
updateRequest.doc() : updateRequest.upsertRequest(); + } + if (indexRequest == null) { + continue; + } + String pipelineId = indexRequest.getPipeline(); + if (NOOP_PIPELINE_NAME.equals(pipelineId) == false) { + try { + Pipeline pipeline = pipelines.get(pipelineId); + if (pipeline == null) { + throw new IllegalArgumentException("pipeline with id [" + pipelineId + "] does not exist"); + } + innerExecute(indexRequest, pipeline, itemDroppedHandler); + //this shouldn't be needed here but we do it for consistency with index api + // which requires it to prevent double execution + indexRequest.setPipeline(NOOP_PIPELINE_NAME); + } catch (Exception e) { + itemFailureHandler.accept(indexRequest, e); + } + } + } + completionHandler.accept(null); + } + }); + } + + public IngestStats stats() { + Map statsHolderPerPipeline = this.statsHolderPerPipeline; + + Map statsPerPipeline = new HashMap<>(statsHolderPerPipeline.size()); + for (Map.Entry entry : statsHolderPerPipeline.entrySet()) { + statsPerPipeline.put(entry.getKey(), entry.getValue().createStats()); + } + + return new IngestStats(totalStats.createStats(), statsPerPipeline); + } + + void updatePipelineStats(IngestMetadata ingestMetadata) { + boolean changed = false; + Map newStatsPerPipeline = new HashMap<>(statsHolderPerPipeline); + Iterator iterator = newStatsPerPipeline.keySet().iterator(); + while (iterator.hasNext()) { + String pipeline = iterator.next(); + if (ingestMetadata.getPipelines().containsKey(pipeline) == false) { + iterator.remove(); + changed = true; + } + } + for (String pipeline : ingestMetadata.getPipelines().keySet()) { + if (newStatsPerPipeline.containsKey(pipeline) == false) { + newStatsPerPipeline.put(pipeline, new StatsHolder()); + changed = true; + } + } + + if (changed) { + statsHolderPerPipeline = Collections.unmodifiableMap(newStatsPerPipeline); + } + } + + private void innerExecute(IndexRequest indexRequest, Pipeline pipeline, Consumer itemDroppedHandler) throws Exception { + if (pipeline.getProcessors().isEmpty()) { + return; + } + + long startTimeInNanos = System.nanoTime(); + // the pipeline specific stat holder may not exist and that is fine: + // (e.g. the pipeline may have been removed while we're ingesting a document + Optional pipelineStats = Optional.ofNullable(statsHolderPerPipeline.get(pipeline.getId())); + try { + totalStats.preIngest(); + pipelineStats.ifPresent(StatsHolder::preIngest); + String index = indexRequest.index(); + String type = indexRequest.type(); + String id = indexRequest.id(); + String routing = indexRequest.routing(); + Long version = indexRequest.version(); + VersionType versionType = indexRequest.versionType(); + Map sourceAsMap = indexRequest.sourceAsMap(); + IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, version, versionType, sourceAsMap); + if (pipeline.execute(ingestDocument) == null) { + itemDroppedHandler.accept(indexRequest); + } else { + Map metadataMap = ingestDocument.extractMetadata(); + //it's fine to set all metadata fields all the time, as ingest document holds their starting values + //before ingestion, which might also get modified during ingestion. 
+ indexRequest.index((String) metadataMap.get(IngestDocument.MetaData.INDEX)); + indexRequest.type((String) metadataMap.get(IngestDocument.MetaData.TYPE)); + indexRequest.id((String) metadataMap.get(IngestDocument.MetaData.ID)); + indexRequest.routing((String) metadataMap.get(IngestDocument.MetaData.ROUTING)); + indexRequest.version(((Number) metadataMap.get(IngestDocument.MetaData.VERSION)).longValue()); + if (metadataMap.get(IngestDocument.MetaData.VERSION_TYPE) != null) { + indexRequest.versionType(VersionType.fromString((String) metadataMap.get(IngestDocument.MetaData.VERSION_TYPE))); + } + indexRequest.source(ingestDocument.getSourceAndMetadata()); + } + } catch (Exception e) { + totalStats.ingestFailed(); + pipelineStats.ifPresent(StatsHolder::ingestFailed); + throw e; + } finally { + long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos); + totalStats.postIngest(ingestTimeInMillis); + pipelineStats.ifPresent(statsHolder -> statsHolder.postIngest(ingestTimeInMillis)); + } + } + + private void innerUpdatePipelines(ClusterState previousState, ClusterState state) { + if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + return; + } + + IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); + IngestMetadata previousIngestMetadata = previousState.getMetaData().custom(IngestMetadata.TYPE); + if (Objects.equals(ingestMetadata, previousIngestMetadata)) { + return; + } + + Map pipelines = new HashMap<>(); + List exceptions = new ArrayList<>(); + for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) { + try { + pipelines.put( + pipeline.getId(), + Pipeline.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories, scriptService) + ); + } catch (ElasticsearchParseException e) { + pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), e)); + exceptions.add(e); + } catch (Exception e) { + ElasticsearchParseException parseException = new ElasticsearchParseException( + "Error updating pipeline with id [" + pipeline.getId() + "]", e); + pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), parseException)); + exceptions.add(parseException); + } + } + this.pipelines = Collections.unmodifiableMap(pipelines); + ExceptionsHelper.rethrowAndSuppress(exceptions); + } + + private static class StatsHolder { + + private final MeanMetric ingestMetric = new MeanMetric(); + private final CounterMetric ingestCurrent = new CounterMetric(); + private final CounterMetric ingestFailed = new CounterMetric(); + + void preIngest() { + ingestCurrent.inc(); + } + + void postIngest(long ingestTimeInMillis) { + ingestCurrent.dec(); + ingestMetric.inc(ingestTimeInMillis); + } + + void ingestFailed() { + ingestFailed.inc(); + } + + IngestStats.Stats createStats() { + return new IngestStats.Stats(ingestMetric.count(), ingestMetric.sum(), ingestCurrent.count(), ingestFailed.count()); + } + } } diff --git a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java index 1b0553a5490..9f13cb1280a 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java +++ b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java @@ -26,6 +26,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; +import org.elasticsearch.script.ScriptService; /** * A pipeline is a list of {@link Processor} instances grouped under a unique id. 
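Worth pausing on the executePipeline method added to IngestDocument above: pipelines in flight are tracked in an identity-based set, a re-entrant invocation fails fast with an IllegalStateException, and the finally block unwinds the set so the same pipeline can run for the next document. Below is a self-contained sketch of that guard, with Pipeline reduced to a plain Object and the pipeline body to a Runnable for illustration; the add is hoisted out of the try so the cleanup only runs after a successful registration.

import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Set;

// Sketch of the recursion guard: an identity set of in-flight pipelines.
final class RecursionGuardSketch {

    private final Set<Object> executingPipelines =
        Collections.newSetFromMap(new IdentityHashMap<>());

    void executePipeline(Object pipeline, Runnable body) {
        if (executingPipelines.add(pipeline) == false) {
            throw new IllegalStateException("Recursive invocation of pipeline detected.");
        }
        try {
            body.run();
        } finally {
            // Unwind so the pipeline may execute again for the next document.
            executingPipelines.remove(pipeline);
        }
    }

    public static void main(String[] args) {
        RecursionGuardSketch guard = new RecursionGuardSketch();
        Object pipeline = new Object();
        guard.executePipeline(pipeline, () -> System.out.println("ran once"));
        try {
            // A pipeline that invokes itself is rejected on the nested call.
            guard.executePipeline(pipeline, () ->
                guard.executePipeline(pipeline, () -> { }));
        } catch (IllegalStateException e) {
            System.out.println("blocked: " + e.getMessage());
        }
    }
}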
@@ -51,11 +52,33 @@ public final class Pipeline { this.version = version; } + public static Pipeline create(String id, Map config, + Map processorFactories, ScriptService scriptService) throws Exception { + String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY); + Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null); + List> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY); + List processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, scriptService, processorFactories); + List> onFailureProcessorConfigs = + ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY); + List onFailureProcessors = + ConfigurationUtils.readProcessorConfigs(onFailureProcessorConfigs, scriptService, processorFactories); + if (config.isEmpty() == false) { + throw new ElasticsearchParseException("pipeline [" + id + + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); + } + if (onFailureProcessorConfigs != null && onFailureProcessors.isEmpty()) { + throw new ElasticsearchParseException("pipeline [" + id + "] cannot have an empty on_failure option defined"); + } + CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.unmodifiableList(processors), + Collections.unmodifiableList(onFailureProcessors)); + return new Pipeline(id, description, version, compoundProcessor); + } + /** * Modifies the data of a document to be indexed based on the processor this pipeline holds */ - public void execute(IngestDocument ingestDocument) throws Exception { - compoundProcessor.execute(ingestDocument); + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { + return compoundProcessor.execute(ingestDocument); } /** @@ -113,27 +136,4 @@ public final class Pipeline { return compoundProcessor.flattenProcessors(); } - public static final class Factory { - - public Pipeline create(String id, Map config, Map processorFactories) throws Exception { - String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY); - Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null); - List> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY); - List processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, processorFactories); - List> onFailureProcessorConfigs = - ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY); - List onFailureProcessors = ConfigurationUtils.readProcessorConfigs(onFailureProcessorConfigs, processorFactories); - if (config.isEmpty() == false) { - throw new ElasticsearchParseException("pipeline [" + id + - "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); - } - if (onFailureProcessorConfigs != null && onFailureProcessors.isEmpty()) { - throw new ElasticsearchParseException("pipeline [" + id + "] cannot have an empty on_failure option defined"); - } - CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.unmodifiableList(processors), - Collections.unmodifiableList(onFailureProcessors)); - return new Pipeline(id, description, version, compoundProcessor); - } - - } } diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index 
a2aa8e385e3..6778f3d1eaa 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -19,7 +19,6 @@ package org.elasticsearch.ingest; -import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; import org.elasticsearch.common.ParseField; @@ -117,13 +116,7 @@ public final class PipelineConfiguration extends AbstractDiffable readDiffFrom(StreamInput in) throws IOException { @@ -134,9 +127,7 @@ public final class PipelineConfiguration extends AbstractDiffable statsHolderPerPipeline = Collections.emptyMap(); - - public PipelineExecutionService(PipelineStore store, ThreadPool threadPool) { - this.store = store; - this.threadPool = threadPool; - } - - public void executeBulkRequest(Iterable> actionRequests, - BiConsumer itemFailureHandler, - Consumer completionHandler) { - threadPool.executor(ThreadPool.Names.WRITE).execute(new AbstractRunnable() { - - @Override - public void onFailure(Exception e) { - completionHandler.accept(e); - } - - @Override - protected void doRun() throws Exception { - for (DocWriteRequest actionRequest : actionRequests) { - IndexRequest indexRequest = null; - if (actionRequest instanceof IndexRequest) { - indexRequest = (IndexRequest) actionRequest; - } else if (actionRequest instanceof UpdateRequest) { - UpdateRequest updateRequest = (UpdateRequest) actionRequest; - indexRequest = updateRequest.docAsUpsert() ? updateRequest.doc() : updateRequest.upsertRequest(); - } - if (indexRequest == null) { - continue; - } - String pipeline = indexRequest.getPipeline(); - if (IngestService.NOOP_PIPELINE_NAME.equals(pipeline) == false) { - try { - innerExecute(indexRequest, getPipeline(indexRequest.getPipeline())); - //this shouldn't be needed here but we do it for consistency with index api - // which requires it to prevent double execution - indexRequest.setPipeline(IngestService.NOOP_PIPELINE_NAME); - } catch (Exception e) { - itemFailureHandler.accept(indexRequest, e); - } - } - } - completionHandler.accept(null); - } - }); - } - - public IngestStats stats() { - Map statsHolderPerPipeline = this.statsHolderPerPipeline; - - Map statsPerPipeline = new HashMap<>(statsHolderPerPipeline.size()); - for (Map.Entry entry : statsHolderPerPipeline.entrySet()) { - statsPerPipeline.put(entry.getKey(), entry.getValue().createStats()); - } - - return new IngestStats(totalStats.createStats(), statsPerPipeline); - } - - @Override - public void applyClusterState(ClusterChangedEvent event) { - IngestMetadata ingestMetadata = event.state().getMetaData().custom(IngestMetadata.TYPE); - if (ingestMetadata != null) { - updatePipelineStats(ingestMetadata); - } - } - - void updatePipelineStats(IngestMetadata ingestMetadata) { - boolean changed = false; - Map newStatsPerPipeline = new HashMap<>(statsHolderPerPipeline); - Iterator iterator = newStatsPerPipeline.keySet().iterator(); - while (iterator.hasNext()) { - String pipeline = iterator.next(); - if (ingestMetadata.getPipelines().containsKey(pipeline) == false) { - iterator.remove(); - changed = true; - } - } - for (String pipeline : ingestMetadata.getPipelines().keySet()) { - if (newStatsPerPipeline.containsKey(pipeline) == false) { - newStatsPerPipeline.put(pipeline, new StatsHolder()); - changed = true; - } - } - - if (changed) { - statsHolderPerPipeline = Collections.unmodifiableMap(newStatsPerPipeline); - } - } - - private void innerExecute(IndexRequest indexRequest, Pipeline 
pipeline) throws Exception { - if (pipeline.getProcessors().isEmpty()) { - return; - } - - long startTimeInNanos = System.nanoTime(); - // the pipeline specific stat holder may not exist and that is fine: - // (e.g. the pipeline may have been removed while we're ingesting a document - Optional pipelineStats = Optional.ofNullable(statsHolderPerPipeline.get(pipeline.getId())); - try { - totalStats.preIngest(); - pipelineStats.ifPresent(StatsHolder::preIngest); - String index = indexRequest.index(); - String type = indexRequest.type(); - String id = indexRequest.id(); - String routing = indexRequest.routing(); - Long version = indexRequest.version(); - VersionType versionType = indexRequest.versionType(); - Map sourceAsMap = indexRequest.sourceAsMap(); - IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, version, versionType, sourceAsMap); - pipeline.execute(ingestDocument); - - Map metadataMap = ingestDocument.extractMetadata(); - //it's fine to set all metadata fields all the time, as ingest document holds their starting values - //before ingestion, which might also get modified during ingestion. - indexRequest.index((String) metadataMap.get(IngestDocument.MetaData.INDEX)); - indexRequest.type((String) metadataMap.get(IngestDocument.MetaData.TYPE)); - indexRequest.id((String) metadataMap.get(IngestDocument.MetaData.ID)); - indexRequest.routing((String) metadataMap.get(IngestDocument.MetaData.ROUTING)); - indexRequest.version(((Number) metadataMap.get(IngestDocument.MetaData.VERSION)).longValue()); - if (metadataMap.get(IngestDocument.MetaData.VERSION_TYPE) != null) { - indexRequest.versionType(VersionType.fromString((String) metadataMap.get(IngestDocument.MetaData.VERSION_TYPE))); - } - indexRequest.source(ingestDocument.getSourceAndMetadata()); - } catch (Exception e) { - totalStats.ingestFailed(); - pipelineStats.ifPresent(StatsHolder::ingestFailed); - throw e; - } finally { - long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos); - totalStats.postIngest(ingestTimeInMillis); - pipelineStats.ifPresent(statsHolder -> statsHolder.postIngest(ingestTimeInMillis)); - } - } - - private Pipeline getPipeline(String pipelineId) { - Pipeline pipeline = store.get(pipelineId); - if (pipeline == null) { - throw new IllegalArgumentException("pipeline with id [" + pipelineId + "] does not exist"); - } - return pipeline; - } - - static class StatsHolder { - - private final MeanMetric ingestMetric = new MeanMetric(); - private final CounterMetric ingestCurrent = new CounterMetric(); - private final CounterMetric ingestFailed = new CounterMetric(); - - void preIngest() { - ingestCurrent.inc(); - } - - void postIngest(long ingestTimeInMillis) { - ingestCurrent.dec(); - ingestMetric.inc(ingestTimeInMillis); - } - - void ingestFailed() { - ingestFailed.inc(); - } - - IngestStats.Stats createStats() { - return new IngestStats.Stats(ingestMetric.count(), ingestMetric.sum(), ingestCurrent.count(), ingestFailed.count()); - } - - } - -} diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java b/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java deleted file mode 100644 index 9fceaf1a9a5..00000000000 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.ingest; - -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateApplier; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.gateway.GatewayService; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; - -public class PipelineStore extends AbstractComponent implements ClusterStateApplier { - - private final Pipeline.Factory factory = new Pipeline.Factory(); - private final Map processorFactories; - - // Ideally this should be in IngestMetadata class, but we don't have the processor factories around there. - // We know of all the processor factories when a node with all its plugin have been initialized. Also some - // processor factories rely on other node services. Custom metadata is statically registered when classes - // are loaded, so in the cluster state we just save the pipeline config and here we keep the actual pipelines around. 
- volatile Map pipelines = new HashMap<>(); - - public PipelineStore(Settings settings, Map processorFactories) { - super(settings); - this.processorFactories = processorFactories; - } - - @Override - public void applyClusterState(ClusterChangedEvent event) { - innerUpdatePipelines(event.previousState(), event.state()); - } - - void innerUpdatePipelines(ClusterState previousState, ClusterState state) { - if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - return; - } - - IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); - IngestMetadata previousIngestMetadata = previousState.getMetaData().custom(IngestMetadata.TYPE); - if (Objects.equals(ingestMetadata, previousIngestMetadata)) { - return; - } - - Map pipelines = new HashMap<>(); - List exceptions = new ArrayList<>(); - for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) { - try { - pipelines.put(pipeline.getId(), factory.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories)); - } catch (ElasticsearchParseException e) { - pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), e)); - exceptions.add(e); - } catch (Exception e) { - ElasticsearchParseException parseException = new ElasticsearchParseException( - "Error updating pipeline with id [" + pipeline.getId() + "]", e); - pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), parseException)); - exceptions.add(parseException); - } - } - this.pipelines = Collections.unmodifiableMap(pipelines); - ExceptionsHelper.rethrowAndSuppress(exceptions); - } - - private Pipeline substitutePipeline(String id, ElasticsearchParseException e) { - String tag = e.getHeaderKeys().contains("processor_tag") ? e.getHeader("processor_tag").get(0) : null; - String type = e.getHeaderKeys().contains("processor_type") ? e.getHeader("processor_type").get(0) : "unknown"; - String errorMessage = "pipeline with id [" + id + "] could not be loaded, caused by [" + e.getDetailedMessage() + "]"; - Processor failureProcessor = new AbstractProcessor(tag) { - @Override - public void execute(IngestDocument ingestDocument) { - throw new IllegalStateException(errorMessage); - } - - @Override - public String getType() { - return type; - } - }; - String description = "this is a place holder pipeline, because pipeline with id [" + id + "] could not be loaded"; - return new Pipeline(id, description, null, new CompoundProcessor(failureProcessor)); - } - - /** - * Deletes the pipeline specified by id in the request. 
- */ - public void delete(ClusterService clusterService, DeletePipelineRequest request, ActionListener listener) { - clusterService.submitStateUpdateTask("delete-pipeline-" + request.getId(), - new AckedClusterStateUpdateTask(request, listener) { - - @Override - protected AcknowledgedResponse newResponse(boolean acknowledged) { - return new AcknowledgedResponse(acknowledged); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - return innerDelete(request, currentState); - } - }); - } - - ClusterState innerDelete(DeletePipelineRequest request, ClusterState currentState) { - IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); - if (currentIngestMetadata == null) { - return currentState; - } - Map pipelines = currentIngestMetadata.getPipelines(); - Set toRemove = new HashSet<>(); - for (String pipelineKey : pipelines.keySet()) { - if (Regex.simpleMatch(request.getId(), pipelineKey)) { - toRemove.add(pipelineKey); - } - } - if (toRemove.isEmpty() && Regex.isMatchAllPattern(request.getId()) == false) { - throw new ResourceNotFoundException("pipeline [{}] is missing", request.getId()); - } else if (toRemove.isEmpty()) { - return currentState; - } - final Map pipelinesCopy = new HashMap<>(pipelines); - for (String key : toRemove) { - pipelinesCopy.remove(key); - } - ClusterState.Builder newState = ClusterState.builder(currentState); - newState.metaData(MetaData.builder(currentState.getMetaData()) - .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelinesCopy)) - .build()); - return newState.build(); - } - - /** - * Stores the specified pipeline definition in the request. - */ - public void put(ClusterService clusterService, Map ingestInfos, PutPipelineRequest request, - ActionListener listener) throws Exception { - // validates the pipeline and processor configuration before submitting a cluster update task: - validatePipeline(ingestInfos, request); - clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), - new AckedClusterStateUpdateTask(request, listener) { - - @Override - protected AcknowledgedResponse newResponse(boolean acknowledged) { - return new AcknowledgedResponse(acknowledged); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - return innerPut(request, currentState); - } - }); - } - - void validatePipeline(Map ingestInfos, PutPipelineRequest request) throws Exception { - if (ingestInfos.isEmpty()) { - throw new IllegalStateException("Ingest info is empty"); - } - - Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); - Pipeline pipeline = factory.create(request.getId(), pipelineConfig, processorFactories); - List exceptions = new ArrayList<>(); - for (Processor processor : pipeline.flattenAllProcessors()) { - for (Map.Entry entry : ingestInfos.entrySet()) { - if (entry.getValue().containsProcessor(processor.getType()) == false) { - String message = "Processor type [" + processor.getType() + "] is not installed on node [" + entry.getKey() + "]"; - exceptions.add(ConfigurationUtils.newConfigurationException(processor.getType(), processor.getTag(), null, message)); - } - } - } - ExceptionsHelper.rethrowAndSuppress(exceptions); - } - - ClusterState innerPut(PutPipelineRequest request, ClusterState currentState) { - IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); - Map pipelines; - if (currentIngestMetadata != null) { - pipelines = new 
HashMap<>(currentIngestMetadata.getPipelines()); - } else { - pipelines = new HashMap<>(); - } - - pipelines.put(request.getId(), new PipelineConfiguration(request.getId(), request.getSource(), request.getXContentType())); - ClusterState.Builder newState = ClusterState.builder(currentState); - newState.metaData(MetaData.builder(currentState.getMetaData()) - .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelines)) - .build()); - return newState.build(); - } - - /** - * Returns the pipeline by the specified id - */ - public Pipeline get(String id) { - return pipelines.get(id); - } - - public Map getProcessorFactories() { - return processorFactories; - } - - /** - * @return pipeline configuration specified by id. If multiple ids or wildcards are specified multiple pipelines - * may be returned - */ - // Returning PipelineConfiguration instead of Pipeline, because Pipeline and Processor interface don't - // know how to serialize themselves. - public List getPipelines(ClusterState clusterState, String... ids) { - IngestMetadata ingestMetadata = clusterState.getMetaData().custom(IngestMetadata.TYPE); - return innerGetPipelines(ingestMetadata, ids); - } - - List innerGetPipelines(IngestMetadata ingestMetadata, String... ids) { - if (ingestMetadata == null) { - return Collections.emptyList(); - } - - // if we didn't ask for _any_ ID, then we get them all (this is the same as if they ask for '*') - if (ids.length == 0) { - return new ArrayList<>(ingestMetadata.getPipelines().values()); - } - - List result = new ArrayList<>(ids.length); - for (String id : ids) { - if (Regex.isSimpleMatchPattern(id)) { - for (Map.Entry entry : ingestMetadata.getPipelines().entrySet()) { - if (Regex.simpleMatch(id, entry.getKey())) { - result.add(entry.getValue()); - } - } - } else { - PipelineConfiguration pipeline = ingestMetadata.getPipelines().get(id); - if (pipeline != null) { - result.add(pipeline); - } - } - } - return result; - } -} diff --git a/server/src/main/java/org/elasticsearch/ingest/Processor.java b/server/src/main/java/org/elasticsearch/ingest/Processor.java index c318d478814..498ec3a7710 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Processor.java +++ b/server/src/main/java/org/elasticsearch/ingest/Processor.java @@ -40,7 +40,7 @@ public interface Processor { /** * Introspect and potentially modify the incoming data. */ - void execute(IngestDocument ingestDocument) throws Exception; + IngestDocument execute(IngestDocument ingestDocument) throws Exception; /** * Gets the type of a processor @@ -97,22 +97,26 @@ public interface Processor { * instances that have run prior to in ingest. 
*/ public final ThreadContext threadContext; - + public final LongSupplier relativeTimeSupplier; - + + public final IngestService ingestService; + /** * Provides scheduler support */ public final BiFunction> scheduler; public Parameters(Environment env, ScriptService scriptService, AnalysisRegistry analysisRegistry, ThreadContext threadContext, - LongSupplier relativeTimeSupplier, BiFunction> scheduler) { + LongSupplier relativeTimeSupplier, BiFunction> scheduler, + IngestService ingestService) { this.env = env; this.scriptService = scriptService; this.threadContext = threadContext; this.analysisRegistry = analysisRegistry; this.relativeTimeSupplier = relativeTimeSupplier; this.scheduler = scheduler; + this.ingestService = ingestService; } } diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java index 637f4cf1cbe..3bdfe95f1e2 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java @@ -52,11 +52,7 @@ public class OsStats implements Writeable, ToXContentFragment { this.cpu = new Cpu(in); this.mem = new Mem(in); this.swap = new Swap(in); - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - this.cgroup = in.readOptionalWriteable(Cgroup::new); - } else { - this.cgroup = null; - } + this.cgroup = in.readOptionalWriteable(Cgroup::new); } @Override @@ -65,9 +61,7 @@ public class OsStats implements Writeable, ToXContentFragment { cpu.writeTo(out); mem.writeTo(out); swap.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeOptionalWriteable(cgroup); - } + out.writeOptionalWriteable(cgroup); } public long getTimestamp() { diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index c65488bd08e..67c3894ddf4 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -45,9 +45,11 @@ import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.cluster.metadata.AliasValidator; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; import org.elasticsearch.cluster.metadata.TemplateUpgradeService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -71,6 +73,7 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.transport.BoundTransportAddress; @@ -148,6 +151,8 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.usage.UsageService; import org.elasticsearch.watcher.ResourceWatcherService; +import javax.net.ssl.SNIHostName; + import java.io.BufferedWriter; import java.io.Closeable; import java.io.IOException; @@ -180,7 
+185,7 @@ import static java.util.stream.Collectors.toList; * A node represents a node within a cluster ({@code cluster.name}). The {@link #client()} can be used * in order to use a {@link Client} to perform actions/operations against the cluster. */ -public class Node implements Closeable { +public abstract class Node implements Closeable { public static final Setting WRITE_PORTS_FILE_SETTING = @@ -207,6 +212,13 @@ public class Node implements Closeable { throw new IllegalArgumentException(key + " cannot have leading or trailing whitespace " + "[" + value + "]"); } + if (value.length() > 0 && "node.attr.server_name".equals(key)) { + try { + new SNIHostName(value); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("invalid node.attr.server_name [" + value + "]", e); + } + } return value; }, Property.NodeScope)); public static final Setting BREAKER_TYPE_KEY = new Setting<>("indices.breaker.type", "hierarchy", (s) -> { @@ -219,17 +231,6 @@ public class Node implements Closeable { } }, Setting.Property.NodeScope); - /** - * Adds a default node name to the given setting, if it doesn't already exist - * @return the given setting if node name is already set, or a new copy with a default node name set. - */ - public static final Settings addNodeNameIfNeeded(Settings settings, final String nodeId) { - if (NODE_NAME_SETTING.exists(settings)) { - return settings; - } - return Settings.builder().put(settings).put(NODE_NAME_SETTING.getKey(), nodeId.substring(0, 7)).build(); - } - private static final String CLIENT_TYPE = "node"; private final Lifecycle lifecycle = new Lifecycle(); @@ -260,10 +261,19 @@ public class Node implements Closeable { } public Node(Environment environment) { - this(environment, Collections.emptyList()); + this(environment, Collections.emptyList(), true); } - protected Node(final Environment environment, Collection> classpathPlugins) { + /** + * Constructs a node + * + * @param environment the environment for this node + * @param classpathPlugins the plugins to be loaded from the classpath + * @param forbidPrivateIndexSettings whether or not private index settings are forbidden when creating an index; this is used in the + * test framework for tests that rely on being able to set private settings + */ + protected Node( + final Environment environment, Collection> classpathPlugins, boolean forbidPrivateIndexSettings) { logger = Loggers.getLogger(Node.class); final List resourcesToClose = new ArrayList<>(); // register everything we need to release in the case of an error boolean success = false; @@ -272,24 +282,34 @@ public class Node implements Closeable { Settings tmpSettings = Settings.builder().put(environment.settings()) .put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE).build(); - // create the node environment as soon as possible, to recover the node id and enable logging + /* + * Create the node environment as soon as possible so we can + * recover the node id which we might have to use to derive the + * node name. And it is important to get *that* as soon as possible + * so that log lines can contain it. + */ + boolean nodeNameExplicitlyDefined = NODE_NAME_SETTING.exists(tmpSettings); try { - nodeEnvironment = new NodeEnvironment(tmpSettings, environment); + Consumer nodeIdConsumer = nodeNameExplicitlyDefined ?
+ nodeId -> {} : nodeId -> registerDerivedNodeNameWithLogger(nodeIdToNodeName(nodeId)); + nodeEnvironment = new NodeEnvironment(tmpSettings, environment, nodeIdConsumer); resourcesToClose.add(nodeEnvironment); } catch (IOException ex) { throw new IllegalStateException("Failed to create node environment", ex); } - final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings); - final String nodeId = nodeEnvironment.nodeId(); - tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId); - // this must be captured after the node name is possibly added to the settings - final String nodeName = NODE_NAME_SETTING.get(tmpSettings); - if (hadPredefinedNodeName == false) { - logger.info("node name derived from node ID [{}]; set [{}] to override", nodeId, NODE_NAME_SETTING.getKey()); + if (nodeNameExplicitlyDefined) { + logger.info("node name [{}], node ID [{}]", + NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId()); } else { - logger.info("node name [{}], node ID [{}]", nodeName, nodeId); + tmpSettings = Settings.builder() + .put(tmpSettings) + .put(NODE_NAME_SETTING.getKey(), nodeIdToNodeName(nodeEnvironment.nodeId())) + .build(); + logger.info("node name derived from node ID [{}]; set [{}] to override", + nodeEnvironment.nodeId(), NODE_NAME_SETTING.getKey()); } + final JvmInfo jvmInfo = JvmInfo.jvmInfo(); logger.info( "version[{}], pid[{}], build[{}/{}/{}/{}], OS[{}/{}/{}], JVM[{}/{}/{}/{}]", @@ -342,7 +362,15 @@ public class Node implements Closeable { AnalysisModule analysisModule = new AnalysisModule(this.environment, pluginsService.filterPlugins(AnalysisPlugin.class)); // this is as early as we can validate settings at this point. we already pass them to ScriptModule as well as ThreadPool // so we might be late here already - final SettingsModule settingsModule = new SettingsModule(this.settings, additionalSettings, additionalSettingsFilter); + + final Set> settingsUpgraders = pluginsService.filterPlugins(Plugin.class) + .stream() + .map(Plugin::getSettingUpgraders) + .flatMap(List::stream) + .collect(Collectors.toSet()); + + final SettingsModule settingsModule = + new SettingsModule(this.settings, additionalSettings, additionalSettingsFilter, settingsUpgraders); scriptModule.registerClusterSettingsListeners(settingsModule.getClusterSettings()); resourcesToClose.add(resourceWatcherService); final NetworkService networkService = new NetworkService( @@ -352,7 +380,7 @@ public class Node implements Closeable { final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool); clusterService.addStateApplier(scriptModule.getScriptService()); resourcesToClose.add(clusterService); - final IngestService ingestService = new IngestService(settings, threadPool, this.environment, + final IngestService ingestService = new IngestService(clusterService, threadPool, this.environment, scriptModule.getScriptService(), analysisModule.getAnalysisRegistry(), pluginsService.filterPlugins(IngestPlugin.class)); final DiskThresholdMonitor listener = new DiskThresholdMonitor(settings, clusterService::state, clusterService.getClusterSettings(), client); @@ -424,6 +452,19 @@ public class Node implements Closeable { threadPool, settingsModule.getIndexScopedSettings(), circuitBreakerService, bigArrays, scriptModule.getScriptService(), client, metaStateService, engineFactoryProviders, indexStoreFactories); + final AliasValidator aliasValidator = new AliasValidator(settings); + + final MetaDataCreateIndexService metaDataCreateIndexService = new 
MetaDataCreateIndexService( + settings, + clusterService, + indicesService, + clusterModule.getAllocationService(), + aliasValidator, + environment, + settingsModule.getIndexScopedSettings(), + threadPool, + xContentRegistry, + forbidPrivateIndexSettings); Collection pluginComponents = pluginsService.filterPlugins(Plugin.class).stream() .flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService, @@ -471,7 +512,7 @@ public class Node implements Closeable { final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, namedWriteableRegistry, networkService, clusterService.getMasterService(), clusterService.getClusterApplierService(), clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class), - clusterModule.getAllocationService()); + clusterModule.getAllocationService(), environment.configFile()); this.nodeService = new NodeService(settings, threadPool, monitorService, discoveryModule.getDiscovery(), transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(), httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter(), responseCollectorService, @@ -513,6 +554,8 @@ public class Node implements Closeable { b.bind(MetaDataUpgrader.class).toInstance(metaDataUpgrader); b.bind(MetaStateService.class).toInstance(metaStateService); b.bind(IndicesService.class).toInstance(indicesService); + b.bind(AliasValidator.class).toInstance(aliasValidator); + b.bind(MetaDataCreateIndexService.class).toInstance(metaDataCreateIndexService); b.bind(SearchService.class).toInstance(searchService); b.bind(SearchTransportService.class).toInstance(searchTransportService); b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(settings, @@ -975,6 +1018,18 @@ public class Node implements Closeable { return networkModule.getHttpServerTransportSupplier().get(); } + /** + * If the node name was derived from the node id this is called with the + * node name as soon as it is available so that we can register the + * node name with the logger. If the node name defined in elasticsearch.yml + * this is never called. 
+ */ + protected abstract void registerDerivedNodeNameWithLogger(String nodeName); + + private String nodeIdToNodeName(String nodeId) { + return nodeId.substring(0, 7); + } + private static class LocalNodeFactory implements Function { private final SetOnce localNode = new SetOnce<>(); private final String persistentNodeId; diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 0e19b5a6502..207886c5cf2 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -37,7 +37,6 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.monitor.MonitorService; -import org.elasticsearch.node.ResponseCollectorService; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; @@ -83,8 +82,7 @@ public class NodeService extends AbstractComponent implements Closeable { this.scriptService = scriptService; this.responseCollectorService = responseCollectorService; this.searchTransportService = searchTransportService; - clusterService.addStateApplier(ingestService.getPipelineStore()); - clusterService.addStateApplier(ingestService.getPipelineExecutionService()); + clusterService.addStateApplier(ingestService); } public NodeInfo info(boolean settings, boolean os, boolean process, boolean jvm, boolean threadPool, @@ -120,7 +118,7 @@ public class NodeService extends AbstractComponent implements Closeable { circuitBreaker ? circuitBreakerService.stats() : null, script ? scriptService.stats() : null, discoveryStats ? discovery.stats() : null, - ingest ? ingestService.getPipelineExecutionService().stats() : null, + ingest ? ingestService.stats() : null, adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null ); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java index f81b7c770e5..b7a179e41e3 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java @@ -188,7 +188,7 @@ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable getSettingsFilter() { return Collections.emptyList(); } + /** + * Get the setting upgraders provided by this plugin. + * + * @return the settings upgraders + */ + public List> getSettingUpgraders() { + return Collections.emptyList(); + } + /** * Provides a function to modify global custom meta data on startup. *

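The Plugin.java hunk above adds a new extension point, getSettingUpgraders, that lets a plugin rename or rewrite deprecated node settings at startup. As a rough sketch of how a plugin might use it, assuming the SettingUpgrader interface exposes the setting to upgrade via getSetting() plus getKey/getValue rewrite hooks, and using a purely hypothetical plugin class and setting names (my_plugin.old_setting renamed to my_plugin.new_setting):

import java.util.Collections;
import java.util.List;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.SettingUpgrader;
import org.elasticsearch.plugins.Plugin;

public class MyPlugin extends Plugin {

    // Hypothetical deprecated setting that older configurations may still carry.
    static final Setting<String> OLD_SETTING =
            Setting.simpleString("my_plugin.old_setting", Setting.Property.NodeScope, Setting.Property.Deprecated);

    @Override
    public List<SettingUpgrader<?>> getSettingUpgraders() {
        return Collections.singletonList(new SettingUpgrader<String>() {

            @Override
            public Setting<String> getSetting() {
                return OLD_SETTING; // the upgrader applies when this setting is found
            }

            @Override
            public String getKey(final String key) {
                return "my_plugin.new_setting"; // rewrite the key, keeping the value
            }
        });
    }
}

The default implementation returns an empty list, so existing plugins are unaffected; as the Node.java hunk earlier in this diff shows, the upgraders from all plugins are collected into the set handed to SettingsModule.
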
    diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java b/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java index 74a911b0ae4..d211efef517 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java @@ -107,11 +107,7 @@ public class PluginInfo implements Writeable, ToXContentObject { } else { extendedPlugins = Collections.emptyList(); } - if (in.getVersion().onOrAfter(Version.V_5_4_0)) { - hasNativeController = in.readBoolean(); - } else { - hasNativeController = false; - } + hasNativeController = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_6_0_0_beta2) && in.getVersion().before(Version.V_6_3_0)) { /* * Elasticsearch versions in [6.0.0-beta2, 6.3.0) allowed plugins to specify that they require the keystore and this was @@ -134,9 +130,7 @@ public class PluginInfo implements Writeable, ToXContentObject { if (out.getVersion().onOrAfter(Version.V_6_2_0)) { out.writeStringList(extendedPlugins); } - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeBoolean(hasNativeController); - } + out.writeBoolean(hasNativeController); if (out.getVersion().onOrAfter(Version.V_6_0_0_beta2) && out.getVersion().before(Version.V_6_3_0)) { /* * Elasticsearch versions in [6.0.0-beta2, 6.3.0) allowed plugins to specify that they require the keystore and this was diff --git a/server/src/main/java/org/elasticsearch/plugins/ScriptPlugin.java b/server/src/main/java/org/elasticsearch/plugins/ScriptPlugin.java index 88af291983a..384f98609a5 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ScriptPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/ScriptPlugin.java @@ -44,7 +44,7 @@ public interface ScriptPlugin { /** * Return script contexts this plugin wants to allow using. 
*/ - default List getContexts() { + default List> getContexts() { return Collections.emptyList(); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index cc1d27425e1..4c36cc5eed8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -39,7 +39,6 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; @@ -719,7 +718,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp protected void writeIndexGen(final RepositoryData repositoryData, final long repositoryStateId) throws IOException { assert isReadOnly() == false; // can not write to a read only repository final long currentGen = latestIndexBlobId(); - if (repositoryStateId != SnapshotsInProgress.UNDEFINED_REPOSITORY_STATE_ID && currentGen != repositoryStateId) { + if (currentGen != repositoryStateId) { // the index file was updated by a concurrent operation, so we were operating on stale // repository data throw new RepositoryException(metadata.name(), "concurrent modification of the index-N file, expected current generation [" + @@ -1338,7 +1337,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } private void failStoreIfCorrupted(Exception e) { - if (e instanceof CorruptIndexException || e instanceof IndexFormatTooOldException || e instanceof IndexFormatTooNewException) { + if (Lucene.isCorruptionException(e)) { try { store.markStoreCorrupted((IOException) e); } catch (IOException inner) { @@ -1493,6 +1492,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp // empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty // shard anyway, we just create the empty shard here and then exit. IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setOpenMode(IndexWriterConfig.OpenMode.CREATE) .setCommitOnClose(true)); writer.close(); diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 585713b641f..6b9432483f3 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -19,7 +19,7 @@ package org.elasticsearch.rest; -import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.CheckedConsumer; @@ -110,7 +110,7 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH invalids.size() > 1 ? 
"s" : "")); boolean first = true; for (final String invalid : invalids) { - final LevensteinDistance ld = new LevensteinDistance(); + final LevenshteinDistance ld = new LevenshteinDistance(); final List> scoredParams = new ArrayList<>(); for (final String candidate : candidates) { final float distance = ld.getDistance(invalid, candidate); diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestActions.java b/server/src/main/java/org/elasticsearch/rest/action/RestActions.java index 759cd4a773d..f25fd107e51 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestActions.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestActions.java @@ -90,8 +90,7 @@ public class RestActions { builder.field(FAILED_FIELD.getPreferredName(), failed); if (shardFailures != null && shardFailures.length > 0) { builder.startArray(FAILURES_FIELD.getPreferredName()); - final boolean group = params.paramAsBoolean("group_shard_failures", true); // we group by default - for (ShardOperationFailedException shardFailure : group ? ExceptionsHelper.groupBy(shardFailures) : shardFailures) { + for (ShardOperationFailedException shardFailure : ExceptionsHelper.groupBy(shardFailures)) { builder.startObject(); shardFailure.toXContent(builder, params); builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index b452b62eb5e..746bb643bf6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -87,13 +87,19 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { private XContentBuilder renderResponse(ClusterState state, boolean renderDefaults, XContentBuilder builder, ToXContent.Params params) throws IOException { - return - new ClusterGetSettingsResponse( - state.metaData().persistentSettings(), - state.metaData().transientSettings(), - renderDefaults ? - settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), this.settings)) : - Settings.EMPTY - ).toXContent(builder, params); + return response(state, renderDefaults, settingsFilter, clusterSettings, settings).toXContent(builder, params); } + + static ClusterGetSettingsResponse response( + final ClusterState state, + final boolean renderDefaults, + final SettingsFilter settingsFilter, + final ClusterSettings clusterSettings, + final Settings settings) { + return new ClusterGetSettingsResponse( + settingsFilter.filter(state.metaData().persistentSettings()), + settingsFilter.filter(state.metaData().transientSettings()), + renderDefaults ? 
settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), settings)) : Settings.EMPTY); + } + } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 2a60262b32f..6239015dae4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -86,6 +86,14 @@ public class RestMultiSearchAction extends BaseRestHandler { int preFilterShardSize = restRequest.paramAsInt("pre_filter_shard_size", SearchRequest.DEFAULT_PRE_FILTER_SHARD_SIZE); + final Integer maxConcurrentShardRequests; + if (restRequest.hasParam("max_concurrent_shard_requests")) { + // only set if we have the parameter since we auto adjust the max concurrency on the coordinator + // based on the number of nodes in the cluster + maxConcurrentShardRequests = restRequest.paramAsInt("max_concurrent_shard_requests", Integer.MIN_VALUE); + } else { + maxConcurrentShardRequests = null; + } parseMultiLineRequest(restRequest, multiRequest.indicesOptions(), allowExplicitIndex, (searchRequest, parser) -> { searchRequest.source(SearchSourceBuilder.fromXContent(parser, false)); @@ -96,6 +104,9 @@ public class RestMultiSearchAction extends BaseRestHandler { for (SearchRequest request : requests) { // preserve if it's set on the request request.setPreFilterShardSize(Math.min(preFilterShardSize, request.getPreFilterShardSize())); + if (maxConcurrentShardRequests != null) { + request.setMaxConcurrentShardRequests(maxConcurrentShardRequests); + } } return multiRequest; } diff --git a/server/src/main/java/org/elasticsearch/script/IngestConditionalScript.java b/server/src/main/java/org/elasticsearch/script/IngestConditionalScript.java new file mode 100644 index 00000000000..27ce29b95dc --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/IngestConditionalScript.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script; + +import java.util.Map; + +/** + * A script used by {@link org.elasticsearch.ingest.ConditionalProcessor}. + */ +public abstract class IngestConditionalScript { + + public static final String[] PARAMETERS = { "ctx" }; + + /** The context used to compile {@link IngestConditionalScript} factories. */ + public static final ScriptContext CONTEXT = new ScriptContext<>("processor_conditional", Factory.class); + + /** The generic runtime parameters for the script. */ + private final Map params; + + public IngestConditionalScript(Map params) { + this.params = params; + } + + /** Return the parameters for this script. 
*/ + public Map getParams() { + return params; + } + + public abstract boolean execute(Map ctx); + + public interface Factory { + IngestConditionalScript newInstance(Map params); + } +} diff --git a/server/src/main/java/org/elasticsearch/script/ScoreAccessor.java b/server/src/main/java/org/elasticsearch/script/ScoreAccessor.java index e8c433347b9..b3cdecb3e04 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreAccessor.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreAccessor.java @@ -19,7 +19,7 @@ package org.elasticsearch.script; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.search.lookup.DocLookup; import java.io.IOException; @@ -32,9 +32,9 @@ import java.io.IOException; */ public final class ScoreAccessor extends Number implements Comparable { - Scorer scorer; + Scorable scorer; - public ScoreAccessor(Scorer scorer) { + public ScoreAccessor(Scorable scorer) { this.scorer = scorer; } diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScript.java b/server/src/main/java/org/elasticsearch/script/ScoreScript.java index d9e56d5573c..11b135e9a65 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScript.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScript.java @@ -19,7 +19,7 @@ package org.elasticsearch.script; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; @@ -33,40 +33,40 @@ import java.util.function.DoubleSupplier; * A script used for adjusting the score on a per document basis. */ public abstract class ScoreScript { - + public static final String[] PARAMETERS = new String[]{}; - + /** The generic runtime parameters for the script. */ private final Map params; - + /** A leaf lookup for the bound segment this script will operate on. */ private final LeafSearchLookup leafLookup; - + private DoubleSupplier scoreSupplier = () -> 0.0; - + public ScoreScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { this.params = params; this.leafLookup = lookup.getLeafSearchLookup(leafContext); } - + public abstract double execute(); - + /** Return the parameters for this script. */ public Map getParams() { return params; } - + /** The doc lookup for the Lucene segment this script was created for. */ public final Map> getDoc() { return leafLookup.doc(); } - + /** Set the current document to run the script on next. */ public void setDocument(int docid) { leafLookup.setDocument(docid); } - - public void setScorer(Scorer scorer) { + + public void setScorer(Scorable scorer) { this.scoreSupplier = () -> { try { return scorer.score(); @@ -75,28 +75,28 @@ public abstract class ScoreScript { } }; } - + public double get_score() { return scoreSupplier.getAsDouble(); } - + /** A factory to construct {@link ScoreScript} instances. */ public interface LeafFactory { - + /** * Return {@code true} if the script needs {@code _score} calculated, or {@code false} otherwise. */ boolean needs_score(); - + ScoreScript newInstance(LeafReaderContext ctx) throws IOException; } - + /** A factory to construct stateful {@link ScoreScript} factories for a specific index. 
*/ public interface Factory { - + ScoreScript.LeafFactory newFactory(Map params, SearchLookup lookup); - + } - + public static final ScriptContext CONTEXT = new ScriptContext<>("score", ScoreScript.Factory.class); } diff --git a/server/src/main/java/org/elasticsearch/script/Script.java b/server/src/main/java/org/elasticsearch/script/Script.java index a64a3ecd376..67ea4f24b83 100644 --- a/server/src/main/java/org/elasticsearch/script/Script.java +++ b/server/src/main/java/org/elasticsearch/script/Script.java @@ -19,7 +19,6 @@ package org.elasticsearch.script; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -451,133 +450,24 @@ public final class Script implements ToXContentObject, Writeable { * Creates a {@link Script} read from an input stream. */ public Script(StreamInput in) throws IOException { - // Version 5.3 allows lang to be an optional parameter for stored scripts and expects - // options to be null for stored and file scripts. - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - this.type = ScriptType.readFrom(in); - this.lang = in.readOptionalString(); - this.idOrCode = in.readString(); - @SuppressWarnings("unchecked") - Map options = (Map)(Map)in.readMap(); - this.options = options; - this.params = in.readMap(); - // Version 5.1 to 5.3 (exclusive) requires all Script members to be non-null and supports the potential - // for more options than just XContentType. Reorders the read in contents to be in - // same order as the constructor. - } else if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - this.type = ScriptType.readFrom(in); - String lang = in.readString(); - this.lang = this.type == ScriptType.STORED ? null : lang; - - this.idOrCode = in.readString(); - @SuppressWarnings("unchecked") - Map options = (Map)(Map)in.readMap(); - - if (this.type != ScriptType.INLINE && options.isEmpty()) { - this.options = null; - } else { - this.options = options; - } - - this.params = in.readMap(); - // Prior to version 5.1 the script members are read in certain cases as optional and given - // default values when necessary. Also the only option supported is for XContentType. - } else { - this.idOrCode = in.readString(); - - if (in.readBoolean()) { - this.type = ScriptType.readFrom(in); - } else { - this.type = DEFAULT_SCRIPT_TYPE; - } - - String lang = in.readOptionalString(); - - if (lang == null) { - this.lang = this.type == ScriptType.STORED ? null : DEFAULT_SCRIPT_LANG; - } else { - this.lang = lang; - } - - Map params = in.readMap(); - - if (params == null) { - this.params = new HashMap<>(); - } else { - this.params = params; - } - - if (in.readBoolean()) { - this.options = new HashMap<>(); - XContentType contentType = in.readEnum(XContentType.class); - this.options.put(CONTENT_TYPE_OPTION, contentType.mediaType()); - } else if (type == ScriptType.INLINE) { - options = new HashMap<>(); - } else { - this.options = null; - } - } + this.type = ScriptType.readFrom(in); + this.lang = in.readOptionalString(); + this.idOrCode = in.readString(); + @SuppressWarnings("unchecked") + Map options = (Map)(Map)in.readMap(); + this.options = options; + this.params = in.readMap(); } @Override public void writeTo(StreamOutput out) throws IOException { - // Version 5.3+ allows lang to be an optional parameter for stored scripts and expects - // options to be null for stored and file scripts. 
- if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - type.writeTo(out); - out.writeOptionalString(lang); - out.writeString(idOrCode); - @SuppressWarnings("unchecked") - Map options = (Map)(Map)this.options; - out.writeMap(options); - out.writeMap(params); - // Version 5.1 to 5.3 (exclusive) requires all Script members to be non-null and supports the potential - // for more options than just XContentType. Reorders the written out contents to be in - // same order as the constructor. - } else if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - type.writeTo(out); - - if (lang == null) { - out.writeString(""); - } else { - out.writeString(lang); - } - - out.writeString(idOrCode); - @SuppressWarnings("unchecked") - Map options = (Map)(Map)this.options; - - if (options == null) { - out.writeMap(new HashMap<>()); - } else { - out.writeMap(options); - } - - out.writeMap(params); - // Prior to version 5.1 the Script members were possibly written as optional or null, though there is no case where a null - // value wasn't equivalent to it's default value when actually compiling/executing a script. Meaning, there are no - // backwards compatibility issues, and now there's enforced consistency. Also the only supported compiler - // option was XContentType. - } else { - out.writeString(idOrCode); - out.writeBoolean(true); - type.writeTo(out); - out.writeOptionalString(lang); - - if (params.isEmpty()) { - out.writeMap(null); - } else { - out.writeMap(params); - } - - if (options != null && options.containsKey(CONTENT_TYPE_OPTION)) { - XContentType contentType = XContentType.fromMediaTypeOrFormat(options.get(CONTENT_TYPE_OPTION)); - out.writeBoolean(true); - out.writeEnum(contentType); - } else { - out.writeBoolean(false); - } - } + type.writeTo(out); + out.writeOptionalString(lang); + out.writeString(idOrCode); + @SuppressWarnings("unchecked") + Map options = (Map) (Map) this.options; + out.writeMap(options); + out.writeMap(params); } /** diff --git a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java index 59d824eb313..35a7c2e60d6 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -292,25 +292,7 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont for (int i = 0; i < size; i++) { String id = in.readString(); - - // Prior to version 5.3 all scripts were stored using the deprecated namespace. - // Split the id to find the language then use StoredScriptSource to parse the - // expected BytesReference after which a new StoredScriptSource is created - // with the appropriate language and options. - if (in.getVersion().before(Version.V_5_3_0)) { - int split = id.indexOf('#'); - - if (split == -1) { - throw new IllegalArgumentException("illegal stored script id [" + id + "], does not contain lang"); - } else { - source = new StoredScriptSource(in); - source = new StoredScriptSource(id.substring(0, split), source.getSource(), Collections.emptyMap()); - } - // Version 5.3+ can just be parsed normally using StoredScriptSource. 
- } else { - source = new StoredScriptSource(in); - } - + source = new StoredScriptSource(in); scripts.put(id, source); } @@ -319,34 +301,11 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont @Override public void writeTo(StreamOutput out) throws IOException { - // Version 5.3+ will output the contents of the scripts' Map using - // StoredScriptSource to stored the language, code, and options. - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeVInt(scripts.size()); + out.writeVInt(scripts.size()); - for (Map.Entry entry : scripts.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } - // Prior to Version 5.3, stored scripts can only be read using the deprecated - // namespace. Scripts using the deprecated namespace are first isolated in a - // temporary Map, then written out. Since all scripts will be stored using the - // deprecated namespace, no scripts will be lost. - } else { - Map filtered = new HashMap<>(); - - for (Map.Entry entry : scripts.entrySet()) { - if (entry.getKey().contains("#")) { - filtered.put(entry.getKey(), entry.getValue()); - } - } - - out.writeVInt(filtered.size()); - - for (Map.Entry entry : filtered.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } + for (Map.Entry entry : scripts.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptModule.java b/server/src/main/java/org/elasticsearch/script/ScriptModule.java index f04e690fa42..6dc507fa0d8 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptModule.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptModule.java @@ -51,6 +51,7 @@ public class ScriptModule { BucketAggregationSelectorScript.CONTEXT, SignificantTermsHeuristicScoreScript.CONTEXT, IngestScript.CONTEXT, + IngestConditionalScript.CONTEXT, FilterScript.CONTEXT, SimilarityScript.CONTEXT, SimilarityWeightScript.CONTEXT, @@ -69,8 +70,8 @@ public class ScriptModule { Map engines = new HashMap<>(); Map> contexts = new HashMap<>(CORE_CONTEXTS); for (ScriptPlugin plugin : scriptPlugins) { - for (ScriptContext context : plugin.getContexts()) { - ScriptContext oldContext = contexts.put(context.name, context); + for (ScriptContext context : plugin.getContexts()) { + ScriptContext oldContext = contexts.put(context.name, context); if (oldContext != null) { throw new IllegalArgumentException("Context name [" + context.name + "] defined twice"); } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java b/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java index 0c34c59b7be..e72d597a6af 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java @@ -20,10 +20,8 @@ package org.elasticsearch.script; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; @@ -33,30 +31,11 @@ import java.util.List; import java.util.Map; public class ScriptedMetricAggContexts { - private static 
final DeprecationLogger DEPRECATION_LOGGER = - new DeprecationLogger(Loggers.getLogger(ScriptedMetricAggContexts.class)); - - // Public for access from tests - public static final String AGG_PARAM_DEPRECATION_WARNING = - "params._agg/_aggs for scripted metric aggregations are deprecated, use state/states (not in params) instead. " + - "Use -Des.aggregations.enable_scripted_metric_agg_param=false to disable."; - - public static boolean deprecatedAggParamEnabled() { - boolean enabled = Boolean.parseBoolean( - System.getProperty("es.aggregations.enable_scripted_metric_agg_param", "true")); - - if (enabled) { - DEPRECATION_LOGGER.deprecatedAndMaybeLog("enable_scripted_metric_agg_param", AGG_PARAM_DEPRECATION_WARNING); - } - - return enabled; - } - private abstract static class ParamsAndStateBase { private final Map params; - private final Object state; + private final Map state; - ParamsAndStateBase(Map params, Object state) { + ParamsAndStateBase(Map params, Map state) { this.params = params; this.state = state; } @@ -71,14 +50,14 @@ public class ScriptedMetricAggContexts { } public abstract static class InitScript extends ParamsAndStateBase { - public InitScript(Map params, Object state) { + public InitScript(Map params, Map state) { super(params, state); } public abstract void execute(); public interface Factory { - InitScript newInstance(Map params, Object state); + InitScript newInstance(Map params, Map state); } public static String[] PARAMETERS = {}; @@ -87,9 +66,9 @@ public class ScriptedMetricAggContexts { public abstract static class MapScript extends ParamsAndStateBase { private final LeafSearchLookup leafLookup; - private Scorer scorer; + private Scorable scorer; - public MapScript(Map params, Object state, SearchLookup lookup, LeafReaderContext leafContext) { + public MapScript(Map params, Map state, SearchLookup lookup, LeafReaderContext leafContext) { super(params, state); this.leafLookup = leafContext == null ? 
null : lookup.getLeafSearchLookup(leafContext); @@ -107,7 +86,7 @@ public class ScriptedMetricAggContexts { } } - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { this.scorer = scorer; } @@ -131,7 +110,7 @@ public class ScriptedMetricAggContexts { } public interface Factory { - LeafFactory newFactory(Map params, Object state, SearchLookup lookup); + LeafFactory newFactory(Map params, Map state, SearchLookup lookup); } public static String[] PARAMETERS = new String[] {}; @@ -139,14 +118,14 @@ public class ScriptedMetricAggContexts { } public abstract static class CombineScript extends ParamsAndStateBase { - public CombineScript(Map params, Object state) { + public CombineScript(Map params, Map state) { super(params, state); } public abstract Object execute(); public interface Factory { - CombineScript newInstance(Map params, Object state); + CombineScript newInstance(Map params, Map state); } public static String[] PARAMETERS = {}; diff --git a/server/src/main/java/org/elasticsearch/script/SearchScript.java b/server/src/main/java/org/elasticsearch/script/SearchScript.java index 43ea020aa6e..fb5f950d61d 100644 --- a/server/src/main/java/org/elasticsearch/script/SearchScript.java +++ b/server/src/main/java/org/elasticsearch/script/SearchScript.java @@ -19,7 +19,7 @@ package org.elasticsearch.script; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.search.lookup.LeafDocLookup; @@ -46,22 +46,14 @@ public abstract class SearchScript implements ScorerAware, ExecutableScript { /** The generic runtime parameters for the script. */ private final Map params; - /** A lookup for the index this script will operate on. */ - private final SearchLookup lookup; - - /** A leaf lookup for the bound segment this script will operate on. */ - private final LeafReaderContext leafContext; - /** A leaf lookup for the bound segment this script will operate on. */ private final LeafSearchLookup leafLookup; /** A scorer that will return the score for the current document when the script is run. */ - private Scorer scorer; + private Scorable scorer; public SearchScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { this.params = params; - this.lookup = lookup; - this.leafContext = leafContext; // TODO: remove leniency when painless does not implement SearchScript for executable script cases this.leafLookup = leafContext == null ? null : lookup.getLeafSearchLookup(leafContext); } @@ -76,11 +68,6 @@ public abstract class SearchScript implements ScorerAware, ExecutableScript { return leafLookup; } - /** The leaf context for the Lucene segment this script was created for. */ - protected final LeafReaderContext getLeafContext() { - return leafContext; - } - /** The doc lookup for the Lucene segment this script was created for. 
*/ public final LeafDocLookup getDoc() { // TODO: remove leniency when painless does not implement SearchScript for executable script cases @@ -96,7 +83,7 @@ public abstract class SearchScript implements ScorerAware, ExecutableScript { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { this.scorer = scorer; } diff --git a/server/src/main/java/org/elasticsearch/script/SimilarityScript.java b/server/src/main/java/org/elasticsearch/script/SimilarityScript.java index c410a0bd6eb..4aeb4063959 100644 --- a/server/src/main/java/org/elasticsearch/script/SimilarityScript.java +++ b/server/src/main/java/org/elasticsearch/script/SimilarityScript.java @@ -21,8 +21,6 @@ package org.elasticsearch.script; import org.elasticsearch.index.similarity.ScriptedSimilarity; -import java.io.IOException; - /** A script that is used to build {@link ScriptedSimilarity} instances. */ public abstract class SimilarityScript { @@ -34,7 +32,7 @@ public abstract class SimilarityScript { * @param doc per-document statistics */ public abstract double execute(double weight, ScriptedSimilarity.Query query, - ScriptedSimilarity.Field field, ScriptedSimilarity.Term term, ScriptedSimilarity.Doc doc) throws IOException; + ScriptedSimilarity.Field field, ScriptedSimilarity.Term term, ScriptedSimilarity.Doc doc); public interface Factory { SimilarityScript newInstance(); diff --git a/server/src/main/java/org/elasticsearch/script/SimilarityWeightScript.java b/server/src/main/java/org/elasticsearch/script/SimilarityWeightScript.java index f48a9c93e02..04bbc3cccf4 100644 --- a/server/src/main/java/org/elasticsearch/script/SimilarityWeightScript.java +++ b/server/src/main/java/org/elasticsearch/script/SimilarityWeightScript.java @@ -21,8 +21,6 @@ package org.elasticsearch.script; import org.elasticsearch.index.similarity.ScriptedSimilarity; -import java.io.IOException; - /** A script that is used to compute scoring factors that are the same for all documents. 
*/ public abstract class SimilarityWeightScript { @@ -32,7 +30,7 @@ public abstract class SimilarityWeightScript { * @param term term-level statistics */ public abstract double execute(ScriptedSimilarity.Query query, ScriptedSimilarity.Field field, - ScriptedSimilarity.Term term) throws IOException; + ScriptedSimilarity.Term term); public interface Factory { SimilarityWeightScript newInstance(); diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 28a600c0d21..71ea55e97a7 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -82,7 +82,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable p.getFetchSubPhases(context), this::registerFetchSubPhase); diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 4bf5e03b8a7..5cb9f81626c 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -98,6 +98,8 @@ import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.TransportRequest; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -789,14 +791,21 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv context.fetchSourceContext(source.fetchSource()); } if (source.docValueFields() != null) { - int maxAllowedDocvalueFields = context.mapperService().getIndexSettings().getMaxDocvalueFields(); - if (source.docValueFields().size() > maxAllowedDocvalueFields) { - throw new IllegalArgumentException( - "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [" + maxAllowedDocvalueFields - + "] but was [" + source.docValueFields().size() + "]. This limit can be set by changing the [" - + IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey() + "] index level setting."); + List docValueFields = new ArrayList<>(); + for (DocValueFieldsContext.FieldAndFormat format : source.docValueFields()) { + Collection fieldNames = context.mapperService().simpleMatchToFullName(format.field); + for (String fieldName: fieldNames) { + docValueFields.add(new DocValueFieldsContext.FieldAndFormat(fieldName, format.format)); + } } - context.docValueFieldsContext(new DocValueFieldsContext(source.docValueFields())); + int maxAllowedDocvalueFields = context.mapperService().getIndexSettings().getMaxDocvalueFields(); + if (docValueFields.size() > maxAllowedDocvalueFields) { + throw new IllegalArgumentException( + "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [" + maxAllowedDocvalueFields + + "] but was [" + docValueFields.size() + "]. 
This limit can be set by changing the [" + + IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey() + "] index level setting."); + } + context.docValueFieldsContext(new DocValueFieldsContext(docValueFields)); } if (source.highlighter() != null) { HighlightBuilder highlightBuilder = source.highlighter(); @@ -886,13 +895,13 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv completionSuggestions = Collections.emptyList(); } if (context.request().scroll() != null) { - TopDocs topDocs = context.queryResult().topDocs(); + TopDocs topDocs = context.queryResult().topDocs().topDocs; docIdsToLoad = new int[topDocs.scoreDocs.length + numSuggestDocs]; for (int i = 0; i < topDocs.scoreDocs.length; i++) { docIdsToLoad[docsOffset++] = topDocs.scoreDocs[i].doc; } } else { - TopDocs topDocs = context.queryResult().topDocs(); + TopDocs topDocs = context.queryResult().topDocs().topDocs; if (topDocs.scoreDocs.length < context.from()) { // no more docs... docIdsToLoad = new int[numSuggestDocs]; diff --git a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java index 19c0f8c64d5..4a46c7202d1 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -19,7 +19,6 @@ package org.elasticsearch.search; -import org.elasticsearch.Version; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; @@ -52,11 +51,7 @@ public final class SearchShardTarget implements Writeable, Comparable metaData() { @@ -183,7 +184,7 @@ public abstract class AggregatorBase extends Aggregator { @Override public final void preCollection() throws IOException { List collectors = Arrays.asList(subAggregators); - collectableSubAggregators = BucketCollector.wrap(collectors); + collectableSubAggregators = MultiBucketCollector.wrap(collectors); doPreCollection(); collectableSubAggregators.preCollection(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index 88cc7319948..d6e7aca46a6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -20,7 +20,8 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; @@ -74,8 +75,8 @@ public abstract class AggregatorFactory> { } @Override - public boolean needsScores() { - return first.needsScores(); + public ScoreMode scoreMode() { + return first.scoreMode(); } @Override @@ -109,10 +110,10 @@ public abstract class AggregatorFactory> { collectors.set(i, null); } return new LeafBucketCollector() { - Scorer scorer; + Scorable scorer; @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { this.scorer = scorer; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java index 40e66bd9645..c50dd615c7b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java @@ -22,12 +22,9 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.ScoreMode; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.stream.StreamSupport; /** * A Collector that can collect data in separate buckets. @@ -49,66 +46,11 @@ public abstract class BucketCollector implements Collector { // no-op } @Override - public boolean needsScores() { - return false; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE_NO_SCORES; } }; - /** - * Wrap the given collectors into a single instance. - */ - public static BucketCollector wrap(Iterable collectorList) { - final BucketCollector[] collectors = - StreamSupport.stream(collectorList.spliterator(), false).toArray(size -> new BucketCollector[size]); - switch (collectors.length) { - case 0: - return NO_OP_COLLECTOR; - case 1: - return collectors[0]; - default: - return new BucketCollector() { - - @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { - List leafCollectors = new ArrayList<>(collectors.length); - for (BucketCollector c : collectors) { - leafCollectors.add(c.getLeafCollector(ctx)); - } - return LeafBucketCollector.wrap(leafCollectors); - } - - @Override - public void preCollection() throws IOException { - for (BucketCollector collector : collectors) { - collector.preCollection(); - } - } - - @Override - public void postCollection() throws IOException { - for (BucketCollector collector : collectors) { - collector.postCollection(); - } - } - - @Override - public boolean needsScores() { - for (BucketCollector collector : collectors) { - if (collector.needsScores()) { - return true; - } - } - return false; - } - - @Override - public String toString() { - return Arrays.toString(collectors); - } - }; - } - } - @Override public abstract LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollector.java index f5b7f15bb94..367e1cce060 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollector.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import java.io.IOException; import java.util.stream.Stream; @@ -33,7 +33,7 @@ public abstract class LeafBucketCollector implements LeafCollector { public static final LeafBucketCollector NO_OP_COLLECTOR = new LeafBucketCollector() { @Override - public void setScorer(Scorer arg0) throws IOException { + public void setScorer(Scorable arg0) throws IOException { // no-op } @Override @@ -55,7 +55,7 @@ public abstract class LeafBucketCollector implements LeafCollector { return new LeafBucketCollector() { @Override - public void setScorer(Scorer s) throws IOException { + public void setScorer(Scorable s) throws 
IOException { for (LeafBucketCollector c : colls) { c.setScorer(s); } @@ -83,7 +83,7 @@ public abstract class LeafBucketCollector implements LeafCollector { } @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { // no-op by default } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollectorBase.java b/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollectorBase.java index 45e7db08e2d..529483107b1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollectorBase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollectorBase.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.common.lucene.ScorerAware; import java.io.IOException; @@ -48,7 +48,7 @@ public class LeafBucketCollectorBase extends LeafBucketCollector { } @Override - public void setScorer(Scorer s) throws IOException { + public void setScorer(Scorable s) throws IOException { sub.setScorer(s); if (values != null) { values.setScorer(s); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java new file mode 100644 index 00000000000..552ad8c024d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java @@ -0,0 +1,212 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MultiCollector; +import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.ScoreCachingWrappingScorer; +import org.apache.lucene.search.ScoreMode; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * A {@link BucketCollector} which allows running a bucket collection with several + * {@link BucketCollector}s. It is similar to the {@link MultiCollector} except that the + * {@link #wrap} method filters out the {@link BucketCollector#NO_OP_COLLECTOR}s and not + * the null ones. + */ +public class MultiBucketCollector extends BucketCollector { + + /** See {@link #wrap(Iterable)}. */ + public static BucketCollector wrap(BucketCollector... 
collectors) { + return wrap(Arrays.asList(collectors)); + } + + /** + * Wraps a list of {@link BucketCollector}s with a {@link MultiBucketCollector}. This + * method works as follows: + * &lt;ul&gt; + * &lt;li&gt;Filters out the {@link BucketCollector#NO_OP_COLLECTOR}s collectors, so they are not used + * during search time. + * &lt;li&gt;If the input contains 1 real collector, it is returned. + * &lt;li&gt;Otherwise the method returns a {@link MultiBucketCollector} which wraps the + * non-{@link BucketCollector#NO_OP_COLLECTOR} collectors. + * &lt;/ul&gt;
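+ * For example, with hypothetical collectors {@code a} and {@code b} (neither being the no-op): {@code wrap()} + * returns {@link BucketCollector#NO_OP_COLLECTOR}, {@code wrap(a)} returns {@code a} itself, and + * {@code wrap(a, b)} returns a {@code MultiBucketCollector} that collects into both.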
    + */ + public static BucketCollector wrap(Iterable collectors) { + // For the user's convenience, we allow NO_OP collectors to be passed. + // However, to improve performance, these null collectors are found + // and dropped from the array we save for actual collection time. + int n = 0; + for (BucketCollector c : collectors) { + if (c != NO_OP_COLLECTOR) { + n++; + } + } + + if (n == 0) { + return NO_OP_COLLECTOR; + } else if (n == 1) { + // only 1 Collector - return it. + BucketCollector col = null; + for (BucketCollector c : collectors) { + if (c != null) { + col = c; + break; + } + } + return col; + } else { + BucketCollector[] colls = new BucketCollector[n]; + n = 0; + for (BucketCollector c : collectors) { + if (c != null) { + colls[n++] = c; + } + } + return new MultiBucketCollector(colls); + } + } + + private final boolean cacheScores; + private final BucketCollector[] collectors; + + private MultiBucketCollector(BucketCollector... collectors) { + this.collectors = collectors; + int numNeedsScores = 0; + for (Collector collector : collectors) { + if (collector.scoreMode().needsScores()) { + numNeedsScores += 1; + } + } + this.cacheScores = numNeedsScores >= 2; + } + + @Override + public ScoreMode scoreMode() { + ScoreMode scoreMode = null; + for (Collector collector : collectors) { + if (scoreMode == null) { + scoreMode = collector.scoreMode(); + } else if (scoreMode != collector.scoreMode()) { + return ScoreMode.COMPLETE; + } + } + return scoreMode; + } + + @Override + public void preCollection() throws IOException { + for (BucketCollector collector : collectors) { + collector.preCollection(); + } + } + + @Override + public void postCollection() throws IOException { + for (BucketCollector collector : collectors) { + collector.postCollection(); + } + } + + @Override + public String toString() { + return Arrays.toString(collectors); + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext context) throws IOException { + final List leafCollectors = new ArrayList<>(); + for (BucketCollector collector : collectors) { + final LeafBucketCollector leafCollector; + try { + leafCollector = collector.getLeafCollector(context); + } catch (CollectionTerminatedException e) { + // this leaf collector does not need this segment + continue; + } + leafCollectors.add(leafCollector); + } + switch (leafCollectors.size()) { + case 0: + throw new CollectionTerminatedException(); + case 1: + return leafCollectors.get(0); + default: + return new MultiLeafBucketCollector(leafCollectors, cacheScores); + } + } + + private static class MultiLeafBucketCollector extends LeafBucketCollector { + + private final boolean cacheScores; + private final LeafBucketCollector[] collectors; + private int numCollectors; + + private MultiLeafBucketCollector(List collectors, boolean cacheScores) { + this.collectors = collectors.toArray(new LeafBucketCollector[collectors.size()]); + this.cacheScores = cacheScores; + this.numCollectors = this.collectors.length; + } + + @Override + public void setScorer(Scorable scorer) throws IOException { + if (cacheScores) { + scorer = new ScoreCachingWrappingScorer(scorer); + } + for (int i = 0; i < numCollectors; ++i) { + final LeafCollector c = collectors[i]; + c.setScorer(scorer); + } + } + + private void removeCollector(int i) { + System.arraycopy(collectors, i + 1, collectors, i, numCollectors - i - 1); + --numCollectors; + collectors[numCollectors] = null; + } + + @Override + public void collect(int doc, long bucket) throws IOException { + final 
LeafBucketCollector[] collectors = this.collectors; + int numCollectors = this.numCollectors; + for (int i = 0; i < numCollectors; ) { + final LeafBucketCollector collector = collectors[i]; + try { + collector.collect(doc, bucket); + ++i; + } catch (CollectionTerminatedException e) { + removeCollector(i); + numCollectors = this.numCollectors; + if (numCollectors == 0) { + throw new CollectionTerminatedException(); + } + } + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index d6be0f57866..32695ac69a8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.packed.PackedInts; @@ -33,6 +34,7 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.MultiBucketCollector; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -80,17 +82,17 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector { } @Override - public boolean needsScores() { + public ScoreMode scoreMode() { if (collector == null) { throw new IllegalStateException(); } - return collector.needsScores(); + return collector.scoreMode(); } /** Set the deferred collectors. */ @Override public void setDeferredCollector(Iterable deferredCollectors) { - this.collector = BucketCollector.wrap(deferredCollectors); + this.collector = MultiBucketCollector.wrap(deferredCollectors); } private void finishLeaf() { @@ -152,11 +154,11 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector { } this.selectedBuckets = hash; - boolean needsScores = needsScores(); + boolean needsScores = scoreMode().needsScores(); Weight weight = null; if (needsScores) { Query query = isGlobal ? new MatchAllDocsQuery() : searchContext.query(); - weight = searchContext.searcher().createNormalizedWeight(query, true); + weight = searchContext.searcher().createWeight(searchContext.searcher().rewrite(query), ScoreMode.COMPLETE, 1f); } for (Entry entry : entries) { final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketUtils.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketUtils.java index d145e32c45b..17b50fa9bef 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketUtils.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketUtils.java @@ -31,27 +31,21 @@ public final class BucketUtils { * * @param finalSize * The number of terms required in the final reduce phase. - * @param numberOfShards - * The number of shards being queried. 
+ * @param singleShard + * whether a single shard is being queried, or multiple shards * @return A suggested default for the size of any shard-side PriorityQueues */ - public static int suggestShardSideQueueSize(int finalSize, int numberOfShards) { + public static int suggestShardSideQueueSize(int finalSize, boolean singleShard) { if (finalSize < 1) { throw new IllegalArgumentException("size must be positive, got " + finalSize); } - if (numberOfShards < 1) { - throw new IllegalArgumentException("number of shards must be positive, got " + numberOfShards); - } - - if (numberOfShards == 1) { + if (singleShard) { // In the case of a single shard, we do not need to over-request return finalSize; } - // Request 50% more buckets on the shards in order to improve accuracy // as well as a small constant that should help with small values of 'size' final long shardSampleSize = (long) (finalSize * 1.5 + 10); return (int) Math.min(Integer.MAX_VALUE, shardSampleSize); } - } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferableBucketAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferableBucketAggregator.java index 0ff5ea12b97..b4e2243f17a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferableBucketAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferableBucketAggregator.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BucketCollector; +import org.elasticsearch.search.aggregations.MultiBucketCollector; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.internal.SearchContext; @@ -59,7 +60,7 @@ public abstract class DeferableBucketAggregator extends BucketsAggregator { recordingWrapper.setDeferredCollector(deferredCollectors); collectors.add(recordingWrapper); } - collectableSubAggregators = BucketCollector.wrap(collectors); + collectableSubAggregators = MultiBucketCollector.wrap(collectors); } public static boolean descendsFromGlobalAggregator(Aggregator parent) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java index 3c63df2c06a..7151a6f33d9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -62,8 +63,8 @@ public abstract class DeferringBucketCollector extends BucketCollector { } @Override - public boolean needsScores() { - return in.needsScores(); + public ScoreMode scoreMode() { + return in.scoreMode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java index f357e9d286f..53049d0301c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.packed.PackedInts; @@ -31,6 +32,7 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.MultiBucketCollector; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -61,15 +63,15 @@ public class MergingBucketsDeferringCollector extends DeferringBucketCollector { @Override public void setDeferredCollector(Iterable deferredCollectors) { - this.collector = BucketCollector.wrap(deferredCollectors); + this.collector = MultiBucketCollector.wrap(deferredCollectors); } @Override - public boolean needsScores() { + public ScoreMode scoreMode() { if (collector == null) { throw new IllegalStateException(); } - return collector.needsScores(); + return collector.scoreMode(); } @Override @@ -157,10 +159,12 @@ public class MergingBucketsDeferringCollector extends DeferringBucketCollector { } this.selectedBuckets = hash; - boolean needsScores = collector.needsScores(); + boolean needsScores = collector.scoreMode().needsScores(); Weight weight = null; if (needsScores) { - weight = searchContext.searcher().createNormalizedWeight(searchContext.query(), true); + weight = searchContext.searcher().createWeight( + searchContext.searcher().rewrite(searchContext.query()), + ScoreMode.COMPLETE, 1f); } for (Entry entry : entries) { final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java index 6df88379d4e..69bc2de39dc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.bucket.adjacency; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -51,7 +52,7 @@ public class AdjacencyMatrixAggregatorFactory extends AggregatorFactory collectors = Arrays.asList(subAggregators); - deferredCollectors = BucketCollector.wrap(collectors); + deferredCollectors = MultiBucketCollector.wrap(collectors); collectableSubAggregators = BucketCollector.NO_OP_COLLECTOR; } @@ -203,11 +205,11 @@ final class CompositeAggregator extends 
BucketsAggregator { * the {@link #deferredCollectors}. */ private void runDeferredCollections() throws IOException { - final boolean needsScores = needsScores(); + final boolean needsScores = scoreMode().needsScores(); Weight weight = null; if (needsScores) { Query query = context.query(); - weight = context.searcher().createNormalizedWeight(query, true); + weight = context.searcher().createWeight(context.searcher().rewrite(query), ScoreMode.COMPLETE, 1f); } deferredCollectors.preCollection(); for (Entry entry : entries) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java index d0f2d6ef946..9bf51e57df0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java @@ -25,7 +25,7 @@ import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.Query; import org.apache.lucene.util.DocIdSetBuilder; -import org.apache.lucene.util.StringHelper; +import org.apache.lucene.util.FutureArrays; import java.io.IOException; import java.util.function.ToLongFunction; @@ -147,8 +147,10 @@ class PointsSortedDocsProducer extends SortedDocsProducer { @Override public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { - if ((upperPointQuery != null && StringHelper.compare(bytesPerDim, minPackedValue, 0, upperPointQuery, 0) > 0) || - (lowerPointQuery != null && StringHelper.compare(bytesPerDim, maxPackedValue, 0, lowerPointQuery, 0) < 0)) { + if ((upperPointQuery != null && + FutureArrays.compareUnsigned(minPackedValue, 0, bytesPerDim, upperPointQuery, 0, bytesPerDim) > 0) || + (lowerPointQuery != null && + FutureArrays.compareUnsigned(maxPackedValue, 0, bytesPerDim, lowerPointQuery, 0, bytesPerDim) < 0)) { // does not match the query return PointValues.Relation.CELL_OUTSIDE_QUERY; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java index 4b54dccbf96..c8b1e630b85 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.bucket.filter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregationInitializationException; @@ -58,7 +59,7 @@ public class FilterAggregatorFactory extends AggregatorFactory buckets, int targetBuckets, BucketInfo emptyBucketInfo, DocValueFormat formatter, - List pipelineAggregators, Map metaData) { + List pipelineAggregators, Map metaData, long bucketInnerInterval) { super(name, pipelineAggregators, metaData); this.buckets = buckets; this.bucketInfo = emptyBucketInfo; this.format = formatter; this.targetBuckets = targetBuckets; + this.bucketInnerInterval = bucketInnerInterval; } /** @@ -238,6 +239,13 @@ public final class InternalAutoDateHistogram 
extends out.writeVInt(targetBuckets); } + public DateHistogramInterval getInterval() { + + RoundingInfo roundingInfo = this.bucketInfo.roundingInfos[this.bucketInfo.roundingIdx]; + String unitAbbreviation = roundingInfo.unitAbbreviation; + return new DateHistogramInterval(Long.toString(bucketInnerInterval) + unitAbbreviation); + } + @Override public String getWriteableName() { return AutoDateHistogramAggregationBuilder.NAME; @@ -262,7 +270,7 @@ public final class InternalAutoDateHistogram extends @Override public InternalAutoDateHistogram create(List buckets) { - return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators(), metaData); + return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators(), metaData, 1); } @Override @@ -279,7 +287,6 @@ public final class InternalAutoDateHistogram extends this.iterator = iterator; current = iterator.next(); } - } /** @@ -365,7 +372,7 @@ public final class InternalAutoDateHistogram extends reduceRoundingInfo = bucketInfo.roundingInfos[reduceRoundingIdx]; reducedBuckets = mergeBuckets(reducedBuckets, reduceRoundingInfo.rounding, reduceContext); } - return new BucketReduceResult(reducedBuckets, reduceRoundingInfo, reduceRoundingIdx); + return new BucketReduceResult(reducedBuckets, reduceRoundingInfo, reduceRoundingIdx, 1); } private List mergeBuckets(List reducedBuckets, Rounding reduceRounding, ReduceContext reduceContext) { @@ -403,12 +410,13 @@ public final class InternalAutoDateHistogram extends List buckets; RoundingInfo roundingInfo; int roundingIdx; + long innerInterval; - BucketReduceResult(List buckets, RoundingInfo roundingInfo, int roundingIdx) { + BucketReduceResult(List buckets, RoundingInfo roundingInfo, int roundingIdx, long innerInterval) { this.buckets = buckets; this.roundingInfo = roundingInfo; this.roundingIdx = roundingIdx; - + this.innerInterval = innerInterval; } } @@ -444,7 +452,7 @@ public final class InternalAutoDateHistogram extends } lastBucket = iter.next(); } - return new BucketReduceResult(list, roundingInfo, roundingIdx); + return new BucketReduceResult(list, roundingInfo, roundingIdx, currentResult.innerInterval); } static int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, @@ -507,7 +515,7 @@ public final class InternalAutoDateHistogram extends this.bucketInfo.emptySubAggregations); return new InternalAutoDateHistogram(getName(), reducedBucketsResult.buckets, targetBuckets, bucketInfo, format, - pipelineAggregators(), getMetaData()); + pipelineAggregators(), getMetaData(), reducedBucketsResult.innerInterval); } private BucketReduceResult maybeMergeConsecutiveBuckets(BucketReduceResult reducedBucketsResult, @@ -547,7 +555,7 @@ public final class InternalAutoDateHistogram extends reduceContext.consumeBucketsAndMaybeBreak(1); mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, roundingInfo.rounding, reduceContext)); } - return new BucketReduceResult(mergedBuckets, roundingInfo, roundingIdx); + return new BucketReduceResult(mergedBuckets, roundingInfo, roundingIdx, mergeInterval); } @Override @@ -557,6 +565,7 @@ public final class InternalAutoDateHistogram extends bucket.toXContent(builder, params); } builder.endArray(); + builder.field("interval", getInterval().toString()); return builder; } @@ -580,7 +589,7 @@ public final class InternalAutoDateHistogram extends buckets2.add((Bucket) b); } buckets2 = Collections.unmodifiableList(buckets2); - return new InternalAutoDateHistogram(name, buckets2, 
targetBuckets, bucketInfo, format, pipelineAggregators(), getMetaData()); + return new InternalAutoDateHistogram(name, buckets2, targetBuckets, bucketInfo, format, pipelineAggregators(), getMetaData(), 1); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java index caca44f9f2e..c9ff1389f8a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -36,6 +37,16 @@ public class ParsedAutoDateHistogram extends ParsedMultiBucketAggregation getBuckets() { return buckets; @@ -47,6 +58,8 @@ public class ParsedAutoDateHistogram extends ParsedMultiBucketAggregation ParsedBucket.fromXContent(parser, false), parser -> ParsedBucket.fromXContent(parser, true)); + PARSER.declareString((parsed, value) -> parsed.interval = value, + new ParseField("interval")); } public static ParsedAutoDateHistogram fromXContent(XContentParser parser, String name) throws IOException { @@ -55,6 +68,14 @@ public class ParsedAutoDateHistogram extends ParsedMultiBucketAggregation deferredCollectors) { - this.deferred = BucketCollector.wrap(deferredCollectors); + this.deferred = MultiBucketCollector.wrap(deferredCollectors); } @Override @@ -87,7 +88,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme // Deferring collector return new LeafBucketCollector() { @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { perSegCollector.setScorer(scorer); } @@ -101,7 +102,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme // Designed to be overridden by subclasses that may score docs by criteria // other than Lucene score protected TopDocsCollector createTopDocsCollector(int size) throws IOException { - return TopScoreDocCollector.create(size); + return TopScoreDocCollector.create(size, Integer.MAX_VALUE); } @Override @@ -154,7 +155,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme private long parentBucket; private int matchedDocs; - PerParentBucketSamples(long parentBucket, Scorer scorer, LeafReaderContext readerContext) { + PerParentBucketSamples(long parentBucket, Scorable scorer, LeafReaderContext readerContext) { try { this.parentBucket = parentBucket; tdc = createTopDocsCollector(shardSize); @@ -183,7 +184,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme currentLeafCollector.collect(doc); } - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { currentLeafCollector.setScorer(scorer); } @@ -196,19 +197,18 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme } } - class PerSegmentCollects extends Scorer { + class PerSegmentCollects extends Scorable { private LeafReaderContext readerContext; int maxDocId = Integer.MIN_VALUE; private float currentScore; private int currentDocId = 
-1; - private Scorer currentScorer; + private Scorable currentScorer; PerSegmentCollects(LeafReaderContext readerContext) throws IOException { // The publisher behaviour for Reader/Scorer listeners triggers a // call to this constructor with a null scorer so we can't call // scorer.getWeight() and pass the Weight to our base class. // However, passing null seems to have no adverse effects here... - super(null); this.readerContext = readerContext; for (int i = 0; i < perBucketSamples.size(); i++) { PerParentBucketSamples perBucketSample = perBucketSamples.get(i); @@ -219,7 +219,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme } } - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { this.currentScorer = scorer; for (int i = 0; i < perBucketSamples.size(); i++) { PerParentBucketSamples perBucketSample = perBucketSamples.get(i); @@ -264,11 +264,6 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme return currentDocId; } - @Override - public DocIdSetIterator iterator() { - throw new ElasticsearchException("This caching scorer implementation only implements score() and docID()"); - } - public void collect(int docId, long parentBucket) throws IOException { perBucketSamples = bigArrays.grow(perBucketSamples, parentBucket + 1); PerParentBucketSamples sampler = perBucketSamples.get((int) parentBucket); @@ -279,6 +274,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme sampler.collect(docId); maxDocId = Math.max(maxDocId, docId); } + } public int getDocCount(long parentBucket) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java index 59e491705c6..d4995f75616 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.sampler; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -149,8 +150,8 @@ public class SamplerAggregator extends DeferableBucketAggregator implements Sing } @Override - public boolean needsScores() { - return true; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index df1bd115e2b..d612014e017 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -195,7 +195,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac // such are impossible to differentiate from non-significant terms // at that early stage. 
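// Worked example of the new boolean signature (sizes are hypothetical, not from the patch):
// suggestShardSideQueueSize(10, true) returns 10 unchanged (single shard, no over-request),
// while suggestShardSideQueueSize(10, false) over-requests to (int) (10 * 1.5 + 10) = 25,
// which the caller below then doubles via setShardSize(2 * ...).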
bucketCountThresholds.setShardSize(2 * BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize(), - context.numberOfShards())); + context.numberOfShards() == 1)); } if (valuesSource instanceof ValuesSource.Bytes) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java index ea9a8a91aea..a51a33defdd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java @@ -176,7 +176,7 @@ public class SignificantTextAggregatorFactory extends AggregatorFactory pipelineAggregators, Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); @@ -65,8 +66,8 @@ public abstract class AbstractHDRPercentilesAggregator extends NumericMetricsAgg } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractInternalHDRPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractInternalHDRPercentiles.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java index a7b359d5937..7050254f279 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractInternalHDRPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.HdrHistogram.DoubleHistogram; import org.elasticsearch.common.io.stream.StreamInput; @@ -25,7 +25,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractInternalTDigestPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractInternalTDigestPercentiles.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java index 0938710406a..6e6ff3cf3a8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractInternalTDigestPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java @@ -17,14 +17,13 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractTDigestPercentilesAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractTDigestPercentilesAggregator.java similarity index 90% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractTDigestPercentilesAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractTDigestPercentilesAggregator.java index 1b5ed510f8d..15ad622fce5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractTDigestPercentilesAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractTDigestPercentilesAggregator.java @@ -17,9 +17,10 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.common.util.BigArrays; @@ -29,7 +30,6 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -38,7 +38,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public abstract class AbstractTDigestPercentilesAggregator extends NumericMetricsAggregator.MultiValue { +abstract class AbstractTDigestPercentilesAggregator extends NumericMetricsAggregator.MultiValue { private static int indexOfKey(double[] keys, double key) { return ArrayUtils.binarySearch(keys, key, 0.001); @@ -51,7 +51,7 @@ public abstract class AbstractTDigestPercentilesAggregator extends NumericMetric protected final double compression; protected final boolean keyed; - public AbstractTDigestPercentilesAggregator(String name, ValuesSource.Numeric valuesSource, SearchContext context, Aggregator parent, + AbstractTDigestPercentilesAggregator(String name, ValuesSource.Numeric valuesSource, SearchContext context, Aggregator parent, double[] keys, double compression, boolean keyed, DocValueFormat formatter, List pipelineAggregators, Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); @@ -64,8 +64,8 @@ public abstract class AbstractTDigestPercentilesAggregator extends NumericMetric } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/Avg.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Avg.java similarity index 87% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/Avg.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/Avg.java index e36b8df7deb..1b9f02c5270 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/Avg.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Avg.java @@ -16,9 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.avg; - -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +package org.elasticsearch.search.aggregations.metrics; /** * An aggregation that computes the average of the values in the current bucket. 
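The `needsScores()`-to-`scoreMode()` and `Scorer`-to-`Scorable` moves above repeat across every aggregator and collector in this patch. For reference, here is a minimal sketch of both patterns against the Lucene 8 collector API; the class and its field names are hypothetical, not part of the patch:

    import java.io.IOException;
    import org.apache.lucene.search.Scorable;
    import org.apache.lucene.search.ScoreMode;
    import org.apache.lucene.search.SimpleCollector;

    // Hypothetical collector showing the two API migrations applied throughout this
    // diff: setScorer(...) now receives a Scorable (only score() and docID(), no
    // iteration), and the boolean needsScores() is replaced by a ScoreMode.
    final class ScoreSummingCollector extends SimpleCollector {
        private Scorable scorer;
        double total;

        @Override
        public void setScorer(Scorable scorer) throws IOException {
            this.scorer = scorer; // was: setScorer(Scorer scorer)
        }

        @Override
        public void collect(int doc) throws IOException {
            total += scorer.score(); // scores are available because scoreMode() requests them
        }

        @Override
        public ScoreMode scoreMode() {
            return ScoreMode.COMPLETE; // was: needsScores() { return true; }
        }
    }

Value-source-backed aggregators make the same choice conditionally, as the hunks above do: `valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES`.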
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java index f0d917715ac..1f57964f667 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.avg; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregator.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregator.java index 27890efbff1..22142799a93 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregator.java @@ -16,9 +16,10 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.avg; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -29,7 +30,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -38,7 +38,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class AvgAggregator extends NumericMetricsAggregator.SingleValue { +class AvgAggregator extends NumericMetricsAggregator.SingleValue { final ValuesSource.Numeric valuesSource; @@ -47,7 +47,7 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue { DoubleArray compensations; DocValueFormat format; - public AvgAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, + AvgAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); this.valuesSource = valuesSource; @@ -61,8 +61,8 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue { } @Override - public boolean 
needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorFactory.java similarity index 89% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorFactory.java index f1fc12ef4e5..817e40db26e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.avg; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -33,9 +33,9 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class AvgAggregatorFactory extends ValuesSourceAggregatorFactory { +class AvgAggregatorFactory extends ValuesSourceAggregatorFactory { - public AvgAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, + AvgAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/Cardinality.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Cardinality.java similarity index 87% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/Cardinality.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/Cardinality.java index 92f3b8bb261..f85070d1359 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/Cardinality.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Cardinality.java @@ -17,9 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.cardinality; - -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +package org.elasticsearch.search.aggregations.metrics; /** * An aggregation that computes approximate numbers of unique terms. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index 17b3849c5eb..244aa1dda3f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.cardinality; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregator.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregator.java index 7a8483b1b26..80dd9beac92 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregator.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.cardinality; +package org.elasticsearch.search.aggregations.metrics; import com.carrotsearch.hppc.BitMixer; @@ -25,6 +25,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.RamUsageEstimator; @@ -39,7 +40,6 @@ import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -51,7 +51,7 @@ import java.util.Map; /** * An aggregator that computes approximate counts of unique values. */ -public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue { +class CardinalityAggregator extends NumericMetricsAggregator.SingleValue { private final int precision; private final ValuesSource valuesSource; @@ -62,8 +62,13 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue private Collector collector; - public CardinalityAggregator(String name, ValuesSource valuesSource, int precision, - SearchContext context, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { + CardinalityAggregator(String name, + ValuesSource valuesSource, + int precision, + SearchContext context, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); this.valuesSource = valuesSource; this.precision = precision; @@ -71,8 +76,8 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? 
ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } private Collector pickCollector(LeafReaderContext ctx) throws IOException { @@ -82,7 +87,8 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue if (valuesSource instanceof ValuesSource.Numeric) { ValuesSource.Numeric source = (ValuesSource.Numeric) valuesSource; - MurmurHash3Values hashValues = source.isFloatingPoint() ? MurmurHash3Values.hash(source.doubleValues(ctx)) : MurmurHash3Values.hash(source.longValues(ctx)); + MurmurHash3Values hashValues = source.isFloatingPoint() ? + MurmurHash3Values.hash(source.doubleValues(ctx)) : MurmurHash3Values.hash(source.longValues(ctx)); return new DirectCollector(counts, hashValues); } @@ -269,7 +275,8 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue final org.elasticsearch.common.hash.MurmurHash3.Hash128 hash = new org.elasticsearch.common.hash.MurmurHash3.Hash128(); try (LongArray hashes = bigArrays.newLongArray(maxOrd, false)) { - for (int ord = allVisitedOrds.nextSetBit(0); ord < DocIdSetIterator.NO_MORE_DOCS; ord = ord + 1 < maxOrd ? allVisitedOrds.nextSetBit(ord + 1) : DocIdSetIterator.NO_MORE_DOCS) { + for (int ord = allVisitedOrds.nextSetBit(0); ord < DocIdSetIterator.NO_MORE_DOCS; + ord = ord + 1 < maxOrd ? allVisitedOrds.nextSetBit(ord + 1) : DocIdSetIterator.NO_MORE_DOCS) { final BytesRef value = values.lookupOrd(ord); org.elasticsearch.common.hash.MurmurHash3.hash128(value.bytes, value.offset, value.length, 0, hash); hashes.set(ord, hash.h1); @@ -278,7 +285,8 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue for (long bucket = visitedOrds.size() - 1; bucket >= 0; --bucket) { final FixedBitSet bits = visitedOrds.get(bucket); if (bits != null) { - for (int ord = bits.nextSetBit(0); ord < DocIdSetIterator.NO_MORE_DOCS; ord = ord + 1 < maxOrd ? bits.nextSetBit(ord + 1) : DocIdSetIterator.NO_MORE_DOCS) { + for (int ord = bits.nextSetBit(0); ord < DocIdSetIterator.NO_MORE_DOCS; + ord = ord + 1 < maxOrd ? bits.nextSetBit(ord + 1) : DocIdSetIterator.NO_MORE_DOCS) { counts.collect(bucket, hashes.get(ord)); } } @@ -375,7 +383,8 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue private static class Bytes extends MurmurHash3Values { - private final org.elasticsearch.common.hash.MurmurHash3.Hash128 hash = new org.elasticsearch.common.hash.MurmurHash3.Hash128(); + private final org.elasticsearch.common.hash.MurmurHash3.Hash128 hash = + new org.elasticsearch.common.hash.MurmurHash3.Hash128(); private final SortedBinaryDocValues values; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java similarity index 89% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java index 0d2d32f0469..413c896fbcb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. 
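The reflowed `pickCollector` line above shows the core trick of the cardinality aggregator: numeric sources are hashed per value, doubles via their raw long bits, and the resulting 64-bit hashes feed the HyperLogLog++ sink directly. A dependency-free sketch of that idea; the `fmix64` finalizer is the standard MurmurHash3 finalization step, standing in for what `MurmurHash3Values` wraps (an assumption about its internals, for illustration only).

```java
import java.util.function.LongConsumer;

// Sketch of the "direct" collection path in CardinalityAggregator: hash each
// numeric value to 64 bits and hand the hash straight to the cardinality sink.
class HashedValuesSketch {
    // MurmurHash3's 64-bit finalizer (fmix64); stands in for MurmurHash3Values.
    static long fmix64(long k) {
        k ^= k >>> 33; k *= 0xff51afd7ed558ccdL;
        k ^= k >>> 33; k *= 0xc4ceb9fe1a85ec53L;
        k ^= k >>> 33; return k;
    }

    static long hashLong(long v)     { return fmix64(v); }
    static long hashDouble(double v) { return fmix64(Double.doubleToLongBits(v)); }

    static void collect(double[] doubleValues, LongConsumer sink) {
        for (double v : doubleValues) {
            sink.accept(hashDouble(v)); // duplicate values produce duplicate hashes
        }
    }

    public static void main(String[] args) {
        collect(new double[] {1.0, 2.5, 1.0}, h -> System.out.printf("hash=%016x%n", h));
    }
}
```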
*/ -package org.elasticsearch.search.aggregations.metrics.cardinality; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -32,11 +32,11 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory { +class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory { private final Long precisionThreshold; - public CardinalityAggregatorFactory(String name, ValuesSourceConfig config, Long precisionThreshold, + CardinalityAggregatorFactory(String name, ValuesSourceConfig config, Long precisionThreshold, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStats.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStats.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStats.java index 8a198a5825a..68dac3e373d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStats.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStats.java @@ -16,9 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.stats.extended; - -import org.elasticsearch.search.aggregations.metrics.stats.Stats; +package org.elasticsearch.search.aggregations.metrics; /** * Statistics over a set of values (either aggregated over field data or scripts) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java index 09a12fb188f..33caa5f8400 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. 
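Collapsing the per-metric subpackages (`metrics.avg`, `metrics.cardinality`, and so on) into the single `metrics` package is what makes all of the `public` → package-private demotions above legal: only the builders need to stay visible, because they construct the factories and aggregators from inside the same package. A two-file sketch of the pattern, with hypothetical names:

```java
// File: metrics/CountBuilder.java (sketch; names hypothetical)
package metrics;

public class CountBuilder {
    public CountAggregator build(String name) {
        return new CountAggregator(name); // legal: same package
    }
}

// File: metrics/CountAggregator.java
package metrics;

class CountAggregator {                                // package-private class
    private final String name;
    CountAggregator(String name) { this.name = name; } // package-private ctor
    public String name() { return name; }
}
```

Code outside `metrics` can still obtain a `CountAggregator` through the builder but can no longer name the type or call its constructor, which is exactly the API-surface reduction this part of the patch is after.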
*/ -package org.elasticsearch.search.aggregations.metrics.stats.extended; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java similarity index 95% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java index 8339c06aefd..1d383a2ae19 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java @@ -16,9 +16,10 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.stats.extended; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; @@ -30,7 +31,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -39,9 +39,9 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue { +class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue { - public static final ParseField SIGMA_FIELD = new ParseField("sigma"); + static final ParseField SIGMA_FIELD = new ParseField("sigma"); final ValuesSource.Numeric valuesSource; final DocValueFormat format; @@ -55,7 +55,7 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue DoubleArray sumOfSqrs; DoubleArray compensationOfSqrs; - public ExtendedStatsAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, + ExtendedStatsAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, Aggregator parent, double sigma, List pipelineAggregators, Map metaData) throws IOException { @@ -78,8 +78,8 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? 
ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java index 521ea8f68a6..890f3199498 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.stats.extended; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -33,11 +33,11 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class ExtendedStatsAggregatorFactory extends ValuesSourceAggregatorFactory { +class ExtendedStatsAggregatorFactory extends ValuesSourceAggregatorFactory { private final double sigma; - public ExtendedStatsAggregatorFactory(String name, ValuesSourceConfig config, double sigma, + ExtendedStatsAggregatorFactory(String name, ValuesSourceConfig config, double sigma, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBounds.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBounds.java similarity index 95% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBounds.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBounds.java index 76b8ed11fc9..22fd5b501f9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBounds.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBounds.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.geobounds; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.search.aggregations.Aggregation; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java index 2d616ebe071..9955f62f80a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. 
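The fields visible in the `ExtendedStatsAggregator` hunk (`sumOfSqrs`, `compensations`, `compensationOfSqrs`, and the parsed `sigma`) are enough to reconstruct what the aggregation reports. A runnable sketch of a single bucket, using the same compensated (Kahan) summation idea for both the sum and the sum of squares; the formulas are standard, not copied from the patch.

```java
// One bucket of extended_stats: count, Kahan-compensated sum and sum of squares,
// from which variance, std deviation, and the sigma bounds derive.
class ExtendedStatsSketch {
    long count;
    double sum, comp;           // compensated sum
    double sumOfSqrs, compSqrs; // compensated sum of squares

    void collect(double v) {
        count++;
        double y = v - comp, t = sum + y;
        comp = (t - sum) - y; sum = t;
        double y2 = v * v - compSqrs, t2 = sumOfSqrs + y2;
        compSqrs = (t2 - sumOfSqrs) - y2; sumOfSqrs = t2;
    }

    double avg()      { return sum / count; }
    double variance() { return (sumOfSqrs - sum * sum / count) / count; }
    double stdDev()   { return Math.sqrt(variance()); }
    double upper(double sigma) { return avg() + sigma * stdDev(); }
    double lower(double sigma) { return avg() - sigma * stdDev(); }

    public static void main(String[] args) {
        ExtendedStatsSketch s = new ExtendedStatsSketch();
        for (double v : new double[] {2, 4, 4, 4, 5, 5, 7, 9}) s.collect(v);
        // avg = 5.0, stdDev = 2.0, so the default-style bounds at sigma = 2 are [1.0, 9.0]
        System.out.println(s.avg() + " +/- 2 sigma: [" + s.lower(2) + ", " + s.upper(2) + "]");
    }
}
```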
*/ -package org.elasticsearch.search.aggregations.metrics.geobounds; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregator.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregator.java index 5c0cb4ba60a..e6d591482be 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregator.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.geobounds; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.common.ParseField; @@ -30,7 +30,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.MetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -39,7 +38,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public final class GeoBoundsAggregator extends MetricsAggregator { +final class GeoBoundsAggregator extends MetricsAggregator { static final ParseField WRAP_LONGITUDE_FIELD = new ParseField("wrap_longitude"); @@ -52,7 +51,7 @@ public final class GeoBoundsAggregator extends MetricsAggregator { DoubleArray negLefts; DoubleArray negRights; - protected GeoBoundsAggregator(String name, SearchContext aggregationContext, Aggregator parent, + GeoBoundsAggregator(String name, SearchContext aggregationContext, Aggregator parent, ValuesSource.GeoPoint valuesSource, boolean wrapLongitude, List pipelineAggregators, Map metaData) throws IOException { super(name, aggregationContext, parent, pipelineAggregators, metaData); @@ -154,13 +153,15 @@ public final class GeoBoundsAggregator extends MetricsAggregator { double posRight = posRights.get(owningBucketOrdinal); double negLeft = negLefts.get(owningBucketOrdinal); double negRight = negRights.get(owningBucketOrdinal); - return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, pipelineAggregators(), metaData()); + return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, + pipelineAggregators(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { return new InternalGeoBounds(name, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY, - Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, wrapLongitude, pipelineAggregators(), metaData()); + Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, wrapLongitude, + pipelineAggregators(), metaData()); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java index e67ad49115a..e6080d16cbf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.geobounds; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -32,11 +32,11 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class GeoBoundsAggregatorFactory extends ValuesSourceAggregatorFactory { +class GeoBoundsAggregatorFactory extends ValuesSourceAggregatorFactory { private final boolean wrapLongitude; - public GeoBoundsAggregatorFactory(String name, ValuesSourceConfig config, boolean wrapLongitude, + GeoBoundsAggregatorFactory(String name, ValuesSourceConfig config, boolean wrapLongitude, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroid.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroid.java similarity index 85% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroid.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroid.java index 2cdf462f042..7276bf400dd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroid.java @@ -17,13 +17,13 @@ * under the License. 
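The `posLeft`/`posRight`/`negLeft`/`negRight` arrays in the `GeoBoundsAggregator` hunk exist because a single min/max longitude pair breaks at the antimeridian: points on either side of it would produce a box spanning nearly the whole globe. The aggregator therefore tracks one box per longitude sign and lets `wrap_longitude` decide whether the final bounds may cross the dateline. A simplified sketch of the collection side (the merge heuristic is omitted):

```java
// Per-bucket state of geo_bounds, simplified: one lat range plus two longitude
// boxes, one for lon >= 0 and one for lon < 0.
class GeoBoundsSketch {
    double top = Double.NEGATIVE_INFINITY, bottom = Double.POSITIVE_INFINITY;
    double posLeft = Double.POSITIVE_INFINITY, posRight = Double.NEGATIVE_INFINITY;
    double negLeft = Double.POSITIVE_INFINITY, negRight = Double.NEGATIVE_INFINITY;

    void collect(double lat, double lon) {
        top = Math.max(top, lat);
        bottom = Math.min(bottom, lat);
        if (lon >= 0) {
            posLeft = Math.min(posLeft, lon); posRight = Math.max(posRight, lon);
        } else {
            negLeft = Math.min(negLeft, lon); negRight = Math.max(negRight, lon);
        }
    }

    public static void main(String[] args) {
        GeoBoundsSketch b = new GeoBoundsSketch();
        b.collect(10, 179.0);  // just west of the dateline
        b.collect(12, -179.5); // just east of it
        // Unwrapped, the box is [-179.5, 179.0]; with wrap_longitude the tighter
        // box crosses the dateline: [179.0, -179.5].
        System.out.println("pos box: [" + b.posLeft + ", " + b.posRight + "]");
        System.out.println("neg box: [" + b.negLeft + ", " + b.negRight + "]");
    }
}
```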
*/ -package org.elasticsearch.search.aggregations.metrics.geocentroid; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.search.aggregations.Aggregation; /** - * Interface for {@link org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregator} + * Interface for {@link GeoCentroidAggregator} */ public interface GeoCentroid extends Aggregation { GeoPoint centroid(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java index 32fcaf32775..088483656f8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.geocentroid; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregator.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregator.java index 795524e5a0f..f0f570ebace 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregator.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.geocentroid; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.common.geo.GeoPoint; @@ -29,7 +29,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.MetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java index d153da3afa3..2bfb31c4993 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.geocentroid; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java similarity index 90% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java index 1360999d866..881d7a4bf4f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
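For context on the `GeoCentroidAggregator` files being moved here: the aggregation keeps a running centroid per bucket, updated in constant time per document. A simplified, dependency-free sketch of that incremental mean (it ignores dateline wrap-around and the compensated arithmetic the real aggregator may use):

```java
// Incremental centroid: update the running mean of lat/lon in O(1) per point.
class GeoCentroidSketch {
    long count;
    double lat, lon; // running means

    void collect(double pLat, double pLon) {
        count++;
        lat += (pLat - lat) / count;
        lon += (pLon - lon) / count;
    }

    public static void main(String[] args) {
        GeoCentroidSketch c = new GeoCentroidSketch();
        c.collect(48.86, 2.35);  // Paris
        c.collect(52.52, 13.40); // Berlin
        System.out.printf("centroid ~ (%.2f, %.2f) over %d points%n", c.lat, c.lon, c.count);
    }
}
```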
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.HdrHistogram.DoubleHistogram; import org.elasticsearch.search.DocValueFormat; @@ -30,9 +30,9 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class HDRPercentileRanksAggregator extends AbstractHDRPercentilesAggregator { +class HDRPercentileRanksAggregator extends AbstractHDRPercentilesAggregator { - public HDRPercentileRanksAggregator(String name, Numeric valuesSource, SearchContext context, Aggregator parent, + HDRPercentileRanksAggregator(String name, Numeric valuesSource, SearchContext context, Aggregator parent, double[] percents, int numberOfSignificantValueDigits, boolean keyed, DocValueFormat format, List pipelineAggregators, Map metaData) throws IOException { super(name, valuesSource, context, parent, percents, numberOfSignificantValueDigits, keyed, format, pipelineAggregators, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorFactory.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorFactory.java index d89a9a85b28..1bb96e17da7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -33,14 +33,14 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class HDRPercentileRanksAggregatorFactory +class HDRPercentileRanksAggregatorFactory extends ValuesSourceAggregatorFactory { private final double[] values; private final int numberOfSignificantValueDigits; private final boolean keyed; - public HDRPercentileRanksAggregatorFactory(String name, ValuesSourceConfig config, double[] values, + HDRPercentileRanksAggregatorFactory(String name, ValuesSourceConfig config, double[] values, int numberOfSignificantValueDigits, boolean keyed, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregator.java similarity index 90% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregator.java index 93fd92e4fbf..f1a4a03b24b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregator.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
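The HDR-backed aggregators moved above come in two flavors that are duals of each other: `percentiles` asks "what value sits at rank p?" while `percentile_ranks` asks "what fraction of values fall at or below v?". Both sit on HdrHistogram's `DoubleHistogram`, whose precision is the `numberOfSignificantValueDigits` parameter threaded through the constructors in these hunks. A sketch assuming the HdrHistogram library's public API:

```java
import org.HdrHistogram.DoubleHistogram;

// Both HDR aggs over one histogram: getValueAtPercentile backs percentiles,
// getPercentileAtOrBelowValue backs percentile_ranks.
public class HdrSketch {
    public static void main(String[] args) {
        DoubleHistogram histo = new DoubleHistogram(3); // 3 significant digits
        for (int i = 1; i <= 1000; i++) {
            histo.recordValue(i); // e.g. latencies
        }
        System.out.println("p99 ~ " + histo.getValueAtPercentile(99.0));
        System.out.println("rank(500) ~ " + histo.getPercentileAtOrBelowValue(500) + "%");
    }
}
```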
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.HdrHistogram.DoubleHistogram; import org.elasticsearch.search.DocValueFormat; @@ -30,9 +30,9 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class HDRPercentilesAggregator extends AbstractHDRPercentilesAggregator { +class HDRPercentilesAggregator extends AbstractHDRPercentilesAggregator { - public HDRPercentilesAggregator(String name, Numeric valuesSource, SearchContext context, Aggregator parent, double[] percents, + HDRPercentilesAggregator(String name, Numeric valuesSource, SearchContext context, Aggregator parent, double[] percents, int numberOfSignificantValueDigits, boolean keyed, DocValueFormat formatter, List pipelineAggregators, Map metaData) throws IOException { super(name, valuesSource, context, parent, percents, numberOfSignificantValueDigits, keyed, formatter, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorFactory.java similarity index 89% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorFactory.java index 1074b6e142d..fe53f32889a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -33,13 +33,13 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class HDRPercentilesAggregatorFactory extends ValuesSourceAggregatorFactory { +class HDRPercentilesAggregatorFactory extends ValuesSourceAggregatorFactory { private final double[] percents; private final int numberOfSignificantValueDigits; private final boolean keyed; - public HDRPercentilesAggregatorFactory(String name, ValuesSourceConfig config, double[] percents, + HDRPercentilesAggregatorFactory(String name, ValuesSourceConfig config, double[] percents, int numberOfSignificantValueDigits, boolean keyed, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java similarity index 99% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java index 1dfe70d4b7f..e8989868b07 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.cardinality; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LongBitSet; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalAvg.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalAvg.java index 285ea469aed..1b30afc0874 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalAvg.java @@ -16,14 +16,13 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.avg; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java index ce1e9fc8939..b3fcb33a4fb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.cardinality; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -25,7 +25,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStats.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStats.java index 1f259fbe87d..608fd1de435 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStats.java @@ -16,14 +16,13 @@ * specific language governing permissions and limitations * under the License. 
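A note on why `InternalAvg`, moved above, carries a sum and a count rather than a precomputed average: averages are not mergeable across shards, but sums and counts are, so the coordinating node can reduce shard-level partials exactly. A small sketch of that reduce step (the `AvgPartial` type is hypothetical, mirroring the shape of the internal agg):

```java
import java.util.List;

// Shard-level partial for an average: mergeable, unlike the average itself.
record AvgPartial(double sum, long count) {
    static AvgPartial reduce(List<AvgPartial> shards) {
        double sum = 0;
        long count = 0;
        for (AvgPartial p : shards) { sum += p.sum(); count += p.count(); }
        return new AvgPartial(sum, count);
    }

    double value() { return sum / count; }

    public static void main(String[] args) {
        // shard A: avg 2.0 over 2 docs; shard B: avg 10.0 over 1 doc
        AvgPartial reduced = reduce(List.of(new AvgPartial(4, 2), new AvgPartial(10, 1)));
        System.out.println(reduced.value()); // 4.666..., not (2.0 + 10.0) / 2 = 6.0
    }
}
```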
*/ -package org.elasticsearch.search.aggregations.metrics.stats.extended; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.stats.InternalStats; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalGeoBounds.java similarity index 99% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalGeoBounds.java index 69fc6fcaffe..7f259baca69 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalGeoBounds.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.geobounds; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.geo.GeoPoint; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalGeoCentroid.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalGeoCentroid.java index b8d317ff787..d5d537ab66e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalGeoCentroid.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.geocentroid; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.geo.GeoEncodingUtils; import org.elasticsearch.common.ParseField; @@ -41,7 +41,8 @@ public class InternalGeoCentroid extends InternalAggregation implements GeoCentr private final long count; public static long encodeLatLon(double lat, double lon) { - return (Integer.toUnsignedLong(GeoEncodingUtils.encodeLatitude(lat)) << 32) | Integer.toUnsignedLong(GeoEncodingUtils.encodeLongitude(lon)); + return (Integer.toUnsignedLong(GeoEncodingUtils.encodeLatitude(lat)) << 32) | + Integer.toUnsignedLong(GeoEncodingUtils.encodeLongitude(lon)); } public static double decodeLatitude(long encodedLatLon) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentileRanks.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentileRanks.java similarity index 90% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentileRanks.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentileRanks.java index cb058128c5a..bfe483d0e3c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentileRanks.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentileRanks.java @@ -16,13 +16,11 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.HdrHistogram.DoubleHistogram; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; @@ -33,7 +31,7 @@ import java.util.Map; public class InternalHDRPercentileRanks extends AbstractInternalHDRPercentiles implements PercentileRanks { public static final String NAME = "hdr_percentile_ranks"; - public InternalHDRPercentileRanks(String name, double[] cdfValues, DoubleHistogram state, boolean keyed, DocValueFormat formatter, + InternalHDRPercentileRanks(String name, double[] cdfValues, DoubleHistogram state, boolean keyed, DocValueFormat formatter, List pipelineAggregators, Map metaData) { super(name, cdfValues, state, keyed, formatter, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentiles.java similarity index 90% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentiles.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentiles.java index a153e497f7b..5a62de8a964 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentiles.java @@ -16,13 +16,11 @@ * specific language governing permissions and limitations * under the License. 
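The reflowed `encodeLatLon` line in the `InternalGeoCentroid` hunk packs two 32-bit encoded coordinates into one long: latitude in the high half, longitude in the low half. A worked example of why the `Integer.toUnsignedLong` calls matter; `intEncode` below is a hypothetical stand-in for Lucene's `GeoEncodingUtils.encodeLatitude`/`encodeLongitude`.

```java
// Packing two 32-bit coordinate encodings into one long, as in encodeLatLon.
class LatLonPacking {
    // Hypothetical stand-in for GeoEncodingUtils.encodeLatitude/encodeLongitude.
    static int intEncode(double degrees, double range) {
        return (int) (degrees / range * Integer.MAX_VALUE);
    }

    static long encode(double lat, double lon) {
        // Integer.toUnsignedLong is the point: a negative longitude word widened
        // with sign extension would smear 1-bits across the latitude half.
        return (Integer.toUnsignedLong(intEncode(lat, 90)) << 32)
                | Integer.toUnsignedLong(intEncode(lon, 180));
    }

    static int latBits(long packed) { return (int) (packed >>> 32); }
    static int lonBits(long packed) { return (int) packed; }

    public static void main(String[] args) {
        long packed = encode(47.61, -122.33); // negative longitude on purpose
        System.out.println(latBits(packed) == intEncode(47.61, 90));    // true
        System.out.println(lonBits(packed) == intEncode(-122.33, 180)); // true
    }
}
```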
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.HdrHistogram.DoubleHistogram; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; @@ -33,7 +31,7 @@ import java.util.Map; public class InternalHDRPercentiles extends AbstractInternalHDRPercentiles implements Percentiles { public static final String NAME = "hdr_percentiles"; - public InternalHDRPercentiles(String name, double[] percents, DoubleHistogram state, boolean keyed, DocValueFormat formatter, + InternalHDRPercentiles(String name, double[] percents, DoubleHistogram state, boolean keyed, DocValueFormat formatter, List pipelineAggregators, Map metaData) { super(name, percents, state, keyed, formatter, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMax.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMax.java index 449351b88b1..300c82710f6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMax.java @@ -16,14 +16,13 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.max; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; @@ -34,8 +33,8 @@ import java.util.Objects; public class InternalMax extends InternalNumericMetricsAggregation.SingleValue implements Max { private final double max; - public InternalMax(String name, double max, DocValueFormat formatter, List pipelineAggregators, - Map metaData) { + public InternalMax(String name, double max, DocValueFormat formatter, + List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.format = formatter; this.max = max; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMin.java similarity index 95% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMin.java index 886642c222b..60ed785edfe 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMin.java @@ -16,14 +16,13 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.min; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java index 4124a8eeb76..ec2419e03ab 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.scripted; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -40,7 +40,7 @@ public class InternalScriptedMetric extends InternalAggregation implements Scrip final Script reduceScript; private final List aggregation; - public InternalScriptedMetric(String name, Object aggregation, Script reduceScript, List pipelineAggregators, + InternalScriptedMetric(String name, Object aggregation, Script reduceScript, List pipelineAggregators, Map metaData) { this(name, Collections.singletonList(aggregation), reduceScript, pipelineAggregators, metaData); } @@ -95,11 +95,6 @@ public class InternalScriptedMetric extends InternalAggregation implements Scrip params.putAll(firstAggregation.reduceScript.getParams()); } - // Add _aggs to params map for backwards compatibility (redundant with a context variable on the ReduceScript created below). - if (ScriptedMetricAggContexts.deprecatedAggParamEnabled()) { - params.put("_aggs", aggregationObjects); - } - ScriptedMetricAggContexts.ReduceScript.Factory factory = reduceContext.scriptService().compile( firstAggregation.reduceScript, ScriptedMetricAggContexts.ReduceScript.CONTEXT); ScriptedMetricAggContexts.ReduceScript script = factory.newInstance(params, aggregationObjects); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalStats.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalStats.java index 19f74cd72c8..a05d6db7024 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalStats.java @@ -16,14 +16,13 @@ * specific language governing permissions and limitations * under the License. 
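The deleted block in the `InternalScriptedMetric` hunk removes the deprecated `_aggs` entry from the reduce script's params map; the shard states were already passed to the script as an explicit argument, so the params entry was redundant back-compat. A dependency-free sketch of the shape of that change; every name here is an illustrative stand-in, not the real `ScriptedMetricAggContexts` API.

```java
import java.util.List;
import java.util.Map;
import java.util.function.BiFunction;

// Reduce phase of a scripted metric, simplified: shard states arrive as a real
// argument instead of being smuggled into params under "_aggs".
class ReduceScriptSketch {
    static Object runReduce(BiFunction<Map<String, Object>, List<Object>, Object> reduceScript,
                            Map<String, Object> params, List<Object> states) {
        // Old behavior (removed by this patch): params.put("_aggs", states);
        return reduceScript.apply(params, states);
    }

    public static void main(String[] args) {
        List<Object> shardStates = List.of(3L, 4L, 5L);
        Object total = runReduce(
                (params, states) -> states.stream().mapToLong(s -> (Long) s).sum(),
                Map.of(), shardStates);
        System.out.println(total); // 12
    }
}
```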
*/ -package org.elasticsearch.search.aggregations.metrics.stats; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; @@ -47,8 +46,7 @@ public class InternalStats extends InternalNumericMetricsAggregation.MultiValue protected final double sum; public InternalStats(String name, long count, double sum, double min, double max, DocValueFormat formatter, - List pipelineAggregators, - Map metaData) { + List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.count = count; this.sum = sum; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalSum.java similarity index 91% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalSum.java index cedcdd4aab0..c3bb7173b3f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalSum.java @@ -16,14 +16,13 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.sum; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; @@ -34,8 +33,8 @@ import java.util.Objects; public class InternalSum extends InternalNumericMetricsAggregation.SingleValue implements Sum { private final double sum; - public InternalSum(String name, double sum, DocValueFormat formatter, List pipelineAggregators, - Map metaData) { + InternalSum(String name, double sum, DocValueFormat formatter, List pipelineAggregators, + Map metaData) { super(name, pipelineAggregators, metaData); this.sum = sum; this.format = formatter; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentileRanks.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentileRanks.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentileRanks.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentileRanks.java index 666993f41fd..aa82ac5ba6a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentileRanks.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentileRanks.java @@ -16,12 +16,10 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; @@ -32,8 +30,8 @@ import java.util.Map; public class InternalTDigestPercentileRanks extends AbstractInternalTDigestPercentiles implements PercentileRanks { public static final String NAME = "tdigest_percentile_ranks"; - public InternalTDigestPercentileRanks(String name, double[] cdfValues, TDigestState state, boolean keyed, DocValueFormat formatter, - List pipelineAggregators, Map metaData) { + InternalTDigestPercentileRanks(String name, double[] cdfValues, TDigestState state, boolean keyed, DocValueFormat formatter, + List pipelineAggregators, Map metaData) { super(name, cdfValues, state, keyed, formatter, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentiles.java similarity index 89% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentiles.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentiles.java index 5a62f24933b..28f1230bec7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentiles.java @@ -16,12 +16,10 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; @@ -32,7 +30,7 @@ import java.util.Map; public class InternalTDigestPercentiles extends AbstractInternalTDigestPercentiles implements Percentiles { public static final String NAME = "tdigest_percentiles"; - public InternalTDigestPercentiles(String name, double[] percents, TDigestState state, boolean keyed, DocValueFormat formatter, + InternalTDigestPercentiles(String name, double[] percents, TDigestState state, boolean keyed, DocValueFormat formatter, List pipelineAggregators, Map metaData) { super(name, percents, state, keyed, formatter, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java similarity index 76% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java index 58fac4b9520..0c85191379f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java @@ -16,16 +16,18 @@ * specific language governing permissions and limitations * under the License. 
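The t-digest variants moved above play the same quantile/rank roles as the HDR ones but trade differently: `TDigestState` wraps the t-digest library's AVL-tree digest, whose memory footprint is bounded by a compression parameter at the cost of approximation. A sketch assuming the t-digest library's public API:

```java
import com.tdunning.math.stats.TDigest;

// quantile() backs tdigest_percentiles, cdf() backs tdigest_percentile_ranks.
public class TDigestSketch {
    public static void main(String[] args) {
        TDigest digest = TDigest.createAvlTreeDigest(100.0); // compression knob
        for (int i = 1; i <= 10_000; i++) {
            digest.add(Math.log(i)); // a skewed sample
        }
        System.out.println("median ~ " + digest.quantile(0.5));
        System.out.println("P(x <= 5) ~ " + digest.cdf(5.0));
    }
}
```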
*/ -package org.elasticsearch.search.aggregations.metrics.tophits; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits.Relation; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -43,10 +45,10 @@ import java.util.Map; public class InternalTopHits extends InternalAggregation implements TopHits { private int from; private int size; - private TopDocs topDocs; + private TopDocsAndMaxScore topDocs; private SearchHits searchHits; - public InternalTopHits(String name, int from, int size, TopDocs topDocs, SearchHits searchHits, + public InternalTopHits(String name, int from, int size, TopDocsAndMaxScore topDocs, SearchHits searchHits, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.from = from; @@ -85,7 +87,7 @@ public class InternalTopHits extends InternalAggregation implements TopHits { return searchHits; } - TopDocs getTopDocs() { + TopDocsAndMaxScore getTopDocs() { return topDocs; } @@ -115,12 +117,12 @@ public class InternalTopHits extends InternalAggregation implements TopHits { final TopDocs reducedTopDocs; final TopDocs[] shardDocs; - if (topDocs instanceof TopFieldDocs) { - Sort sort = new Sort(((TopFieldDocs) topDocs).fields); + if (topDocs.topDocs instanceof TopFieldDocs) { + Sort sort = new Sort(((TopFieldDocs) topDocs.topDocs).fields); shardDocs = new TopFieldDocs[aggregations.size()]; for (int i = 0; i < shardDocs.length; i++) { InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i); - shardDocs[i] = topHitsAgg.topDocs; + shardDocs[i] = topHitsAgg.topDocs.topDocs; shardHits[i] = topHitsAgg.searchHits; } reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs, true); @@ -128,12 +130,24 @@ public class InternalTopHits extends InternalAggregation implements TopHits { shardDocs = new TopDocs[aggregations.size()]; for (int i = 0; i < shardDocs.length; i++) { InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i); - shardDocs[i] = topHitsAgg.topDocs; + shardDocs[i] = topHitsAgg.topDocs.topDocs; shardHits[i] = topHitsAgg.searchHits; } reducedTopDocs = TopDocs.merge(from, size, shardDocs, true); } + float maxScore = Float.NaN; + for (InternalAggregation agg : aggregations) { + InternalTopHits topHitsAgg = (InternalTopHits) agg; + if (Float.isNaN(topHitsAgg.topDocs.maxScore) == false) { + if (Float.isNaN(maxScore)) { + maxScore = topHitsAgg.topDocs.maxScore; + } else { + maxScore = Math.max(maxScore, topHitsAgg.topDocs.maxScore); + } + } + } + final int[] tracker = new int[shardHits.length]; SearchHit[] hits = new SearchHit[reducedTopDocs.scoreDocs.length]; for (int i = 0; i < reducedTopDocs.scoreDocs.length; i++) { @@ -144,9 +158,10 @@ public class InternalTopHits extends InternalAggregation implements TopHits { } while (shardDocs[scoreDoc.shardIndex].scoreDocs[position] != scoreDoc); hits[i] = shardHits[scoreDoc.shardIndex].getAt(position); } - return new InternalTopHits(name, this.from, this.size, reducedTopDocs, new SearchHits(hits, 
reducedTopDocs.totalHits, - reducedTopDocs.getMaxScore()), - pipelineAggregators(), getMetaData()); + assert reducedTopDocs.totalHits.relation == Relation.EQUAL_TO; + return new InternalTopHits(name, this.from, this.size, + new TopDocsAndMaxScore(reducedTopDocs, maxScore), + new SearchHits(hits, reducedTopDocs.totalHits.value, maxScore), pipelineAggregators(), getMetaData()); } @Override @@ -170,11 +185,12 @@ public class InternalTopHits extends InternalAggregation implements TopHits { InternalTopHits other = (InternalTopHits) obj; if (from != other.from) return false; if (size != other.size) return false; - if (topDocs.totalHits != other.topDocs.totalHits) return false; - if (topDocs.scoreDocs.length != other.topDocs.scoreDocs.length) return false; - for (int d = 0; d < topDocs.scoreDocs.length; d++) { - ScoreDoc thisDoc = topDocs.scoreDocs[d]; - ScoreDoc otherDoc = other.topDocs.scoreDocs[d]; + if (topDocs.topDocs.totalHits.value != other.topDocs.topDocs.totalHits.value) return false; + if (topDocs.topDocs.totalHits.relation != other.topDocs.topDocs.totalHits.relation) return false; + if (topDocs.topDocs.scoreDocs.length != other.topDocs.topDocs.scoreDocs.length) return false; + for (int d = 0; d < topDocs.topDocs.scoreDocs.length; d++) { + ScoreDoc thisDoc = topDocs.topDocs.scoreDocs[d]; + ScoreDoc otherDoc = other.topDocs.topDocs.scoreDocs[d]; if (thisDoc.doc != otherDoc.doc) return false; if (Double.compare(thisDoc.score, otherDoc.score) != 0) return false; if (thisDoc.shardIndex != otherDoc.shardIndex) return false; @@ -195,9 +211,10 @@ public class InternalTopHits extends InternalAggregation implements TopHits { protected int doHashCode() { int hashCode = from; hashCode = 31 * hashCode + size; - hashCode = 31 * hashCode + Long.hashCode(topDocs.totalHits); - for (int d = 0; d < topDocs.scoreDocs.length; d++) { - ScoreDoc doc = topDocs.scoreDocs[d]; + hashCode = 31 * hashCode + Long.hashCode(topDocs.topDocs.totalHits.value); + hashCode = 31 * hashCode + topDocs.topDocs.totalHits.relation.hashCode(); + for (int d = 0; d < topDocs.topDocs.scoreDocs.length; d++) { + ScoreDoc doc = topDocs.topDocs.scoreDocs[d]; hashCode = 31 * hashCode + doc.doc; hashCode = 31 * hashCode + Float.floatToIntBits(doc.score); hashCode = 31 * hashCode + doc.shardIndex; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalValueCount.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalValueCount.java index 0ac42ff9f45..36f2749c791 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalValueCount.java @@ -16,13 +16,12 @@ * specific language governing permissions and limitations * under the License. 
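The InternalTopHits hunk above is the one substantive, non-rename change in this stretch: Lucene 8 drops maxScore from TopDocs and turns totalHits into a TotalHits (a value plus an EQUAL_TO / GREATER_THAN_OR_EQUAL_TO relation), so the aggregation now carries a TopDocsAndMaxScore wrapper, folds the per-shard max scores itself during reduce, and compares and hashes the relation in equals()/doHashCode(). A self-contained sketch of that NaN-aware fold (NaN meaning "this shard produced no scores"):

```java
// Mirrors the reduce loop above: start from NaN and skip NaN shard maxima, so
// a mix of scored and unscored shards still yields the scored maximum, and an
// all-unscored reduce stays NaN.
public final class MaxScoreReduce {
    private MaxScoreReduce() {}

    public static float reduce(float[] shardMaxScores) {
        float maxScore = Float.NaN;
        for (float shardMax : shardMaxScores) {
            if (Float.isNaN(shardMax) == false) {
                maxScore = Float.isNaN(maxScore) ? shardMax : Math.max(maxScore, shardMax);
            }
        }
        return maxScore;
    }
}
```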
*/ -package org.elasticsearch.search.aggregations.metrics.valuecount; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; @@ -36,7 +35,7 @@ import java.util.Objects; public class InternalValueCount extends InternalNumericMetricsAggregation.SingleValue implements ValueCount { private final long value; - public InternalValueCount(String name, long value, List pipelineAggregators, + InternalValueCount(String name, long value, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.value = value; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/InternalWeightedAvg.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalWeightedAvg.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/InternalWeightedAvg.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalWeightedAvg.java index 9ad1a1df78a..e06ffbc7b4a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/InternalWeightedAvg.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalWeightedAvg.java @@ -16,14 +16,13 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.weighted_avg; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; @@ -35,8 +34,8 @@ public class InternalWeightedAvg extends InternalNumericMetricsAggregation.Singl private final double sum; private final double weight; - public InternalWeightedAvg(String name, double sum, double weight, DocValueFormat format, List pipelineAggregators, - Map metaData) { + InternalWeightedAvg(String name, double sum, double weight, DocValueFormat format, List pipelineAggregators, + Map metaData) { super(name, pipelineAggregators, metaData); this.sum = sum; this.weight = weight; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/Max.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Max.java similarity index 87% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/Max.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/Max.java index bee808d16a1..ee592fd75fb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/Max.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Max.java @@ -16,9 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.max; - -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +package org.elasticsearch.search.aggregations.metrics; /** * An aggregation that computes the maximum of the values in the current bucket. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java index 7135aceba95..0c3229f08fd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.max; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregator.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregator.java index ff76e6637ba..c65277d389c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregator.java @@ -16,9 +16,10 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.max; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -30,7 +31,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -39,14 +39,14 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class MaxAggregator extends NumericMetricsAggregator.SingleValue { +class MaxAggregator extends NumericMetricsAggregator.SingleValue { final ValuesSource.Numeric valuesSource; final DocValueFormat formatter; DoubleArray maxes; - public MaxAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, + MaxAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { @@ -60,8 +60,8 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorFactory.java similarity index 89% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorFactory.java index aedba76e0c7..314e1106b37 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. 
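MaxAggregator (and MinAggregator further down) also picks up the Lucene 8 collector-API migration: boolean needsScores() is replaced by scoreMode(), with the old true/false answer mapping onto ScoreMode.COMPLETE versus ScoreMode.COMPLETE_NO_SCORES. A sketch of the mapping in isolation (the abstract hook below stands in for the aggregator's values-source check and is illustrative):

```java
import org.apache.lucene.search.ScoreMode;

abstract class NeedsScoresMigration {
    // Stand-in for: valuesSource != null && valuesSource.needsScores()
    abstract boolean needsScores();

    // Lucene 8 replacement for the old `public boolean needsScores()`:
    public ScoreMode scoreMode() {
        return needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES;
    }
}
```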
*/ -package org.elasticsearch.search.aggregations.metrics.max; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -33,9 +33,9 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class MaxAggregatorFactory extends ValuesSourceAggregatorFactory { +class MaxAggregatorFactory extends ValuesSourceAggregatorFactory { - public MaxAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, + MaxAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/Min.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Min.java similarity index 87% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/Min.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/Min.java index 3b5488199e8..5fd1984da88 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/Min.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Min.java @@ -16,9 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.min; - -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +package org.elasticsearch.search.aggregations.metrics; /** * An aggregation that computes the minimum of the values in the current bucket. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java index 380569f1896..2d23539189d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.min; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,7 +27,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregator.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregator.java index e4b371514bd..ea8e160e138 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregator.java @@ -16,9 +16,10 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.min; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -30,7 +31,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -39,14 +39,14 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class MinAggregator extends NumericMetricsAggregator.SingleValue { +class MinAggregator extends NumericMetricsAggregator.SingleValue { final ValuesSource.Numeric valuesSource; final DocValueFormat format; DoubleArray mins; - public MinAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, + MinAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); @@ -59,8 +59,8 @@ public class MinAggregator extends NumericMetricsAggregator.SingleValue { } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? 
ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorFactory.java similarity index 89% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorFactory.java index 8f5538fb7a2..d08b8199a33 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.min; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -33,9 +33,9 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class MinAggregatorFactory extends ValuesSourceAggregatorFactory { +class MinAggregatorFactory extends ValuesSourceAggregatorFactory { - public MinAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, + MinAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/ParsedAvg.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedAvg.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/ParsedAvg.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedAvg.java index 16d91bd08f0..0e15d417f87 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/ParsedAvg.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedAvg.java @@ -17,12 +17,11 @@ * under the License. 
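Alongside the package flattening, the concrete aggregators and factories (MaxAggregator, MinAggregator, MaxAggregatorFactory, MinAggregatorFactory, plus several Internal*/Parsed* constructors) lose their public modifier. That makes the builder the only supported way in. A hedged sketch of what callers look like, assuming a typical search request (the "price" field name is made up for illustration):

```java
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public final class MaxViaBuilder {
    private MaxViaBuilder() {}

    // MaxAggregator is package-private after this change; callers construct
    // the aggregation through its (still public) builder instead.
    public static SearchSourceBuilder maxPriceSource() {
        MaxAggregationBuilder max = AggregationBuilders.max("max_price").field("price");
        return new SearchSourceBuilder().size(0).aggregation(max);
    }
}
```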
*/ -package org.elasticsearch.search.aggregations.metrics.avg; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.ParsedSingleValueNumericMetricsAggregation; import java.io.IOException; @@ -61,4 +60,4 @@ public class ParsedAvg extends ParsedSingleValueNumericMetricsAggregation implem avg.setName(name); return avg; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/ParsedCardinality.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedCardinality.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/ParsedCardinality.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedCardinality.java index 5a615f61a4a..848f2e6fd01 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/ParsedCardinality.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedCardinality.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.cardinality; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -70,4 +70,4 @@ public class ParsedCardinality extends ParsedAggregation implements Cardinality builder.field(CommonFields.VALUE.getPreferredName(), cardinalityValue); return builder; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ParsedExtendedStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedExtendedStats.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ParsedExtendedStats.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedExtendedStats.java index 59311127368..cee96c07a24 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ParsedExtendedStats.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedExtendedStats.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.stats.extended; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.collect.Tuple; @@ -26,8 +26,7 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.stats.ParsedStats; -import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats.Fields; +import org.elasticsearch.search.aggregations.metrics.InternalExtendedStats.Fields; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/ParsedGeoBounds.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoBounds.java similarity index 87% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/ParsedGeoBounds.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoBounds.java index 70abe15d290..11d36d2ceee 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/ParsedGeoBounds.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoBounds.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.geobounds; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.geo.GeoPoint; @@ -30,11 +30,11 @@ import org.elasticsearch.search.aggregations.ParsedAggregation; import java.io.IOException; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBounds.BOTTOM_RIGHT_FIELD; -import static org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBounds.BOUNDS_FIELD; -import static org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBounds.LAT_FIELD; -import static org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBounds.LON_FIELD; -import static org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBounds.TOP_LEFT_FIELD; +import static org.elasticsearch.search.aggregations.metrics.InternalGeoBounds.BOTTOM_RIGHT_FIELD; +import static org.elasticsearch.search.aggregations.metrics.InternalGeoBounds.BOUNDS_FIELD; +import static org.elasticsearch.search.aggregations.metrics.InternalGeoBounds.LAT_FIELD; +import static org.elasticsearch.search.aggregations.metrics.InternalGeoBounds.LON_FIELD; +import static org.elasticsearch.search.aggregations.metrics.InternalGeoBounds.TOP_LEFT_FIELD; public class ParsedGeoBounds extends ParsedAggregation implements GeoBounds { private GeoPoint topLeft; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/ParsedGeoCentroid.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoCentroid.java similarity index 95% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/ParsedGeoCentroid.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoCentroid.java index 7ce1f5d86fe..ff40d33de42 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/ParsedGeoCentroid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoCentroid.java @@ -17,14 +17,14 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.geocentroid; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.ParsedAggregation; -import org.elasticsearch.search.aggregations.metrics.geocentroid.InternalGeoCentroid.Fields; +import org.elasticsearch.search.aggregations.metrics.InternalGeoCentroid.Fields; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/ParsedHDRPercentileRanks.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentileRanks.java similarity index 87% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/ParsedHDRPercentileRanks.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentileRanks.java index f5fd7717e04..eac1f210905 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/ParsedHDRPercentileRanks.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentileRanks.java @@ -17,13 +17,10 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import java.io.IOException; import java.util.Iterator; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/ParsedHDRPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentiles.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/ParsedHDRPercentiles.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentiles.java index 1b1ba906aa0..bb34d8550d0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/ParsedHDRPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentiles.java @@ -17,12 +17,10 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/ParsedMax.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedMax.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/ParsedMax.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedMax.java index f6a3190cd04..4a284c2d204 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/ParsedMax.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedMax.java @@ -17,12 +17,11 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.max; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.ParsedSingleValueNumericMetricsAggregation; import java.io.IOException; @@ -59,4 +58,4 @@ public class ParsedMax extends ParsedSingleValueNumericMetricsAggregation implem max.setName(name); return max; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/ParsedMin.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedMin.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/ParsedMin.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedMin.java index 9b214bb3462..51a53d50d7a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/ParsedMin.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedMin.java @@ -17,12 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.min; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.ParsedSingleValueNumericMetricsAggregation; import java.io.IOException; @@ -59,4 +58,4 @@ public class ParsedMin extends ParsedSingleValueNumericMetricsAggregation implem min.setName(name); return min; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/ParsedPercentileRanks.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java similarity index 85% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/ParsedPercentileRanks.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java index 2c80d0328dd..5c38bc684a8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/ParsedPercentileRanks.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java @@ -17,9 +17,9 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles; +package org.elasticsearch.search.aggregations.metrics; -public abstract class ParsedPercentileRanks extends ParsedPercentiles implements PercentileRanks { +abstract class ParsedPercentileRanks extends ParsedPercentiles implements PercentileRanks { @Override public double percent(double value) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/ParsedPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java similarity index 99% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/ParsedPercentiles.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java index 2c7da76446d..2742050862c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/ParsedPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ParsedScriptedMetric.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedScriptedMetric.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ParsedScriptedMetric.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedScriptedMetric.java index f2aae9f5e8a..696c12219a4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ParsedScriptedMetric.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedScriptedMetric.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.scripted; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.ObjectParser; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/ParsedStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedStats.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/ParsedStats.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedStats.java index 4c676cf2278..e45dd3c87c1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/ParsedStats.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedStats.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.stats; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; @@ -25,7 +25,7 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.ParsedAggregation; -import org.elasticsearch.search.aggregations.metrics.stats.InternalStats.Fields; +import org.elasticsearch.search.aggregations.metrics.InternalStats.Fields; import java.io.IOException; import java.util.HashMap; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/ParsedSum.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedSum.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/ParsedSum.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedSum.java index a51f03d3565..514edaa750b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/ParsedSum.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedSum.java @@ -17,12 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.sum; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.ParsedSingleValueNumericMetricsAggregation; import java.io.IOException; @@ -58,4 +57,4 @@ public class ParsedSum extends ParsedSingleValueNumericMetricsAggregation implem sum.setName(name); return sum; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/ParsedTDigestPercentileRanks.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentileRanks.java similarity index 87% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/ParsedTDigestPercentileRanks.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentileRanks.java index 01929f374d4..f17bc8784ae 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/ParsedTDigestPercentileRanks.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentileRanks.java @@ -17,13 +17,10 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import java.io.IOException; import java.util.Iterator; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/ParsedTDigestPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentiles.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/ParsedTDigestPercentiles.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentiles.java index cbae25d61e0..2453c702b96 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/ParsedTDigestPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentiles.java @@ -17,12 +17,10 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/ParsedTopHits.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTopHits.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/ParsedTopHits.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTopHits.java index 362423abca8..321ed5709e8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/ParsedTopHits.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTopHits.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.tophits; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ParsedValueCount.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedValueCount.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ParsedValueCount.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedValueCount.java index 7430bca08de..0f60b145fd1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ParsedValueCount.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedValueCount.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.valuecount; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -71,4 +71,4 @@ public class ParsedValueCount extends ParsedAggregation implements ValueCount { sum.setName(name); return sum; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/ParsedWeightedAvg.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedWeightedAvg.java similarity index 89% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/ParsedWeightedAvg.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedWeightedAvg.java index dcda79ce33e..984b8509db7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/ParsedWeightedAvg.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedWeightedAvg.java @@ -17,16 +17,15 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.weighted_avg; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.ParsedSingleValueNumericMetricsAggregation; import java.io.IOException; -public class ParsedWeightedAvg extends ParsedSingleValueNumericMetricsAggregation implements WeightedAvg { +class ParsedWeightedAvg extends ParsedSingleValueNumericMetricsAggregation implements WeightedAvg { @Override public double getValue() { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/Percentile.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Percentile.java similarity index 95% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/Percentile.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/Percentile.java index ca62ca6b200..85c1184cc06 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/Percentile.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Percentile.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles; +package org.elasticsearch.search.aggregations.metrics; import java.util.Objects; @@ -56,4 +56,4 @@ public class Percentile { public int hashCode() { return Objects.hash(percent, value); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanks.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanks.java similarity index 89% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanks.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanks.java index 8a2dc9d9026..468045a14f4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanks.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanks.java @@ -17,9 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles; - -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +package org.elasticsearch.search.aggregations.metrics; /** * An aggregation that computes approximate percentiles given values. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java index 6bb956452ef..3bf70d20989 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -29,8 +29,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.HDRPercentileRanksAggregatorFactory; -import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestPercentileRanksAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/Percentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Percentiles.java similarity index 89% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/Percentiles.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/Percentiles.java index a9052536dc4..213eede90bf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/Percentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Percentiles.java @@ -16,9 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles; - -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +package org.elasticsearch.search.aggregations.metrics; /** * An aggregation that computes approximate percentiles. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java index 5c90832bb15..3a6f5f89622 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -28,8 +28,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.HDRPercentilesAggregatorFactory; -import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestPercentilesAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesMethod.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethod.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesMethod.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethod.java index 3b8085793dc..3797e01e899 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesMethod.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethod.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetric.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetric.java similarity index 94% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetric.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetric.java index 9733e5f4979..4043e98ba69 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetric.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetric.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.scripted; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregation; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java similarity index 99% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java index 8b6d834184d..6a25c51737b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.scripted; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java similarity index 76% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java index ffdff44b783..345b21d0388 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java @@ -17,18 +17,18 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.scripted; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.script.ScriptedMetricAggContexts; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptedMetricAggContexts; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.MetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.internal.SearchContext; @@ -36,17 +36,22 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class ScriptedMetricAggregator extends MetricsAggregator { +class ScriptedMetricAggregator extends MetricsAggregator { private final ScriptedMetricAggContexts.MapScript.LeafFactory mapScript; private final ScriptedMetricAggContexts.CombineScript combineScript; private final Script reduceScript; - private Object aggState; + private Map aggState; - protected ScriptedMetricAggregator(String name, ScriptedMetricAggContexts.MapScript.LeafFactory mapScript, ScriptedMetricAggContexts.CombineScript combineScript, - Script reduceScript, Object aggState, SearchContext context, Aggregator parent, - List pipelineAggregators, Map metaData) - throws IOException { + ScriptedMetricAggregator(String name, + ScriptedMetricAggContexts.MapScript.LeafFactory mapScript, + ScriptedMetricAggContexts.CombineScript combineScript, + Script reduceScript, + Map aggState, + SearchContext context, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); this.aggState = aggState; this.mapScript = mapScript; @@ -55,8 +60,8 @@ public class ScriptedMetricAggregator extends MetricsAggregator { } @Override - public boolean needsScores() { - return true; // TODO: how can we know if the script relies on scores? + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; // TODO: how can we know if the script relies on scores? 
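ScriptedMetricAggregator cannot tell whether the user's map script reads _score, so unlike Max/MinAggregator it answers the new API conservatively with ScoreMode.COMPLETE (hence the surviving TODO). The companion Lucene change is visible just below: collector callbacks now hand over a Scorable, the slimmed-down score supplier that replaced Scorer in setScorer(). A sketch of a collector holding onto one (class and method names are illustrative):

```java
import java.io.IOException;
import org.apache.lucene.search.Scorable;

class ScoreForwardingLeafCollector {
    private Scorable scorer;

    // Matches the new callback shape: Scorable instead of Scorer.
    public void setScorer(Scorable scorer) throws IOException {
        this.scorer = scorer;
    }

    // All a score consumer may rely on: the current document's score.
    float currentScore() throws IOException {
        return scorer.score();
    }
}
```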
} @Override @@ -65,7 +70,7 @@ public class ScriptedMetricAggregator extends MetricsAggregator { final ScriptedMetricAggContexts.MapScript leafMapScript = mapScript.newInstance(ctx); return new LeafBucketCollectorBase(sub, leafMapScript) { @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { leafMapScript.setScorer(scorer); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java similarity index 77% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java index 076c29fecea..e08835f0bea 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.scripted; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.script.ScriptedMetricAggContexts; import org.elasticsearch.common.util.CollectionUtils; @@ -36,7 +36,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -public class ScriptedMetricAggregatorFactory extends AggregatorFactory { +class ScriptedMetricAggregatorFactory extends AggregatorFactory { private final ScriptedMetricAggContexts.MapScript.Factory mapScript; private final Map mapScriptParams; @@ -48,13 +48,13 @@ public class ScriptedMetricAggregatorFactory extends AggregatorFactory initScriptParams; - public ScriptedMetricAggregatorFactory(String name, - ScriptedMetricAggContexts.MapScript.Factory mapScript, Map mapScriptParams, - ScriptedMetricAggContexts.InitScript.Factory initScript, Map initScriptParams, - ScriptedMetricAggContexts.CombineScript.Factory combineScript, - Map combineScriptParams, Script reduceScript, Map aggParams, - SearchLookup lookup, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactories, Map metaData) throws IOException { + ScriptedMetricAggregatorFactory(String name, + ScriptedMetricAggContexts.MapScript.Factory mapScript, Map mapScriptParams, + ScriptedMetricAggContexts.InitScript.Factory initScript, Map initScriptParams, + ScriptedMetricAggContexts.CombineScript.Factory combineScript, + Map combineScriptParams, Script reduceScript, Map aggParams, + SearchLookup lookup, SearchContext context, AggregatorFactory parent, + AggregatorFactories.Builder subFactories, Map metaData) throws IOException { super(name, context, parent, subFactories, metaData); this.mapScript = mapScript; this.mapScriptParams = mapScriptParams; @@ -80,20 +80,7 @@ public class ScriptedMetricAggregatorFactory extends AggregatorFactory(); } - // Add _agg to params map for backwards compatibility (redundant with context variables on the scripts created below). - // When this is removed, aggState (as passed to ScriptedMetricAggregator) can be changed to Map, since - // it won't be possible to completely replace it with another type as is possible when it's an entry in params. 
- Object aggState = new HashMap(); - if (ScriptedMetricAggContexts.deprecatedAggParamEnabled()) { - if (aggParams.containsKey("_agg") == false) { - // Add _agg if it wasn't added manually - aggParams.put("_agg", aggState); - } else { - // If it was added manually, also use it for the agg context variable to reduce the likelihood of - // weird behavior due to multiple different variables. - aggState = aggParams.get("_agg"); - } - } + Map aggState = new HashMap(); final ScriptedMetricAggContexts.InitScript initScript = this.initScript.newInstance( mergeParams(aggParams, initScriptParams), aggState); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/Stats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Stats.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/Stats.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/Stats.java index 46620f51dc2..5b8be9390fd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/Stats.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Stats.java @@ -16,9 +16,8 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.stats; +package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; /** * Statistics over a set of values (either aggregated over field data or scripts) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java index 3d9d9e6c030..d96bbba4475 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.stats; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregator.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregator.java index 321e9e10f0f..1093ecb0692 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregator.java @@ -16,9 +16,10 @@ * specific language governing permissions and limitations * under the License. 
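The ScriptedMetricAggregatorFactory hunk just above drops the legacy _agg entry from the params map; since no script can replace the state object wholesale anymore, the shared state can be typed as a Map from creation through reduce. A rough sketch of the resulting lifecycle, with illustrative names only (not the production API):

    import java.util.HashMap;
    import java.util.Map;

    // Sketch: the factory now creates the state as a Map and hands the same
    // instance to the init, map and combine phases, so its type can no longer
    // change out from under the aggregator.
    final class AggStateSketch {
        public static void main(String[] args) {
            Map<String, Object> aggState = new HashMap<>();
            aggState.put("sum", 0.0d);                                  // init phase seeds the state
            aggState.put("sum", (Double) aggState.get("sum") + 42.0d);  // map phase mutates it in place
            System.out.println(aggState);                               // combine reads the same map
        }
    }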
*/ -package org.elasticsearch.search.aggregations.metrics.stats; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -29,7 +30,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -38,7 +38,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class StatsAggregator extends NumericMetricsAggregator.MultiValue { +class StatsAggregator extends NumericMetricsAggregator.MultiValue { final ValuesSource.Numeric valuesSource; final DocValueFormat format; @@ -50,10 +50,9 @@ public class StatsAggregator extends NumericMetricsAggregator.MultiValue { DoubleArray maxes; - public StatsAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat format, - SearchContext context, - Aggregator parent, List pipelineAggregators, - Map metaData) throws IOException { + StatsAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat format, + SearchContext context, Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); this.valuesSource = valuesSource; if (valuesSource != null) { @@ -70,8 +69,8 @@ public class StatsAggregator extends NumericMetricsAggregator.MultiValue { } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorFactory.java similarity index 89% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorFactory.java index a6e59d7c75b..82dce359037 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. 
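The needsScores()-to-scoreMode() conversion in the StatsAggregator hunk above recurs in nearly every aggregator in this diff. As a minimal sketch of the mapping (helper class hypothetical): the old boolean collapses onto two of Lucene's ScoreMode constants, and ScoreMode.needsScores() still recovers the boolean for callers that want it.

    import org.apache.lucene.search.ScoreMode;

    // Sketch of the migration applied throughout this diff: aggregators used
    // to answer "do I need scores?" with a boolean; with Lucene 8 they answer
    // with a ScoreMode, of which only two values are used here.
    final class ScoreModeSketch {
        static ScoreMode scoreMode(boolean needsScores) {
            return needsScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES;
        }

        public static void main(String[] args) {
            System.out.println(scoreMode(true));                 // COMPLETE
            System.out.println(scoreMode(false).needsScores());  // false
        }
    }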
*/ -package org.elasticsearch.search.aggregations.metrics.stats; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -33,9 +33,9 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class StatsAggregatorFactory extends ValuesSourceAggregatorFactory { +class StatsAggregatorFactory extends ValuesSourceAggregatorFactory { - public StatsAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, + StatsAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/Sum.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Sum.java similarity index 87% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/Sum.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/Sum.java index d9cacdba114..f499b3ecc6e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/Sum.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Sum.java @@ -16,9 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.sum; - -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +package org.elasticsearch.search.aggregations.metrics; /** * An aggregation that computes the sum of the values in the current bucket. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java index ed47f245111..8035a3ad671 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.sum; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregator.java similarity index 95% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregator.java index 9ed8103a1e1..07e91f5e12b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregator.java @@ -16,9 +16,10 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.sum; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -28,7 +29,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -37,7 +37,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class SumAggregator extends NumericMetricsAggregator.SingleValue { +class SumAggregator extends NumericMetricsAggregator.SingleValue { private final ValuesSource.Numeric valuesSource; private final DocValueFormat format; @@ -57,8 +57,8 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorFactory.java similarity index 89% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorFactory.java index 8b6103214a7..d8fa88541cb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.sum; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -33,9 +33,9 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class SumAggregatorFactory extends ValuesSourceAggregatorFactory { +class SumAggregatorFactory extends ValuesSourceAggregatorFactory { - public SumAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, + SumAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java similarity index 71% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java index 0e86eea6364..69e385151ea 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; @@ -29,12 +29,18 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class TDigestPercentileRanksAggregator extends AbstractTDigestPercentilesAggregator { +class TDigestPercentileRanksAggregator extends AbstractTDigestPercentilesAggregator { - public TDigestPercentileRanksAggregator(String name, Numeric valuesSource, SearchContext context, Aggregator parent, double[] percents, - double compression, boolean keyed, DocValueFormat formatter, List pipelineAggregators, - Map metaData) - throws IOException { + TDigestPercentileRanksAggregator(String name, + Numeric valuesSource, + SearchContext context, + Aggregator parent, + double[] percents, + double compression, + boolean keyed, + DocValueFormat formatter, + List pipelineAggregators, + Map metaData) throws IOException { super(name, valuesSource, context, parent, percents, compression, keyed, formatter, pipelineAggregators, metaData); } @@ -50,7 +56,8 @@ public class TDigestPercentileRanksAggregator extends AbstractTDigestPercentiles @Override public InternalAggregation buildEmptyAggregation() { - return new InternalTDigestPercentileRanks(name, keys, new TDigestState(compression), keyed, formatter, pipelineAggregators(), metaData()); + return new InternalTDigestPercentileRanks(name, keys, new TDigestState(compression), keyed, + formatter, pipelineAggregators(), metaData()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregatorFactory.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorFactory.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorFactory.java index 223d25216bc..10913bf59d1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -33,14 +33,14 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class TDigestPercentileRanksAggregatorFactory +class TDigestPercentileRanksAggregatorFactory extends ValuesSourceAggregatorFactory { private final double[] percents; private final double compression; private final boolean keyed; - public TDigestPercentileRanksAggregatorFactory(String name, ValuesSourceConfig config, double[] percents, + TDigestPercentileRanksAggregatorFactory(String name, ValuesSourceConfig config, double[] percents, double compression, boolean keyed, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregator.java similarity index 72% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregator.java index b7c1134e935..81bbe15e821 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregator.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; @@ -29,12 +29,18 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class TDigestPercentilesAggregator extends AbstractTDigestPercentilesAggregator { +class TDigestPercentilesAggregator extends AbstractTDigestPercentilesAggregator { - public TDigestPercentilesAggregator(String name, Numeric valuesSource, SearchContext context, - Aggregator parent, double[] percents, - double compression, boolean keyed, DocValueFormat formatter, List pipelineAggregators, - Map metaData) throws IOException { + TDigestPercentilesAggregator(String name, + Numeric valuesSource, + SearchContext context, + Aggregator parent, + double[] percents, + double compression, + boolean keyed, + DocValueFormat formatter, + List pipelineAggregators, + Map metaData) throws IOException { super(name, valuesSource, context, parent, percents, compression, keyed, formatter, pipelineAggregators, metaData); } @@ -60,6 +66,7 @@ public class TDigestPercentilesAggregator extends AbstractTDigestPercentilesAggr @Override public InternalAggregation buildEmptyAggregation() { - return new InternalTDigestPercentiles(name, keys, new TDigestState(compression), keyed, formatter, pipelineAggregators(), metaData()); + return new InternalTDigestPercentiles(name, keys, new TDigestState(compression), keyed, + formatter, pipelineAggregators(), metaData()); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorFactory.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorFactory.java index 47b17d84f3b..0c1396196fb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -33,14 +33,14 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class TDigestPercentilesAggregatorFactory +class TDigestPercentilesAggregatorFactory extends ValuesSourceAggregatorFactory { private final double[] percents; private final double compression; private final boolean keyed; - public TDigestPercentilesAggregatorFactory(String name, ValuesSourceConfig config, double[] percents, + TDigestPercentilesAggregatorFactory(String name, ValuesSourceConfig config, double[] percents, double compression, boolean keyed, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestState.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestState.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java index bcf000e5e09..33b967fca86 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestState.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import com.tdunning.math.stats.AVLTreeDigest; import com.tdunning.math.stats.Centroid; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHits.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHits.java similarity index 94% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHits.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHits.java index 565a80a13c8..7c1b84b750a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHits.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHits.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.tophits; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregation; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java similarity index 99% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java index 6b8ae8d79ca..38b783e6b95 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.tophits; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParsingException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java similarity index 59% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java index e59299754ae..c017eb4a5e3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java @@ -17,24 +17,29 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.tophits; +package org.elasticsearch.search.aggregations.metrics; import com.carrotsearch.hppc.LongObjectHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MultiCollector; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.search.MaxScoreCollector; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.LongObjectPagedHashMap; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -42,7 +47,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.MetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.FetchSearchResult; @@ -55,11 +59,23 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class TopHitsAggregator extends MetricsAggregator { +class TopHitsAggregator extends MetricsAggregator { + + private static class Collectors { + public final TopDocsCollector topDocsCollector; + public final MaxScoreCollector maxScoreCollector; + public final Collector collector; + + Collectors(TopDocsCollector topDocsCollector, MaxScoreCollector maxScoreCollector) { + this.topDocsCollector = topDocsCollector; + this.maxScoreCollector = maxScoreCollector; + collector = MultiCollector.wrap(topDocsCollector, maxScoreCollector); + } + } private final FetchPhase fetchPhase; private final SubSearchContext subSearchContext; - private final LongObjectPagedHashMap> topDocsCollectors; + private final LongObjectPagedHashMap topDocsCollectors; TopHitsAggregator(FetchPhase fetchPhase, SubSearchContext subSearchContext, String name, SearchContext context, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { @@ -70,13 +86,13 @@ public class TopHitsAggregator extends MetricsAggregator { } @Override - public boolean needsScores() { + public ScoreMode scoreMode() { SortAndFormats sort = subSearchContext.sort(); if (sort != null) { - return sort.sort.needsScores() || subSearchContext.trackScores(); + return sort.sort.needsScores() || subSearchContext.trackScores() ? 
ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } else { // sort by score - return true; + return ScoreMode.COMPLETE; } } @@ -89,10 +105,10 @@ public class TopHitsAggregator extends MetricsAggregator { final LongObjectHashMap leafCollectors = new LongObjectHashMap<>(1); return new LeafBucketCollectorBase(sub, null) { - Scorer scorer; + Scorable scorer; @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { this.scorer = scorer; super.setScorer(scorer); for (ObjectCursor cursor : leafCollectors.values()) { @@ -102,8 +118,8 @@ public class TopHitsAggregator extends MetricsAggregator { @Override public void collect(int docId, long bucket) throws IOException { - TopDocsCollector topDocsCollector = topDocsCollectors.get(bucket); - if (topDocsCollector == null) { + Collectors collectors = topDocsCollectors.get(bucket); + if (collectors == null) { SortAndFormats sort = subSearchContext.sort(); int topN = subSearchContext.from() + subSearchContext.size(); if (sort == null) { @@ -115,20 +131,21 @@ // but here we create collectors ourselves and we need to prevent OOM because of a crazy offset and size. topN = Math.min(topN, subSearchContext.searcher().getIndexReader().maxDoc()); if (sort == null) { - topDocsCollector = TopScoreDocCollector.create(topN); + collectors = new Collectors(TopScoreDocCollector.create(topN, Integer.MAX_VALUE), null); } else { // TODO: can we pass trackTotalHits=subSearchContext.trackTotalHits()? // Note that this would require catching CollectionTerminatedException - topDocsCollector = TopFieldCollector.create(sort.sort, topN, true, subSearchContext.trackScores(), - subSearchContext.trackScores(), true); + collectors = new Collectors( + TopFieldCollector.create(sort.sort, topN, Integer.MAX_VALUE), + subSearchContext.trackScores() ? new MaxScoreCollector() : null); } - topDocsCollectors.put(bucket, topDocsCollector); + topDocsCollectors.put(bucket, collectors); } final LeafCollector leafCollector; final int key = leafCollectors.indexOf(bucket); if (key < 0) { - leafCollector = topDocsCollector.getLeafCollector(ctx); + leafCollector = collectors.collector.getLeafCollector(ctx); if (scorer != null) { leafCollector.setScorer(scorer); } @@ -142,58 +159,65 @@ public class TopHitsAggregator extends MetricsAggregator { } @Override - public InternalAggregation buildAggregation(long owningBucketOrdinal) { - TopDocsCollector topDocsCollector = topDocsCollectors.get(owningBucketOrdinal); - final InternalTopHits topHits; - if (topDocsCollector == null) { - topHits = buildEmptyAggregation(); - } else { - TopDocs topDocs = topDocsCollector.topDocs(); - if (subSearchContext.sort() == null) { - for (RescoreContext ctx : context().rescore()) { - try { - topDocs = ctx.rescorer().rescore(topDocs, context.searcher(), ctx); - } catch (IOException e) { - throw new ElasticsearchException("Rescore TopHits Failed", e); - } - } - } - subSearchContext.queryResult().topDocs(topDocs, - subSearchContext.sort() == null ?
null : subSearchContext.sort().formats); - int[] docIdsToLoad = new int[topDocs.scoreDocs.length]; - for (int i = 0; i < topDocs.scoreDocs.length; i++) { - docIdsToLoad[i] = topDocs.scoreDocs[i].doc; - } - subSearchContext.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); - fetchPhase.execute(subSearchContext); - FetchSearchResult fetchResult = subSearchContext.fetchResult(); - SearchHit[] internalHits = fetchResult.fetchResult().hits().getHits(); - for (int i = 0; i < internalHits.length; i++) { - ScoreDoc scoreDoc = topDocs.scoreDocs[i]; - SearchHit searchHitFields = internalHits[i]; - searchHitFields.shard(subSearchContext.shardTarget()); - searchHitFields.score(scoreDoc.score); - if (scoreDoc instanceof FieldDoc) { - FieldDoc fieldDoc = (FieldDoc) scoreDoc; - searchHitFields.sortValues(fieldDoc.fields, subSearchContext.sort().formats); - } - } - topHits = new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocs, fetchResult.hits(), - pipelineAggregators(), metaData()); + public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { + Collectors collectors = topDocsCollectors.get(owningBucketOrdinal); + if (collectors == null) { + return buildEmptyAggregation(); } - return topHits; + TopDocsCollector topDocsCollector = collectors.topDocsCollector; + TopDocs topDocs = topDocsCollector.topDocs(); + float maxScore = Float.NaN; + if (subSearchContext.sort() == null) { + for (RescoreContext ctx : context().rescore()) { + try { + topDocs = ctx.rescorer().rescore(topDocs, context.searcher(), ctx); + } catch (IOException e) { + throw new ElasticsearchException("Rescore TopHits Failed", e); + } + } + if (topDocs.scoreDocs.length > 0) { + maxScore = topDocs.scoreDocs[0].score; + } + } else if (subSearchContext.trackScores()) { + TopFieldCollector.populateScores(topDocs.scoreDocs, subSearchContext.searcher(), subSearchContext.query()); + maxScore = collectors.maxScoreCollector.getMaxScore(); + } + final TopDocsAndMaxScore topDocsAndMaxScore = new TopDocsAndMaxScore(topDocs, maxScore); + subSearchContext.queryResult().topDocs(topDocsAndMaxScore, + subSearchContext.sort() == null ? 
null : subSearchContext.sort().formats); + int[] docIdsToLoad = new int[topDocs.scoreDocs.length]; + for (int i = 0; i < topDocs.scoreDocs.length; i++) { + docIdsToLoad[i] = topDocs.scoreDocs[i].doc; + } + subSearchContext.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); + fetchPhase.execute(subSearchContext); + FetchSearchResult fetchResult = subSearchContext.fetchResult(); + SearchHit[] internalHits = fetchResult.fetchResult().hits().getHits(); + for (int i = 0; i < internalHits.length; i++) { + ScoreDoc scoreDoc = topDocs.scoreDocs[i]; + SearchHit searchHitFields = internalHits[i]; + searchHitFields.shard(subSearchContext.shardTarget()); + searchHitFields.score(scoreDoc.score); + if (scoreDoc instanceof FieldDoc) { + FieldDoc fieldDoc = (FieldDoc) scoreDoc; + searchHitFields.sortValues(fieldDoc.fields, subSearchContext.sort().formats); + } + } + return new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocsAndMaxScore, fetchResult.hits(), + pipelineAggregators(), metaData()); } @Override public InternalTopHits buildEmptyAggregation() { TopDocs topDocs; if (subSearchContext.sort() != null) { - topDocs = new TopFieldDocs(0, new FieldDoc[0], subSearchContext.sort().sort.getSort(), Float.NaN); + topDocs = new TopFieldDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new FieldDoc[0], + subSearchContext.sort().sort.getSort()); } else { topDocs = Lucene.EMPTY_TOP_DOCS; } - return new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocs, SearchHits.empty(), - pipelineAggregators(), metaData()); + return new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), new TopDocsAndMaxScore(topDocs, Float.NaN), + SearchHits.empty(), pipelineAggregators(), metaData()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorFactory.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorFactory.java index 416c9846105..60869429551 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. 
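The TopHitsAggregator rewrite above replaces the bare per-bucket TopDocsCollector with a holder pairing it with an optional MaxScoreCollector, because Lucene 8's top-docs collectors no longer track the maximum score themselves. MultiCollector.wrap filters out null collectors, which is what lets the holder pass null when scores are not tracked. A sketch of that wiring (method and class names illustrative):

    import org.apache.lucene.search.Collector;
    import org.apache.lucene.search.MultiCollector;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.TopFieldCollector;
    import org.apache.lucene.search.TopScoreDocCollector;
    import org.elasticsearch.action.search.MaxScoreCollector;

    final class TopHitsCollectorSketch {
        static Collector forBucket(Sort sort, int topN, boolean trackScores) {
            if (sort == null) {
                // Relevance-sorted: the collector produces scores itself; the new
                // create() signature adds a total-hit-count threshold.
                return MultiCollector.wrap(TopScoreDocCollector.create(topN, Integer.MAX_VALUE), null);
            }
            // Field-sorted: TopFieldCollector no longer computes scores, so a
            // dedicated MaxScoreCollector gathers the max score when requested.
            return MultiCollector.wrap(
                TopFieldCollector.create(sort, topN, Integer.MAX_VALUE),
                trackScores ? new MaxScoreCollector() : null);
        }
    }

On the result side the same Lucene upgrade replaces the plain hit count with TotalHits, which is why the empty aggregation above is built from new TotalHits(0, TotalHits.Relation.EQUAL_TO) and the max score travels alongside the hits in TopDocsAndMaxScore.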
*/ -package org.elasticsearch.search.aggregations.metrics.tophits; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -38,7 +38,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; -public class TopHitsAggregatorFactory extends AggregatorFactory { +class TopHitsAggregatorFactory extends AggregatorFactory { private final int from; private final int size; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCount.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCount.java similarity index 87% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCount.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCount.java index a66d9827498..2c25254d65b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCount.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCount.java @@ -16,9 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.valuecount; - -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +package org.elasticsearch.search.aggregations.metrics; /** * An aggregation that holds the number of values that the current document set has for a specific diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java index a69efd76e42..70243cb8bc4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.valuecount; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregator.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregator.java index 99e7bdf769a..96a4cfe9305 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregator.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License.
*/ -package org.elasticsearch.search.aggregations.metrics.valuecount; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.common.lease.Releasables; @@ -27,7 +27,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -42,14 +41,14 @@ import java.util.Map; * This aggregator works in a multi-bucket mode, that is, when it serves as a sub-aggregator, a single aggregator instance aggregates the * counts for all buckets owned by the parent aggregator. -public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue { +class ValueCountAggregator extends NumericMetricsAggregator.SingleValue { final ValuesSource valuesSource; // a count per bucket LongArray counts; - public ValueCountAggregator(String name, ValuesSource valuesSource, + ValueCountAggregator(String name, ValuesSource valuesSource, SearchContext aggregationContext, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorFactory.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorFactory.java index 80c8001b93c..26f1760940d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License.
*/ -package org.elasticsearch.search.aggregations.metrics.valuecount; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -32,9 +32,9 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class ValueCountAggregatorFactory extends ValuesSourceAggregatorFactory { +class ValueCountAggregatorFactory extends ValuesSourceAggregatorFactory { - public ValueCountAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, + ValueCountAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvg.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvg.java similarity index 87% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvg.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvg.java index 7af48f677c1..cf52a8b6fe9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvg.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvg.java @@ -16,9 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.weighted_avg; - -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +package org.elasticsearch.search.aggregations.metrics; /** * An aggregation that computes the average of the values in the current bucket. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java index be06f792a5e..c3f67fb9052 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.weighted_avg; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregator.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregator.java index 7a34fe6df4a..08d06cf21ed 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregator.java @@ -16,9 +16,10 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.weighted_avg; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -29,7 +30,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.MultiValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -38,10 +38,10 @@ import java.io.IOException; import java.util.List; import java.util.Map; -import static org.elasticsearch.search.aggregations.metrics.weighted_avg.WeightedAvgAggregationBuilder.VALUE_FIELD; -import static org.elasticsearch.search.aggregations.metrics.weighted_avg.WeightedAvgAggregationBuilder.WEIGHT_FIELD; +import static org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder.VALUE_FIELD; +import static org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder.WEIGHT_FIELD; -public class WeightedAvgAggregator extends NumericMetricsAggregator.SingleValue { +class WeightedAvgAggregator extends NumericMetricsAggregator.SingleValue { private final MultiValuesSource.NumericMultiValuesSource valuesSources; @@ -51,9 +51,9 @@ public class WeightedAvgAggregator extends NumericMetricsAggregator.SingleValue private DoubleArray weightCompensations; private DocValueFormat format; - public WeightedAvgAggregator(String name, MultiValuesSource.NumericMultiValuesSource valuesSources, DocValueFormat format, - SearchContext context, Aggregator parent, List pipelineAggregators, - Map metaData) throws IOException { + WeightedAvgAggregator(String name, MultiValuesSource.NumericMultiValuesSource valuesSources, DocValueFormat format, + SearchContext context, Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); this.valuesSources = valuesSources; this.format = format; @@ -67,8 +67,8 @@ public class WeightedAvgAggregator extends 
NumericMetricsAggregator.SingleValue } @Override - public boolean needsScores() { - return valuesSources != null && valuesSources.needsScores(); + public ScoreMode scoreMode() { + return valuesSources != null && valuesSources.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java similarity index 82% rename from server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregatorFactory.java rename to server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java index c7aab73af28..afdb727c512 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.weighted_avg; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; @@ -34,12 +34,12 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class WeightedAvgAggregatorFactory extends MultiValuesSourceAggregatorFactory { +class WeightedAvgAggregatorFactory extends MultiValuesSourceAggregatorFactory { - public WeightedAvgAggregatorFactory(String name, Map> configs, - DocValueFormat format, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, - Map metaData) throws IOException { + WeightedAvgAggregatorFactory(String name, Map> configs, + DocValueFormat format, SearchContext context, AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { super(name, configs, format, context, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java index 5d13638f70a..97b43e26069 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java @@ -25,8 +25,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; -import org.elasticsearch.search.aggregations.metrics.max.InternalMax; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; +import org.elasticsearch.search.aggregations.metrics.InternalMax; +import org.elasticsearch.search.aggregations.metrics.Percentile; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/ParsedPercentilesBucket.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/ParsedPercentilesBucket.java index eebe296e531..c635ff82735 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/ParsedPercentilesBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/ParsedPercentilesBucket.java @@ -22,8 +22,8 @@ package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; +import org.elasticsearch.search.aggregations.metrics.ParsedPercentiles; +import org.elasticsearch.search.aggregations.metrics.Percentiles; import java.io.IOException; import java.util.Map.Entry; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucket.java index 64424ac5abc..0dfe9d24582 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucket.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; +import org.elasticsearch.search.aggregations.metrics.Percentiles; public interface PercentilesBucket extends Percentiles { } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/InternalStatsBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/InternalStatsBucket.java index 371e5bf5e84..352402fff82 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/InternalStatsBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/InternalStatsBucket.java @@ -22,7 +22,7 @@ package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.stats.InternalStats; +import org.elasticsearch.search.aggregations.metrics.InternalStats; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/ParsedStatsBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/ParsedStatsBucket.java index c7ddcc6ee96..84ec05f4eef 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/ParsedStatsBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/ParsedStatsBucket.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats; import 
org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.stats.ParsedStats; +import org.elasticsearch.search.aggregations.metrics.ParsedStats; public class ParsedStatsBucket extends ParsedStats implements StatsBucket { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucket.java index 0e158d2a122..c29a27b8446 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucket.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats; * under the License. */ -import org.elasticsearch.search.aggregations.metrics.stats.Stats; +import org.elasticsearch.search.aggregations.metrics.Stats; /** * Statistics over a set of buckets diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucket.java index f252cae37e9..9e3c7cf88f6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucket.java @@ -19,10 +19,10 @@ package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended; -import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; +import org.elasticsearch.search.aggregations.metrics.ExtendedStats; /** * Extended Statistics over a set of buckets */ public interface ExtendedStatsBucket extends ExtendedStats { -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucket.java index 5589a9ebbcb..c7f2943bfcf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucket.java @@ -22,7 +22,7 @@ package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.exten import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats; +import org.elasticsearch.search.aggregations.metrics.InternalExtendedStats; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; @@ -49,8 +49,7 @@ public class InternalExtendedStatsBucket extends InternalExtendedStats implement } @Override - public org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats doReduce( - List aggregations, ReduceContext reduceContext) { + public InternalExtendedStats doReduce(List aggregations, ReduceContext reduceContext) { throw new 
UnsupportedOperationException("Not supported"); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ParsedExtendedStatsBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ParsedExtendedStatsBucket.java index d2922492423..caa014c9b49 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ParsedExtendedStatsBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ParsedExtendedStatsBucket.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.exten import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.stats.extended.ParsedExtendedStats; +import org.elasticsearch.search.aggregations.metrics.ParsedExtendedStats; public class ParsedExtendedStatsBucket extends ParsedExtendedStats implements ExtendedStatsBucket { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java index 25e3d38af5b..4e6760f44fe 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java @@ -26,7 +26,7 @@ import org.apache.lucene.index.OrdinalMap; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.common.util.CollectionUtils; @@ -295,7 +295,7 @@ public abstract class ValuesSource { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } @@ -326,7 +326,7 @@ public abstract class ValuesSource { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } @@ -445,7 +445,7 @@ public abstract class ValuesSource { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java index 5ec1858487e..144e08ce6f2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.support.values; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; @@ -85,7 +85,7 @@ public class ScriptBytesValues extends SortingBinaryDocValues implements ScorerA } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } } diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java index 1227efb5ea0..4bb531c0d40 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.support.values; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.index.fielddata.SortingNumericDoubleValues; import org.elasticsearch.script.SearchScript; @@ -107,7 +107,7 @@ public class ScriptDoubleValues extends SortingNumericDoubleValues implements Sc } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java index cdc448bd041..c57afa1960d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.support.values; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.util.LongValues; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.index.fielddata.AbstractSortingNumericDocValues; @@ -106,7 +106,7 @@ public class ScriptLongValues extends AbstractSortingNumericDocValues implements } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } } diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index c42a1a12a18..92ae481a830 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -248,9 +248,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R profile = in.readBoolean(); searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new); sliceBuilder = in.readOptionalWriteable(SliceBuilder::new); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - collapse = in.readOptionalWriteable(CollapseBuilder::new); - } + collapse = in.readOptionalWriteable(CollapseBuilder::new); if (in.getVersion().onOrAfter(Version.V_6_0_0_beta1)) { trackTotalHits = in.readBoolean(); } else { @@ -313,9 +311,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R out.writeBoolean(profile); out.writeOptionalWriteable(searchAfterBuilder); out.writeOptionalWriteable(sliceBuilder); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeOptionalWriteable(collapse); - } + out.writeOptionalWriteable(collapse); if (out.getVersion().onOrAfter(Version.V_6_0_0_beta1)) { out.writeBoolean(trackTotalHits); } @@ -1154,9 +1150,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R } } - @Override - public XContentBuilder 
toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { if (from != -1) { builder.field(FROM_FIELD.getPreferredName(), from); } @@ -1294,6 +1288,13 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R if (collapse != null) { builder.field(COLLAPSE.getPreferredName(), collapse); } + return builder; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder, params); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java index ccab5e2cb93..2ebf413b140 100644 --- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.collapse; import org.apache.lucene.index.IndexOptions; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -94,31 +93,14 @@ public class CollapseBuilder implements Writeable, ToXContentObject { public CollapseBuilder(StreamInput in) throws IOException { this.field = in.readString(); this.maxConcurrentGroupRequests = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - this.innerHits = in.readList(InnerHitBuilder::new); - } else { - InnerHitBuilder innerHitBuilder = in.readOptionalWriteable(InnerHitBuilder::new); - if (innerHitBuilder != null) { - this.innerHits = Collections.singletonList(innerHitBuilder); - } else { - this.innerHits = Collections.emptyList(); - } - } + this.innerHits = in.readList(InnerHitBuilder::new); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(field); out.writeVInt(maxConcurrentGroupRequests); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeList(innerHits); - } else { - boolean hasInnerHit = innerHits.isEmpty() == false; - out.writeBoolean(hasInnerHit); - if (hasInnerHit) { - innerHits.get(0).writeToCollapseBWC(out); - } - } + out.writeList(innerHits); } public static CollapseBuilder fromXContent(XContentParser parser) { diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java index 82a7657f180..4d8a1ba63ba 100644 --- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java +++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java @@ -60,11 +60,11 @@ public class CollapseContext { return innerHits; } - public CollapsingTopDocsCollector createTopDocs(Sort sort, int topN, boolean trackMaxScore) { + public CollapsingTopDocsCollector createTopDocs(Sort sort, int topN) { if (fieldType instanceof KeywordFieldMapper.KeywordFieldType) { - return CollapsingTopDocsCollector.createKeyword(fieldType.name(), sort, topN, trackMaxScore); + return CollapsingTopDocsCollector.createKeyword(fieldType.name(), sort, topN); } else if (fieldType instanceof NumberFieldMapper.NumberFieldType) { - return CollapsingTopDocsCollector.createNumeric(fieldType.name(), sort, topN, trackMaxScore); + return CollapsingTopDocsCollector.createNumeric(fieldType.name(), sort, 
topN); } else { throw new IllegalStateException("unknown type for collapse field " + fieldType.name() + ", only keywords and numbers are accepted"); diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java index fa7e611348d..0b7d8da481c 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java @@ -25,8 +25,9 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermContext; +import org.apache.lucene.index.TermStates; import org.apache.lucene.search.CollectionStatistics; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TermStatistics; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.search.SearchPhase; @@ -53,7 +54,8 @@ public class DfsPhase implements SearchPhase { public void execute(SearchContext context) { final ObjectHashSet termsSet = new ObjectHashSet<>(); try { - context.searcher().createNormalizedWeight(context.query(), true).extractTerms(new DelegateSet(termsSet)); + context.searcher().createWeight(context.searcher().rewrite(context.query()), ScoreMode.COMPLETE, 1f) + .extractTerms(new DelegateSet(termsSet)); for (RescoreContext rescoreContext : context.rescore()) { try { rescoreContext.rescorer().extractTerms(context.searcher(), rescoreContext, new DelegateSet(termsSet)); @@ -69,17 +71,19 @@ public class DfsPhase implements SearchPhase { if(context.isCancelled()) { throw new TaskCancelledException("cancelled"); } - // LUCENE 4 UPGRADE: cache TermContext? - TermContext termContext = TermContext.build(indexReaderContext, terms[i]); + // LUCENE 4 UPGRADE: cache TermStates? 
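
The DfsPhase hunk above shows the recurring Lucene 8 migration in this change: `IndexSearcher.createNormalizedWeight(query, needsScores)` is gone, so callers now rewrite the query themselves and pass an explicit `ScoreMode` plus a boost. A minimal sketch of the before/after shape (the helper class and method name here are illustrative, not part of this change):

    import java.io.IOException;

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.ScoreMode;
    import org.apache.lucene.search.Weight;

    // Hypothetical helper: one place to translate the old boolean into a ScoreMode.
    final class Weights {
        // Lucene 7: searcher.createNormalizedWeight(query, needsScores)
        static Weight create(IndexSearcher searcher, Query query, boolean needsScores) throws IOException {
            return searcher.createWeight(searcher.rewrite(query),
                    needsScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES, 1f);
        }
    }

The same substitution appears below in FetchPhase, InnerHitsContext, MatchedQueriesFetchSubPhase, and QueryCollectorContext, each picking COMPLETE or COMPLETE_NO_SCORES depending on whether scores are actually consumed.
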
+ TermStates termContext = TermStates.build(indexReaderContext, terms[i], true); termStatistics[i] = context.searcher().termStatistics(terms[i], termContext); } ObjectObjectHashMap fieldStatistics = HppcMaps.newNoNullKeysMap(); for (Term term : terms) { assert term.field() != null : "field is null"; - if (!fieldStatistics.containsKey(term.field())) { + if (fieldStatistics.containsKey(term.field()) == false) { final CollectionStatistics collectionStatistics = context.searcher().collectionStatistics(term.field()); - fieldStatistics.put(term.field(), collectionStatistics); + if (collectionStatistics != null) { + fieldStatistics.put(term.field(), collectionStatistics); + } if(context.isCancelled()) { throw new TaskCancelledException("cancelled"); } diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java index 0cd624b00a3..8de89089c4f 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -124,9 +125,16 @@ public class DfsSearchResult extends SearchPhaseResult { CollectionStatistics statistics = c.value; assert statistics.maxDoc() >= 0; out.writeVLong(statistics.maxDoc()); - out.writeVLong(addOne(statistics.docCount())); - out.writeVLong(addOne(statistics.sumTotalTermFreq())); - out.writeVLong(addOne(statistics.sumDocFreq())); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + // stats are always positive numbers + out.writeVLong(statistics.docCount()); + out.writeVLong(statistics.sumTotalTermFreq()); + out.writeVLong(statistics.sumDocFreq()); + } else { + out.writeVLong(addOne(statistics.docCount())); + out.writeVLong(addOne(statistics.sumTotalTermFreq())); + out.writeVLong(addOne(statistics.sumDocFreq())); + } } } @@ -138,9 +146,14 @@ public class DfsSearchResult extends SearchPhaseResult { } public static void writeSingleTermStats(StreamOutput out, TermStatistics termStatistic) throws IOException { - assert termStatistic.docFreq() >= 0; - out.writeVLong(termStatistic.docFreq()); - out.writeVLong(addOne(termStatistic.totalTermFreq())); + if (termStatistic != null) { + assert termStatistic.docFreq() > 0; + out.writeVLong(termStatistic.docFreq()); + out.writeVLong(addOne(termStatistic.totalTermFreq())); + } else { + out.writeVLong(0); + out.writeVLong(0); + } } public static ObjectObjectHashMap readFieldStats(StreamInput in) throws IOException { @@ -156,9 +169,19 @@ public class DfsSearchResult extends SearchPhaseResult { final String field = in.readString(); assert field != null; final long maxDoc = in.readVLong(); - final long docCount = subOne(in.readVLong()); - final long sumTotalTermFreq = subOne(in.readVLong()); - final long sumDocFreq = subOne(in.readVLong()); + final long docCount; + final long sumTotalTermFreq; + final long sumDocFreq; + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + // stats are always positive numbers + docCount = in.readVLong(); + sumTotalTermFreq = in.readVLong(); + sumDocFreq = in.readVLong(); + } else { + docCount = subOne(in.readVLong()); + 
sumTotalTermFreq = subOne(in.readVLong()); + sumDocFreq = subOne(in.readVLong()); + } CollectionStatistics stats = new CollectionStatistics(field, maxDoc, docCount, sumTotalTermFreq, sumDocFreq); fieldStatistics.put(field, stats); } @@ -178,6 +201,9 @@ public class DfsSearchResult extends SearchPhaseResult { final long docFreq = in.readVLong(); assert docFreq >= 0; final long totalTermFreq = subOne(in.readVLong()); + if (docFreq == 0) { + continue; + } termStatistics[i] = new TermStatistics(term, docFreq, totalTermFreq); } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 64ed5f44795..69ac9049686 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -23,7 +23,10 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.TotalHits.Relation; import org.apache.lucene.search.Weight; import org.apache.lucene.util.BitSet; import org.elasticsearch.ExceptionsHelper; @@ -168,7 +171,9 @@ public class FetchPhase implements SearchPhase { } } - context.fetchResult().hits(new SearchHits(hits, context.queryResult().getTotalHits(), context.queryResult().getMaxScore())); + TotalHits totalHits = context.queryResult().getTotalHits(); + long totalHitsAsLong = totalHits.relation == Relation.EQUAL_TO ? totalHits.value : -1; + context.fetchResult().hits(new SearchHits(hits, totalHitsAsLong, context.queryResult().getMaxScore())); } catch (IOException e) { throw ExceptionsHelper.convertToElastic(e); } @@ -192,20 +197,15 @@ public class FetchPhase implements SearchPhase { int subDocId, Map> storedToRequestedFields, LeafReaderContext subReaderContext) { + DocumentMapper documentMapper = context.mapperService().documentMapper(); + Text typeText = documentMapper.typeText(); if (fieldsVisitor == null) { - return new SearchHit(docId); + return new SearchHit(docId, null, typeText, null); } Map searchFields = getSearchFields(context, fieldsVisitor, subDocId, storedToRequestedFields, subReaderContext); - DocumentMapper documentMapper = context.mapperService().documentMapper(fieldsVisitor.uid().type()); - Text typeText; - if (documentMapper == null) { - typeText = new Text(fieldsVisitor.uid().type()); - } else { - typeText = documentMapper.typeText(); - } SearchHit searchHit = new SearchHit(docId, fieldsVisitor.uid().id(), typeText, searchFields); // Set _source if requested. 
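
A note on the FetchPhase change above: in Lucene 8, `TopDocs.totalHits` is a `TotalHits` object carrying a count plus a relation, while `SearchHits` still stores a single `long`. The hunk therefore collapses the pair back into the legacy encoding, where `-1` means the count is only a lower bound. A sketch of that rule (class and method names chosen here for illustration):

    import org.apache.lucene.search.TotalHits;

    final class TotalHitsUtil {
        // -1 preserves the old convention for "at least this many hits",
        // which occurs when hit counting terminated early.
        static long toLegacy(TotalHits totalHits) {
            return totalHits.relation == TotalHits.Relation.EQUAL_TO ? totalHits.value : -1;
        }
    }
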
SourceLookup sourceLookup = context.lookup().source(); @@ -275,7 +275,7 @@ public class FetchPhase implements SearchPhase { storedToRequestedFields, subReaderContext); } - DocumentMapper documentMapper = context.mapperService().documentMapper(uid.type()); + DocumentMapper documentMapper = context.mapperService().documentMapper(); SourceLookup sourceLookup = context.lookup().source(); sourceLookup.setSegmentAndDocument(subReaderContext, nestedSubDocId); @@ -362,7 +362,8 @@ public class FetchPhase implements SearchPhase { current = nestedParentObjectMapper; continue; } - final Weight childWeight = context.searcher().createNormalizedWeight(childFilter, false); + final Weight childWeight = context.searcher() + .createWeight(context.searcher().rewrite(childFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer childScorer = childWeight.scorer(subReaderContext); if (childScorer == null) { current = nestedParentObjectMapper; diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java index 2da74c56f6a..a7f333abfa2 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java @@ -57,6 +57,7 @@ public final class FetchSourceSubPhase implements FetchSubPhase { if (nestedHit) { value = getNestedSource((Map) value, hitContext); } + try { final int initialCapacity = nestedHit ? 1024 : Math.min(1024, source.internalSourceRef().length()); BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity); @@ -81,6 +82,9 @@ public final class FetchSourceSubPhase implements FetchSubPhase { private Map getNestedSource(Map sourceAsMap, HitContext hitContext) { for (SearchHit.NestedIdentity o = hitContext.hit().getNestedIdentity(); o != null; o = o.getChild()) { sourceAsMap = (Map) sourceAsMap.get(o.getField().string()); + if (sourceAsMap == null) { + return null; + } } return sourceAsMap; } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java index d3b1da7c937..48f2f1299c2 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java @@ -25,11 +25,13 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.ConjunctionDISI; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.internal.SearchContext; @@ -87,7 +89,7 @@ public final class InnerHitsContext { this.context = context; } - public abstract TopDocs[] topDocs(SearchHit[] hits) throws IOException; + public abstract TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException; public String getName() { return name; @@ -104,7 +106,8 @@ public final class InnerHitsContext { protected Weight createInnerHitQueryWeight() throws IOException { final boolean needsScores 
= size() != 0 && (sort() == null || sort().sort.needsScores()); - return context.searcher().createNormalizedWeight(query(), needsScores); + return context.searcher().createWeight(context.searcher().rewrite(query()), + needsScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES, 1f); } public SearchContext parentSearchContext() { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java index 75d6211aca4..4d34a3afa62 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.fetch.subphase; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TopDocs; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -50,19 +50,19 @@ public final class InnerHitsFetchSubPhase implements FetchSubPhase { for (Map.Entry entry : context.innerHits().getInnerHits().entrySet()) { InnerHitsContext.InnerHitSubContext innerHits = entry.getValue(); - TopDocs[] topDocs = innerHits.topDocs(hits); + TopDocsAndMaxScore[] topDocs = innerHits.topDocs(hits); for (int i = 0; i < hits.length; i++) { SearchHit hit = hits[i]; - TopDocs topDoc = topDocs[i]; + TopDocsAndMaxScore topDoc = topDocs[i]; Map results = hit.getInnerHits(); if (results == null) { hit.setInnerHits(results = new HashMap<>()); } innerHits.queryResult().topDocs(topDoc, innerHits.sort() == null ? 
null : innerHits.sort().formats); - int[] docIdsToLoad = new int[topDoc.scoreDocs.length]; - for (int j = 0; j < topDoc.scoreDocs.length; j++) { - docIdsToLoad[j] = topDoc.scoreDocs[j].doc; + int[] docIdsToLoad = new int[topDoc.topDocs.scoreDocs.length]; + for (int j = 0; j < topDoc.topDocs.scoreDocs.length; j++) { + docIdsToLoad[j] = topDoc.topDocs.scoreDocs[j].doc; } innerHits.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); innerHits.setUid(new Uid(hit.getType(), hit.getId())); @@ -70,7 +70,7 @@ public final class InnerHitsFetchSubPhase implements FetchSubPhase { FetchSearchResult fetchResult = innerHits.fetchResult(); SearchHit[] internalHits = fetchResult.fetchResult().hits().getHits(); for (int j = 0; j < internalHits.length; j++) { - ScoreDoc scoreDoc = topDoc.scoreDocs[j]; + ScoreDoc scoreDoc = topDoc.topDocs.scoreDocs[j]; SearchHit searchHitFields = internalHits[j]; searchHitFields.score(scoreDoc.score); if (scoreDoc instanceof FieldDoc) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java index c28e07ff455..c2f6980781d 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; @@ -67,7 +68,7 @@ public final class MatchedQueriesFetchSubPhase implements FetchSubPhase { Query query = entry.getValue(); int readerIndex = -1; int docBase = -1; - Weight weight = context.searcher().createNormalizedWeight(query, false); + Weight weight = context.searcher().createWeight(context.searcher().rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); Bits matchingDocs = null; final IndexReader indexReader = context.searcher().getIndexReader(); for (int i = 0; i < hits.length; ++i) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScoreFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScoreFetchSubPhase.java new file mode 100644 index 00000000000..3a6db72d5b3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScoreFetchSubPhase.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.fetch.subphase; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScorerSupplier; +import org.apache.lucene.search.Weight; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.fetch.FetchSubPhase; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Iterator; + +public class ScoreFetchSubPhase implements FetchSubPhase { + + @Override + public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { + if (context.trackScores() == false || hits.length == 0 || + // scores were already computed since they are needed on the coordinating node to merge top hits + context.sort() == null) { + return; + } + + hits = hits.clone(); // don't modify the incoming hits + Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId)); + + final IndexSearcher searcher = context.searcher(); + final Weight weight = searcher.createWeight(searcher.rewrite(context.query()), ScoreMode.COMPLETE, 1); + Iterator<LeafReaderContext> leafContextIterator = searcher.getIndexReader().leaves().iterator(); + LeafReaderContext leafContext = null; + Scorer scorer = null; + for (SearchHit hit : hits) { + if (leafContext == null || leafContext.docBase + leafContext.reader().maxDoc() <= hit.docId()) { + do { + leafContext = leafContextIterator.next(); + } while (leafContext == null || leafContext.docBase + leafContext.reader().maxDoc() <= hit.docId()); + ScorerSupplier scorerSupplier = weight.scorerSupplier(leafContext); + if (scorerSupplier == null) { + throw new IllegalStateException("Can't compute score on document " + hit + " as it doesn't match the query"); + } + scorer = scorerSupplier.get(1L); // random-access + } + + final int leafDocID = hit.docId() - leafContext.docBase; + assert leafDocID >= 0 && leafDocID < leafContext.reader().maxDoc(); + int advanced = scorer.iterator().advance(leafDocID); + if (advanced != leafDocID) { + throw new IllegalStateException("Can't compute score on document " + hit + " as it doesn't match the query"); + } + hit.score(scorer.score()); + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index 7888f6cd5a0..161ca9279f0 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.fetch.subphase.highlight; import org.apache.lucene.search.highlight.SimpleFragmenter; import org.apache.lucene.search.highlight.SimpleSpanFragmenter; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -152,17 +151,13 @@ public abstract class AbstractHighlighterBuilder diff --git a/server/src/main/java/org/elasticsearch/search/internal/ScrollContext.java b/server/src/main/java/org/elasticsearch/search/internal/ScrollContext.java private Map<String, Object> context = null; - public long totalHits = -1; - public float maxScore; + public TotalHits totalHits = null; + public float maxScore = Float.NaN; public ScoreDoc lastEmittedDoc; public Scroll scroll;
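
The ScrollContext hunk above replaces sentinel values: `totalHits` moves from `-1` in a `long` to `null` in a `TotalHits`, and `maxScore` now defaults to `Float.NaN`. QueryPhase and the scrolling collector context later in this diff test those sentinels to recognize the first scroll round. A rough sketch of that bookkeeping under the new convention (names illustrative, not from this PR):

    import org.apache.lucene.search.TotalHits;

    // Stand-in for the ScrollContext fields above: null / NaN mean "not computed
    // yet"; the first scroll round records the totals, later rounds reuse them.
    final class ScrollTotals {
        TotalHits totalHits = null;
        float maxScore = Float.NaN;

        void onRound(TotalHits hits, float max) {
            if (totalHits == null) { // first round
                totalHits = hits;
                maxScore = max;
            }
        }
    }
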
diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index cf656ed3b9c..72a12b805eb 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -35,7 +35,6 @@ import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; -import java.util.Optional; /** * Shard level search request that gets created and consumed on the local node. @@ -213,25 +212,10 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { source = in.readOptionalWriteable(SearchSourceBuilder::new); types = in.readStringArray(); aliasFilter = new AliasFilter(in); - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - indexBoost = in.readFloat(); - } else { - // Nodes < 5.2.0 doesn't send index boost. Read it from source. - if (source != null) { - Optional boost = source.indexBoosts() - .stream() - .filter(ib -> ib.getIndex().equals(shardId.getIndexName())) - .findFirst(); - indexBoost = boost.isPresent() ? boost.get().getBoost() : 1.0f; - } else { - indexBoost = 1.0f; - } - } + indexBoost = in.readFloat(); nowInMillis = in.readVLong(); requestCache = in.readOptionalBoolean(); - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - clusterAlias = in.readOptionalString(); - } + clusterAlias = in.readOptionalString(); if (in.getVersion().onOrAfter(Version.V_6_3_0)) { allowPartialSearchResults = in.readOptionalBoolean(); } @@ -254,16 +238,12 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { out.writeOptionalWriteable(source); out.writeStringArray(types); aliasFilter.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeFloat(indexBoost); - } + out.writeFloat(indexBoost); if (asKey == false) { out.writeVLong(nowInMillis); } out.writeOptionalBoolean(requestCache); - if (out.getVersion().onOrAfter(Version.V_5_6_0)) { - out.writeOptionalString(clusterAlias); - } + out.writeOptionalString(clusterAlias); if (out.getVersion().onOrAfter(Version.V_6_3_0)) { out.writeOptionalBoolean(allowPartialSearchResults); } diff --git a/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java b/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java index 0cdeb458a30..16388fa789a 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.profile.aggregation; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; @@ -45,8 +46,8 @@ public class ProfilingAggregator extends Aggregator { } @Override - public boolean needsScores() { - return delegate.needsScores(); + public ScoreMode scoreMode() { + return delegate.scoreMode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java b/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java index 4db67967dcb..cc84b1cfb66 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java
+++ b/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.profile.aggregation; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.profile.Timer; @@ -46,7 +46,7 @@ public class ProfilingLeafBucketCollector extends LeafBucketCollector { } @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { delegate.setScorer(scorer); } diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/InternalProfileCollector.java b/server/src/main/java/org/elasticsearch/search/profile/query/InternalProfileCollector.java index e892abaab22..993d91ab7a1 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/InternalProfileCollector.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/InternalProfileCollector.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.profile.query; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.ScoreMode; import java.io.IOException; import java.util.ArrayList; @@ -116,8 +117,8 @@ public class InternalProfileCollector implements Collector { } @Override - public boolean needsScores() { - return collector.needsScores(); + public ScoreMode scoreMode() { + return collector.scoreMode(); } public CollectorResult getCollectorTree() { diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileCollector.java b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileCollector.java index ea8dbb2f335..b900cb04f79 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileCollector.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileCollector.java @@ -24,7 +24,8 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.FilterCollector; import org.apache.lucene.search.FilterLeafCollector; import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.ScoreMode; import java.io.IOException; @@ -44,10 +45,10 @@ final class ProfileCollector extends FilterCollector { } @Override - public boolean needsScores() { + public ScoreMode scoreMode() { final long start = System.nanoTime(); try { - return super.needsScores(); + return super.scoreMode(); } finally { time += Math.max(1, System.nanoTime() - start); } @@ -75,7 +76,7 @@ final class ProfileCollector extends FilterCollector { } @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { final long start = System.nanoTime(); try { super.setScorer(scorer); diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java index 66e0e0fe77c..7899750461e 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java @@ -36,7 +36,7 @@ final class ProfileScorer extends Scorer { private final Scorer scorer; private ProfileWeight profileWeight; - private final Timer scoreTimer, nextDocTimer, advanceTimer, 
matchTimer; + private final Timer scoreTimer, nextDocTimer, advanceTimer, matchTimer, shallowAdvanceTimer, computeMaxScoreTimer; ProfileScorer(ProfileWeight w, Scorer scorer, QueryProfileBreakdown profile) throws IOException { super(w); @@ -46,6 +46,8 @@ final class ProfileScorer extends Scorer { nextDocTimer = profile.getTimer(QueryTimingType.NEXT_DOC); advanceTimer = profile.getTimer(QueryTimingType.ADVANCE); matchTimer = profile.getTimer(QueryTimingType.MATCH); + shallowAdvanceTimer = profile.getTimer(QueryTimingType.SHALLOW_ADVANCE); + computeMaxScoreTimer = profile.getTimer(QueryTimingType.COMPUTE_MAX_SCORE); } @Override @@ -69,7 +71,7 @@ final class ProfileScorer extends Scorer { } @Override - public Collection getChildren() throws IOException { + public Collection getChildren() throws IOException { return scorer.getChildren(); } @@ -166,4 +168,24 @@ final class ProfileScorer extends Scorer { } }; } + + @Override + public int advanceShallow(int target) throws IOException { + shallowAdvanceTimer.start(); + try { + return scorer.advanceShallow(target); + } finally { + shallowAdvanceTimer.stop(); + } + } + + @Override + public float getMaxScore(int upTo) throws IOException { + computeMaxScoreTimer.start(); + try { + return scorer.getMaxScore(upTo); + } finally { + computeMaxScoreTimer.stop(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java b/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java index 5f194a7d5f1..146bd8f07bc 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java @@ -27,7 +27,9 @@ public enum QueryTimingType { NEXT_DOC, ADVANCE, MATCH, - SCORE; + SCORE, + SHALLOW_ADVANCE, + COMPUTE_MAX_SCORE; @Override public String toString() { diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java b/server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java index ff80dda77fb..f0c94bd822e 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.common.lucene.MinimumScoreCollector; import org.elasticsearch.common.lucene.search.FilteredCollector; @@ -114,7 +115,7 @@ abstract class QueryCollectorContext { return new QueryCollectorContext(REASON_SEARCH_POST_FILTER) { @Override Collector create(Collector in ) throws IOException { - final Weight filterWeight = searcher.createNormalizedWeight(query, false); + final Weight filterWeight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); return new FilteredCollector(in, filterWeight); } }; diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index ca06005448c..e4f0aa6898a 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import 
org.apache.lucene.search.Collector; import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.EarlyTerminatingSortingCollector; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -35,9 +34,11 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.Counter; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.QueueResizingEsThreadPoolExecutor; @@ -94,8 +95,8 @@ public class QueryPhase implements SearchPhase { if (searchContext.hasOnlySuggest()) { suggestPhase.execute(searchContext); // TODO: fix this once we can fetch docs for suggestions - searchContext.queryResult().topDocs( - new TopDocs(0, Lucene.EMPTY_SCORE_DOCS, 0), + searchContext.queryResult().topDocs(new TopDocsAndMaxScore( + new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN), new DocValueFormat[0]); return; } @@ -138,7 +139,7 @@ final ScrollContext scrollContext = searchContext.scrollContext(); if (scrollContext != null) { - if (scrollContext.totalHits == -1) { + if (scrollContext.totalHits == null) { // first round assert scrollContext.lastEmittedDoc == null; // there is not much that we can optimize here since we want to collect all @@ -268,7 +269,7 @@ queryResult.terminatedEarly(true); } catch (TimeExceededException e) { assert timeoutSet : "TimeExceededException thrown even though timeout wasn't set"; - + if (searchContext.request().allowPartialSearchResults() == false) { // Can't rethrow TimeExceededException because not serializable throw new QueryPhaseExecutionException(searchContext, "Time exceeded"); @@ -327,7 +328,7 @@ final Sort sort = sortAndFormats.sort; for (LeafReaderContext ctx : reader.leaves()) { Sort indexSort = ctx.reader().getMetaData().getSort(); - if (indexSort == null || EarlyTerminatingSortingCollector.canEarlyTerminate(sort, indexSort) == false) { + if (indexSort == null || Lucene.canEarlyTerminate(sort, indexSort) == false) { return false; } } diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 83c43d10172..2aded57ece0 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -20,10 +20,11 @@ package org.elasticsearch.search.query; import org.apache.lucene.search.FieldDoc; -import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget;
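
Lucene 8 also removed `maxScore` from `TopDocs`, which is why QuerySearchResult below switches its field to the new `TopDocsAndMaxScore` wrapper. The wrapper's own diff is not part of this excerpt; a hedged reconstruction of its shape, inferred from how it is used here:

    import org.apache.lucene.search.TopDocs;

    // Reconstructed for illustration only. Both fields stay mutable on purpose:
    // scroll rounds patch totalHits and maxScore in place, as the
    // ScrollingTopDocsCollectorContext changes further below show.
    public final class TopDocsAndMaxScore {
        public TopDocs topDocs;
        public float maxScore; // Float.NaN when scores are not tracked

        public TopDocsAndMaxScore(TopDocs topDocs, float maxScore) {
            this.topDocs = topDocs;
            this.maxScore = maxScore;
        }
    }
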
@@ -46,7 +47,10 @@ public final class QuerySearchResult extends SearchPhaseResult { private int from; private int size; - private TopDocs topDocs; + private TopDocsAndMaxScore topDocsAndMaxScore; + private boolean hasScoreDocs; + private TotalHits totalHits; + private float maxScore = Float.NaN; private DocValueFormat[] sortValueFormats; private InternalAggregations aggregations; private boolean hasAggs; @@ -56,9 +60,6 @@ public final class QuerySearchResult extends SearchPhaseResult { private Boolean terminatedEarly = null; private ProfileShardResult profileShardResults; private boolean hasProfileResults; - private boolean hasScoreDocs; - private long totalHits; - private float maxScore; private long serviceTimeEWMA = -1; private int nodeQueueSize = -1; @@ -92,37 +93,37 @@ public final class QuerySearchResult extends SearchPhaseResult { return this.terminatedEarly; } - public TopDocs topDocs() { - if (topDocs == null) { + public TopDocsAndMaxScore topDocs() { + if (topDocsAndMaxScore == null) { throw new IllegalStateException("topDocs already consumed"); } - return topDocs; + return topDocsAndMaxScore; } /** * Returns true iff the top docs have already been consumed. */ public boolean hasConsumedTopDocs() { - return topDocs == null; + return topDocsAndMaxScore == null; } /** * Returns and nulls out the top docs for this search results. This allows to free up memory once the top docs are consumed. * @throws IllegalStateException if the top docs have already been consumed. */ - public TopDocs consumeTopDocs() { - TopDocs topDocs = this.topDocs; - if (topDocs == null) { + public TopDocsAndMaxScore consumeTopDocs() { + TopDocsAndMaxScore topDocsAndMaxScore = this.topDocsAndMaxScore; + if (topDocsAndMaxScore == null) { throw new IllegalStateException("topDocs already consumed"); } - this.topDocs = null; - return topDocs; + this.topDocsAndMaxScore = null; + return topDocsAndMaxScore; } - public void topDocs(TopDocs topDocs, DocValueFormat[] sortValueFormats) { + public void topDocs(TopDocsAndMaxScore topDocs, DocValueFormat[] sortValueFormats) { setTopDocs(topDocs); - if (topDocs.scoreDocs.length > 0 && topDocs.scoreDocs[0] instanceof FieldDoc) { - int numFields = ((FieldDoc) topDocs.scoreDocs[0]).fields.length; + if (topDocs.topDocs.scoreDocs.length > 0 && topDocs.topDocs.scoreDocs[0] instanceof FieldDoc) { + int numFields = ((FieldDoc) topDocs.topDocs.scoreDocs[0]).fields.length; if (numFields != sortValueFormats.length) { throw new IllegalArgumentException("The number of sort fields does not match: " + numFields + " != " + sortValueFormats.length); @@ -131,11 +132,11 @@ public final class QuerySearchResult extends SearchPhaseResult { this.sortValueFormats = sortValueFormats; } - private void setTopDocs(TopDocs topDocs) { - this.topDocs = topDocs; - hasScoreDocs = topDocs.scoreDocs.length > 0; - this.totalHits = topDocs.totalHits; - this.maxScore = topDocs.getMaxScore(); + private void setTopDocs(TopDocsAndMaxScore topDocsAndMaxScore) { + this.topDocsAndMaxScore = topDocsAndMaxScore; + this.totalHits = topDocsAndMaxScore.topDocs.totalHits; + this.maxScore = topDocsAndMaxScore.maxScore; + this.hasScoreDocs = topDocsAndMaxScore.topDocs.scoreDocs.length > 0; } public DocValueFormat[] sortValueFormats() { @@ -326,7 +327,7 @@ public final class QuerySearchResult extends SearchPhaseResult { out.writeNamedWriteable(sortValueFormats[i]); } } - writeTopDocs(out, topDocs); + writeTopDocs(out, topDocsAndMaxScore); if (aggregations == null) { out.writeBoolean(false); } else { @@ -349,7 +350,7 @@ public
final class QuerySearchResult extends SearchPhaseResult { } } - public long getTotalHits() { + public TotalHits getTotalHits() { return totalHits; } diff --git a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java index dc110b27977..3aaa640f62f 100644 --- a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java @@ -37,9 +37,14 @@ import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.apache.lucene.search.grouping.CollapsingTopDocsCollector; +import org.elasticsearch.action.search.MaxScoreCollector; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.common.util.CachedSupplier; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.collapse.CollapseContext; import org.elasticsearch.search.internal.ScrollContext; @@ -49,7 +54,6 @@ import org.elasticsearch.search.sort.SortAndFormats; import java.io.IOException; import java.util.Objects; -import java.util.function.IntSupplier; import java.util.function.Supplier; import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_COUNT; @@ -82,7 +86,7 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext { static class EmptyTopDocsCollectorContext extends TopDocsCollectorContext { private final Collector collector; - private final IntSupplier hitCountSupplier; + private final Supplier hitCountSupplier; /** * Ctr @@ -100,15 +104,15 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext { int hitCount = hasFilterCollector ? 
-1 : shortcutTotalHitCount(reader, query); if (hitCount == -1) { this.collector = hitCountCollector; - this.hitCountSupplier = hitCountCollector::getTotalHits; + this.hitCountSupplier = () -> new TotalHits(hitCountCollector.getTotalHits(), TotalHits.Relation.EQUAL_TO); } else { this.collector = new EarlyTerminatingCollector(hitCountCollector, 0, false); - this.hitCountSupplier = () -> hitCount; + this.hitCountSupplier = () -> new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO); } } else { this.collector = new EarlyTerminatingCollector(new TotalHitCountCollector(), 0, false); // for bwc hit count is set to 0, it will be converted to -1 by the coordinating node - this.hitCountSupplier = () -> 0; + this.hitCountSupplier = () -> new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); } } @@ -119,14 +123,15 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext { @Override void postProcess(QuerySearchResult result) { - final int totalHitCount = hitCountSupplier.getAsInt(); - result.topDocs(new TopDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS, 0), null); + final TotalHits totalHitCount = hitCountSupplier.get(); + result.topDocs(new TopDocsAndMaxScore(new TopDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS), Float.NaN), null); } } static class CollapsingTopDocsCollectorContext extends TopDocsCollectorContext { private final DocValueFormat[] sortFmt; private final CollapsingTopDocsCollector topDocsCollector; + private final Supplier maxScoreSupplier; /** * Ctr @@ -144,7 +149,15 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext { assert collapseContext != null; Sort sort = sortAndFormats == null ? Sort.RELEVANCE : sortAndFormats.sort; this.sortFmt = sortAndFormats == null ? new DocValueFormat[] { DocValueFormat.RAW } : sortAndFormats.formats; - this.topDocsCollector = collapseContext.createTopDocs(sort, numHits, trackMaxScore); + this.topDocsCollector = collapseContext.createTopDocs(sort, numHits); + + MaxScoreCollector maxScoreCollector = null; + if (trackMaxScore) { + maxScoreCollector = new MaxScoreCollector(); + maxScoreSupplier = maxScoreCollector::getMaxScore; + } else { + maxScoreSupplier = () -> Float.NaN; + } } @Override @@ -155,15 +168,17 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext { @Override void postProcess(QuerySearchResult result) throws IOException { - result.topDocs(topDocsCollector.getTopDocs(), sortFmt); + CollapseTopFieldDocs topDocs = topDocsCollector.getTopDocs(); + result.topDocs(new TopDocsAndMaxScore(topDocs, maxScoreSupplier.get()), sortFmt); } } abstract static class SimpleTopDocsCollectorContext extends TopDocsCollectorContext { private final @Nullable SortAndFormats sortAndFormats; private final Collector collector; - private final IntSupplier totalHitsSupplier; + private final Supplier totalHitsSupplier; private final Supplier topDocsSupplier; + private final Supplier maxScoreSupplier; /** * Ctr @@ -187,37 +202,53 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext { super(REASON_SEARCH_TOP_HITS, numHits); this.sortAndFormats = sortAndFormats; if (sortAndFormats == null) { - final TopDocsCollector topDocsCollector = TopScoreDocCollector.create(numHits, searchAfter); + final TopDocsCollector topDocsCollector = TopScoreDocCollector.create(numHits, searchAfter, Integer.MAX_VALUE); this.collector = topDocsCollector; - this.topDocsSupplier = topDocsCollector::topDocs; - this.totalHitsSupplier = topDocsCollector::getTotalHits; + this.topDocsSupplier = new 
CachedSupplier<>(topDocsCollector::topDocs); + this.totalHitsSupplier = () -> topDocsSupplier.get().totalHits; + this.maxScoreSupplier = () -> { + TopDocs topDocs = topDocsSupplier.get(); + if (topDocs.scoreDocs.length == 0) { + return Float.NaN; + } else { + return topDocs.scoreDocs[0].score; + } + }; } else { /** * We explicitly don't track total hits in the topdocs collector, it can early terminate * if the sort matches the index sort. */ final TopDocsCollector topDocsCollector = TopFieldCollector.create(sortAndFormats.sort, numHits, - (FieldDoc) searchAfter, true, trackMaxScore, trackMaxScore, false); - this.topDocsSupplier = topDocsCollector::topDocs; + (FieldDoc) searchAfter, 1); + this.topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs); + TotalHitCountCollector hitCountCollector = null; if (trackTotalHits) { // implicit total hit counts are valid only when there is no filter collector in the chain int count = hasFilterCollector ? -1 : shortcutTotalHitCount(reader, query); if (count != -1) { // we can extract the total count from the shard statistics directly - this.totalHitsSupplier = () -> count; - this.collector = topDocsCollector; + this.totalHitsSupplier = () -> new TotalHits(count, TotalHits.Relation.EQUAL_TO); } else { // wrap a collector that counts the total number of hits even // if the top docs collector terminates early final TotalHitCountCollector countingCollector = new TotalHitCountCollector(); - this.collector = MultiCollector.wrap(topDocsCollector, countingCollector); - this.totalHitsSupplier = countingCollector::getTotalHits; + hitCountCollector = countingCollector; + this.totalHitsSupplier = () -> new TotalHits(countingCollector.getTotalHits(), TotalHits.Relation.EQUAL_TO); } } else { // total hit count is not needed - this.collector = topDocsCollector; - this.totalHitsSupplier = topDocsCollector::getTotalHits; + // for bwc hit count is set to 0, it will be converted to -1 by the coordinating node + this.totalHitsSupplier = () -> new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); } + MaxScoreCollector maxScoreCollector = null; + if (trackMaxScore) { + maxScoreCollector = new MaxScoreCollector(); + maxScoreSupplier = maxScoreCollector::getMaxScore; + } else { + maxScoreSupplier = () -> Float.NaN; + } + collector = MultiCollector.wrap(topDocsCollector, hitCountCollector, maxScoreCollector); } } @@ -230,8 +261,8 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext { @Override void postProcess(QuerySearchResult result) throws IOException { final TopDocs topDocs = topDocsSupplier.get(); - topDocs.totalHits = totalHitsSupplier.getAsInt(); - result.topDocs(topDocs, sortAndFormats == null ? null : sortAndFormats.formats); + topDocs.totalHits = totalHitsSupplier.get(); + result.topDocs(new TopDocsAndMaxScore(topDocs, maxScoreSupplier.get()), sortAndFormats == null ? 
null : sortAndFormats.formats); } } @@ -257,22 +288,22 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext { @Override void postProcess(QuerySearchResult result) throws IOException { super.postProcess(result); - final TopDocs topDocs = result.topDocs(); - if (scrollContext.totalHits == -1) { + final TopDocsAndMaxScore topDocs = result.topDocs(); + if (scrollContext.totalHits == null) { // first round - scrollContext.totalHits = topDocs.totalHits; - scrollContext.maxScore = topDocs.getMaxScore(); + scrollContext.totalHits = topDocs.topDocs.totalHits; + scrollContext.maxScore = topDocs.maxScore; } else { // subsequent round: the total number of hits and // the maximum score were computed on the first round - topDocs.totalHits = scrollContext.totalHits; - topDocs.setMaxScore(scrollContext.maxScore); + topDocs.topDocs.totalHits = scrollContext.totalHits; + topDocs.maxScore = scrollContext.maxScore; } if (numberOfShards == 1) { // if we fetch the document in the same roundtrip, we already know the last emitted doc - if (topDocs.scoreDocs.length > 0) { + if (topDocs.topDocs.scoreDocs.length > 0) { // set the last emitted doc - scrollContext.lastEmittedDoc = topDocs.scoreDocs[topDocs.scoreDocs.length - 1]; + scrollContext.lastEmittedDoc = topDocs.topDocs.scoreDocs[topDocs.topDocs.scoreDocs.length - 1]; } } result.topDocs(topDocs, result.sortValueFormats()); @@ -334,8 +365,7 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext { } else if (searchContext.collapse() != null) { boolean trackScores = searchContext.sort() == null ? true : searchContext.trackScores(); int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); - return new CollapsingTopDocsCollectorContext(searchContext.collapse(), - searchContext.sort(), numDocs, trackScores); + return new CollapsingTopDocsCollectorContext(searchContext.collapse(), searchContext.sort(), numDocs, trackScores); } else { int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); final boolean rescore = searchContext.rescore().isEmpty() == false; diff --git a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java index 4a9567a32c0..61bd150291d 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TopDocs; import java.io.IOException; @@ -41,7 +42,7 @@ public final class QueryRescorer implements Rescorer { public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext rescoreContext) throws IOException { assert rescoreContext != null; - if (topDocs == null || topDocs.totalHits == 0 || topDocs.scoreDocs.length == 0) { + if (topDocs == null || topDocs.scoreDocs.length == 0) { return topDocs; } @@ -87,7 +88,7 @@ public final class QueryRescorer implements Rescorer { Explanation prim; if (sourceExplanation.isMatch()) { prim = Explanation.match( - sourceExplanation.getValue() * primaryWeight, + sourceExplanation.getValue().floatValue() * primaryWeight, "product of:", sourceExplanation, Explanation.match(primaryWeight, "primaryWeight")); } else { prim = Explanation.noMatch("First pass did not match", 
sourceExplanation); @@ -99,12 +100,12 @@ public final class QueryRescorer implements Rescorer { if (rescoreExplain != null && rescoreExplain.isMatch()) { float secondaryWeight = rescore.rescoreQueryWeight(); Explanation sec = Explanation.match( - rescoreExplain.getValue() * secondaryWeight, + rescoreExplain.getValue().floatValue() * secondaryWeight, "product of:", rescoreExplain, Explanation.match(secondaryWeight, "secondaryWeight")); QueryRescoreMode scoreMode = rescore.scoreMode(); return Explanation.match( - scoreMode.combine(prim.getValue(), sec.getValue()), + scoreMode.combine(prim.getValue().floatValue(), sec.getValue().floatValue()), scoreMode + " of:", prim, sec); } @@ -123,15 +124,14 @@ public final class QueryRescorer implements Rescorer { /** Returns a new {@link TopDocs} with the topN from the incoming one, or the same TopDocs if the number of hits is already <= * topN. */ private TopDocs topN(TopDocs in, int topN) { - if (in.totalHits < topN) { - assert in.scoreDocs.length == in.totalHits; + if (in.scoreDocs.length < topN) { return in; } ScoreDoc[] subset = new ScoreDoc[topN]; System.arraycopy(in.scoreDocs, 0, subset, 0, topN); - return new TopDocs(in.totalHits, subset, in.getMaxScore()); + return new TopDocs(in.totalHits, subset); } /** Modifies incoming TopDocs (in) by replacing the top hits with resorted's hits, and then resorting all hits. */ @@ -151,8 +151,6 @@ public final class QueryRescorer implements Rescorer { // incoming first pass hits, instead of allowing recoring of just the top subset: Arrays.sort(in.scoreDocs, SCORE_DOC_COMPARATOR); } - // update the max score after the resort - in.setMaxScore(in.scoreDocs[0].score); return in; } @@ -206,7 +204,8 @@ public final class QueryRescorer implements Rescorer { @Override public void extractTerms(IndexSearcher searcher, RescoreContext rescoreContext, Set termsSet) throws IOException { - searcher.createNormalizedWeight(((QueryRescoreContext) rescoreContext).query(), false).extractTerms(termsSet); + Query query = ((QueryRescoreContext) rescoreContext).query(); + searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f).extractTerms(termsSet); } } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java index 7baaa61bbb8..7f5a1be285d 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchPhase; import org.elasticsearch.search.internal.SearchContext; @@ -44,15 +45,19 @@ public class RescorePhase extends AbstractComponent implements SearchPhase { @Override public void execute(SearchContext context) { + TopDocs topDocs = context.queryResult().topDocs().topDocs; + if (topDocs.scoreDocs.length == 0) { + return; + } try { - TopDocs topDocs = context.queryResult().topDocs(); for (RescoreContext ctx : context.rescore()) { topDocs = ctx.rescorer().rescore(topDocs, context.searcher(), ctx); // It is the responsibility of the rescorer to sort the resulted top docs, // here we only assert that this condition is met. 
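// Editor's aside: the assert just below leans on topDocsSortedByScore. A minimal sketch of a check of that shape, under the assumption that the helper simply walks scoreDocs and verifies scores are non-increasing (a hypothetical reconstruction for illustration, not the verbatim helper):
static boolean topDocsSortedByScoreSketch(TopDocs topDocs) {
    if (topDocs == null || topDocs.scoreDocs == null || topDocs.scoreDocs.length < 2) {
        return true; // zero or one hit is trivially sorted
    }
    float lastScore = topDocs.scoreDocs[0].score;
    for (int i = 1; i < topDocs.scoreDocs.length; i++) {
        if (Float.compare(topDocs.scoreDocs[i].score, lastScore) > 0) {
            return false; // a hit outscored its predecessor: not sorted by descending score
        }
        lastScore = topDocs.scoreDocs[i].score;
    }
    return true;
}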
assert context.sort() == null && topDocsSortedByScore(topDocs): "topdocs should be sorted after rescore"; } - context.queryResult().topDocs(topDocs, context.queryResult().sortValueFormats()); + context.queryResult().topDocs(new TopDocsAndMaxScore(topDocs, topDocs.scoreDocs[0].score), + context.queryResult().sortValueFormats()); } catch (IOException e) { throw new ElasticsearchException("Rescore Phase Failed", e); } diff --git a/server/src/main/java/org/elasticsearch/search/slice/DocValuesSliceQuery.java b/server/src/main/java/org/elasticsearch/search/slice/DocValuesSliceQuery.java index c1aaad04d1d..f2cf854947f 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/DocValuesSliceQuery.java +++ b/server/src/main/java/org/elasticsearch/search/slice/DocValuesSliceQuery.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.ConstantScoreWeight; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; @@ -45,7 +46,7 @@ public final class DocValuesSliceQuery extends SliceQuery { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, boost) { @Override diff --git a/server/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java b/server/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java index da1b98822cf..1a10770fe9d 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java +++ b/server/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Weight; @@ -55,7 +56,7 @@ public final class TermsSliceQuery extends SliceQuery { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, boost) { @Override public Scorer scorer(LeafReaderContext context) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 4759027ee51..1b71c51d416 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.sort; import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; @@ -351,7 +351,7 @@ public class ScriptSortBuilder extends SortBuilder { return FieldData.singleton(values); } @Override - 
protected void setScorer(Scorer scorer) { + protected void setScorer(Scorable scorer) { leafScript.setScorer(scorer); } }; @@ -376,7 +376,7 @@ public class ScriptSortBuilder extends SortBuilder { return FieldData.singleton(values); } @Override - protected void setScorer(Scorer scorer) { + protected void setScorer(Scorable scorer) { leafScript.setScorer(scorer); } }; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java index 5690acd7abd..7dc63a8daac 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java @@ -84,7 +84,7 @@ public class CompletionSuggester extends Suggester private static void suggest(IndexSearcher searcher, CompletionQuery query, TopSuggestDocsCollector collector) throws IOException { query = (CompletionQuery) query.rewrite(searcher.getIndexReader()); - Weight weight = query.createWeight(searcher, collector.needsScores(), 1f); + Weight weight = query.createWeight(searcher, collector.scoreMode(), 1f); for (LeafReaderContext context : searcher.getIndexReader().leaves()) { BulkScorer scorer = weight.bulkScorer(context); if (scorer != null) { @@ -192,7 +192,7 @@ public class CompletionSuggester extends Suggester // The following code groups suggestions matching different contexts by document id and dedup the surface form + contexts // if needed (skip_duplicates). int size = entries.scoreDocs.length; - final List suggestDocs = new ArrayList(size); + final List suggestDocs = new ArrayList<>(size); final CharArraySet seenSurfaceForms = doSkipDuplicates() ? new CharArraySet(size, false) : null; for (TopSuggestDocs.SuggestScoreDoc suggestEntry : entries.scoreLookupDocs()) { final SuggestDoc suggestDoc; @@ -209,8 +209,8 @@ public class CompletionSuggester extends Suggester } suggestDocs.add(suggestDoc); } - return new TopSuggestDocs((int) entries.totalHits, - suggestDocs.toArray(new TopSuggestDocs.SuggestScoreDoc[0]), entries.getMaxScore()); + return new TopSuggestDocs(entries.totalHits, + suggestDocs.toArray(new TopSuggestDocs.SuggestScoreDoc[0])); } } } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java index 7b7584f4674..6fdff8d18eb 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.suggest.phrase; import org.apache.lucene.search.spell.DirectSpellChecker; import org.apache.lucene.search.spell.JaroWinklerDistance; -import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.search.spell.LuceneLevenshteinDistance; import org.apache.lucene.search.spell.NGramDistance; import org.apache.lucene.search.spell.StringDistance; @@ -466,7 +466,7 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator } else if ("damerau_levenshtein".equals(distanceVal)) { return new LuceneLevenshteinDistance(); } else if ("levenshtein".equals(distanceVal)) { - return new LevensteinDistance(); + return new LevenshteinDistance(); } else if 
("jaro_winkler".equals(distanceVal)) { return new JaroWinklerDistance(); } else if ("ngram".equals(distanceVal)) { diff --git a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java index fdc31dd6c2f..ad6a8b4acf3 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.suggest.term; import org.apache.lucene.search.spell.DirectSpellChecker; import org.apache.lucene.search.spell.JaroWinklerDistance; -import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.search.spell.LuceneLevenshteinDistance; import org.apache.lucene.search.spell.NGramDistance; import org.apache.lucene.search.spell.StringDistance; @@ -548,7 +548,7 @@ public class TermSuggestionBuilder extends SuggestionBuilder UNREMOVABLE_SETTINGS; @@ -268,7 +270,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp // Index doesn't exist - create it and start recovery // Make sure that the index we are about to create has a validate name MetaDataCreateIndexService.validateIndexName(renamedIndexName, currentState); - createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetaData.getSettings()); + createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetaData.getSettings(), false); IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndexName); indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())); if (!request.includeAliases() && !snapshotIndexMetaData.getAliases().isEmpty()) { @@ -292,6 +294,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp // Index exists and it's closed - open it in metadata and start recovery IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN); indexMdBuilder.version(Math.max(snapshotIndexMetaData.getVersion(), currentIndexMetaData.getVersion() + 1)); + indexMdBuilder.mappingVersion(Math.max(snapshotIndexMetaData.getMappingVersion(), currentIndexMetaData.getMappingVersion() + 1)); if (!request.includeAliases()) { // Remove all snapshot aliases if (!snapshotIndexMetaData.getAliases().isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index 67ddabc37fa..fdbe74d8d4d 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -76,9 +76,7 @@ public final class SnapshotInfo implements Comparable, ToXContent, private static final String SUCCESSFUL_SHARDS = "successful_shards"; private static final String INCLUDE_GLOBAL_STATE = "include_global_state"; - private static final Version VERSION_INCOMPATIBLE_INTRODUCED = Version.V_5_2_0; private static final Version INCLUDE_GLOBAL_STATE_INTRODUCED = Version.V_6_2_0; - public static final Version VERBOSE_INTRODUCED = Version.V_5_5_0; private static final Comparator COMPARATOR = Comparator.comparing(SnapshotInfo::startTime).thenComparing(SnapshotInfo::snapshotId); 
@@ -275,11 +273,7 @@ public final class SnapshotInfo implements Comparable, ToXContent, indicesListBuilder.add(in.readString()); } indices = Collections.unmodifiableList(indicesListBuilder); - if (in.getVersion().onOrAfter(VERBOSE_INTRODUCED)) { - state = in.readBoolean() ? SnapshotState.fromValue(in.readByte()) : null; - } else { - state = SnapshotState.fromValue(in.readByte()); - } + state = in.readBoolean() ? SnapshotState.fromValue(in.readByte()) : null; reason = in.readOptionalString(); startTime = in.readVLong(); endTime = in.readVLong(); @@ -295,11 +289,7 @@ public final class SnapshotInfo implements Comparable, ToXContent, } else { shardFailures = Collections.emptyList(); } - if (in.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED)) { - version = Version.readVersion(in); - } else { - version = in.readBoolean() ? Version.readVersion(in) : null; - } + version = in.readBoolean() ? Version.readVersion(in) : null; if (in.getVersion().onOrAfter(INCLUDE_GLOBAL_STATE_INTRODUCED)) { includeGlobalState = in.readOptionalBoolean(); } @@ -681,19 +671,11 @@ public final class SnapshotInfo implements Comparable, ToXContent, for (String index : indices) { out.writeString(index); } - if (out.getVersion().onOrAfter(VERBOSE_INTRODUCED)) { - if (state != null) { - out.writeBoolean(true); - out.writeByte(state.value()); - } else { - out.writeBoolean(false); - } + if (state != null) { + out.writeBoolean(true); + out.writeByte(state.value()); } else { - if (out.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED) && state == SnapshotState.INCOMPATIBLE) { - out.writeByte(SnapshotState.FAILED.value()); - } else { - out.writeByte(state.value()); - } + out.writeBoolean(false); } out.writeOptionalString(reason); out.writeVLong(startTime); @@ -704,19 +686,11 @@ public final class SnapshotInfo implements Comparable, ToXContent, for (SnapshotShardFailure failure : shardFailures) { failure.writeTo(out); } - if (out.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED)) { - Version versionToWrite = version; - if (versionToWrite == null) { - versionToWrite = Version.CURRENT; - } - Version.writeVersion(versionToWrite, out); + if (version != null) { + out.writeBoolean(true); + Version.writeVersion(version, out); } else { - if (version != null) { - out.writeBoolean(true); - Version.writeVersion(version, out); - } else { - out.writeBoolean(false); - } + out.writeBoolean(false); } if (out.getVersion().onOrAfter(INCLUDE_GLOBAL_STATE_INTRODUCED)) { out.writeOptionalBoolean(includeGlobalState); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java index f2bdc2ba5df..67bf9c6069f 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java @@ -102,7 +102,7 @@ public class SnapshotShardFailure extends ShardOperationFailedException { nodeId = in.readOptionalString(); shardId = ShardId.readShardId(in); super.shardId = shardId.getId(); - super.index = shardId.getIndexName(); + index = shardId.getIndexName(); reason = in.readString(); status = RestStatus.readFrom(in); } diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java index 84c337399d5..da8faaf3c33 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java +++ 
b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java @@ -38,12 +38,12 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -62,6 +62,7 @@ public class ConnectionManager implements Closeable { private final TimeValue pingSchedule; private final ConnectionProfile defaultProfile; private final Lifecycle lifecycle = new Lifecycle(); + private final AtomicBoolean closed = new AtomicBoolean(false); private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener(); @@ -83,7 +84,7 @@ public class ConnectionManager implements Closeable { } public void addListener(TransportConnectionListener listener) { - this.connectionListener.listeners.add(listener); + this.connectionListener.listeners.addIfAbsent(listener); } public void removeListener(TransportConnectionListener listener) { @@ -186,45 +187,50 @@ public class ConnectionManager implements Closeable { } } - public int connectedNodeCount() { + /** + * Returns the number of nodes this manager is connected to. + */ + public int size() { return connectedNodes.size(); } @Override public void close() { - lifecycle.moveToStopped(); - CountDownLatch latch = new CountDownLatch(1); + if (closed.compareAndSet(false, true)) { + lifecycle.moveToStopped(); + CountDownLatch latch = new CountDownLatch(1); - // TODO: Consider moving all read/write lock (in Transport and this class) to the TransportService - threadPool.generic().execute(() -> { - closeLock.writeLock().lock(); - try { - // we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close - // all instances and then clear them maps - Iterator> iterator = connectedNodes.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry next = iterator.next(); - try { - IOUtils.closeWhileHandlingException(next.getValue()); - } finally { - iterator.remove(); + // TODO: Consider moving all read/write lock (in Transport and this class) to the TransportService + threadPool.generic().execute(() -> { + closeLock.writeLock().lock(); + try { + // we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close + // all instances and then clear them maps + Iterator> iterator = connectedNodes.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry next = iterator.next(); + try { + IOUtils.closeWhileHandlingException(next.getValue()); + } finally { + iterator.remove(); + } } + } finally { + closeLock.writeLock().unlock(); + latch.countDown(); + } + }); + + try { + try { + latch.await(30, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // ignore } } finally { - closeLock.writeLock().unlock(); - latch.countDown(); + lifecycle.moveToClosed(); } - }); - - try { - try { - latch.await(30, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - // ignore - } - } finally { - lifecycle.moveToClosed(); } } @@ -288,7 +294,7 @@ public class 
ConnectionManager implements Closeable { private static final class DelegatingNodeConnectionListener implements TransportConnectionListener { - private final List listeners = new CopyOnWriteArrayList<>(); + private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); @Override public void onNodeDisconnected(DiscoveryNode key) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index a12f27c93e3..f08ef75612f 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -16,12 +16,15 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.transport; -import java.util.function.Supplier; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -33,11 +36,13 @@ import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Predicate; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -46,26 +51,85 @@ import java.util.stream.Stream; */ public abstract class RemoteClusterAware extends AbstractComponent { + public static final Setting.AffixSetting> SEARCH_REMOTE_CLUSTERS_SEEDS = + Setting.affixKeySetting( + "search.remote.", + "seeds", + key -> Setting.listSetting( + key, + Collections.emptyList(), + s -> { + parsePort(s); + return s; + }, + Setting.Property.Deprecated, + Setting.Property.Dynamic, + Setting.Property.NodeScope)); + /** * A list of initial seed nodes to discover eligible nodes from the remote cluster */ public static final Setting.AffixSetting> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting( - "search.remote.", - "seeds", - key -> Setting.listSetting( - key, Collections.emptyList(), - s -> { - // validate seed address - parsePort(s); - return s; - }, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - ); + "cluster.remote.", + "seeds", + key -> Setting.listSetting( + key, + // the default needs to be emptyList() when fallback is removed + "_na_".equals(key) + ? 
SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(key) + : SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSetting(key.replaceAll("^cluster", "search")), + s -> { + // validate seed address + parsePort(s); + return s; + }, + Setting.Property.Dynamic, + Setting.Property.NodeScope)); + public static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':'; public static final String LOCAL_CLUSTER_GROUP_KEY = ""; + public static final Setting.AffixSetting SEARCH_REMOTE_CLUSTERS_PROXY = Setting.affixKeySetting( + "search.remote.", + "proxy", + key -> Setting.simpleString( + key, + s -> { + if (Strings.hasLength(s)) { + parsePort(s); + } + return s; + }, + Setting.Property.Deprecated, + Setting.Property.Dynamic, + Setting.Property.NodeScope), + REMOTE_CLUSTERS_SEEDS); + + /** + * A proxy address for the remote cluster. + * NOTE: this setting is undocumented until we have at least one transport that supports passing + * on the hostname via a mechanism like SNI. + */ + public static final Setting.AffixSetting REMOTE_CLUSTERS_PROXY = Setting.affixKeySetting( + "cluster.remote.", + "proxy", + key -> Setting.simpleString( + key, + // no default is needed when fallback is removed, use simple string which gives empty + "_na_".equals(key) + ? SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(key) + : SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSetting(key.replaceAll("^cluster", "search")), + s -> { + if (Strings.hasLength(s)) { + parsePort(s); + } + return s; + }, + Setting.Property.Dynamic, + Setting.Property.NodeScope), + REMOTE_CLUSTERS_SEEDS); + + protected final ClusterNameExpressionResolver clusterNameResolver; /** @@ -77,23 +141,40 @@ public abstract class RemoteClusterAware extends AbstractComponent { this.clusterNameResolver = new ClusterNameExpressionResolver(settings); } - protected static Map>> buildRemoteClustersSeeds(Settings settings) { + /** + * Builds the dynamic per-cluster config from the given settings. This is a map keyed by the cluster alias that points to a tuple + * (ProxyAddress, [SeedNodeSuppliers]). If a cluster is configured with a proxy address, all seed nodes will point to + * {@link TransportAddress#META_ADDRESS} and their configured address will be used as the hostname for the generated discovery node. 
+ */ + protected static Map>>> buildRemoteClustersDynamicConfig(Settings settings) { Stream>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings); return allConcreteSettings.collect( - Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> { - String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting); - List addresses = concreteSetting.get(settings); - List> nodes = new ArrayList<>(addresses.size()); - for (String address : addresses) { - nodes.add(() -> { - TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address)); - return new DiscoveryNode(clusterName + "#" + transportAddress.toString(), - transportAddress, - Version.CURRENT.minimumCompatibilityVersion()); - }); - } - return nodes; - })); + Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> { + String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting); + List addresses = concreteSetting.get(settings); + final boolean proxyMode = REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterName).exists(settings); + List> nodes = new ArrayList<>(addresses.size()); + for (String address : addresses) { + nodes.add(() -> buildSeedNode(clusterName, address, proxyMode)); + } + return new Tuple<>(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterName).get(settings), nodes); + })); + } + + static DiscoveryNode buildSeedNode(String clusterName, String address, boolean proxyMode) { + if (proxyMode) { + TransportAddress transportAddress = new TransportAddress(TransportAddress.META_ADDRESS, 0); + String hostName = address.substring(0, indexOfPortSeparator(address)); + return new DiscoveryNode("", clusterName + "#" + address, UUIDs.randomBase64UUID(), hostName, address, + transportAddress, Collections + .emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class), + Version.CURRENT.minimumCompatibilityVersion()); + } else { + TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address)); + return new DiscoveryNode(clusterName + "#" + transportAddress.toString(), + transportAddress, + Version.CURRENT.minimumCompatibilityVersion()); + } } /** @@ -120,8 +201,8 @@ public abstract class RemoteClusterAware extends AbstractComponent { // remote_cluster_alias:index_name - for this case we fail the request. the user can easily change the cluster alias // if that happens throw new IllegalArgumentException("Can not filter indices; index " + index + - " exists but there is also a remote cluster named: " + remoteClusterName); - } + " exists but there is also a remote cluster named: " + remoteClusterName); + } String indexName = index.substring(i + 1); for (String clusterName : clusters) { perClusterIndices.computeIfAbsent(clusterName, k -> new ArrayList<>()).add(indexName); @@ -138,20 +219,30 @@ public abstract class RemoteClusterAware extends AbstractComponent { protected abstract Set getRemoteClusterNames(); + /** * Subclasses must implement this to receive information about updated cluster aliases. If the given address list is * empty the cluster alias is unregistered and should be removed. */ - protected abstract void updateRemoteCluster(String clusterAlias, List addresses); + protected abstract void updateRemoteCluster(String clusterAlias, List addresses, String proxy); /** * Registers this instance to listen to updates on the cluster settings. 
*/ public void listenForUpdates(ClusterSettings clusterSettings) { - clusterSettings.addAffixUpdateConsumer(RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, this::updateRemoteCluster, - (namespace, value) -> {}); + clusterSettings.addAffixUpdateConsumer( + RemoteClusterAware.REMOTE_CLUSTERS_PROXY, + RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, + (key, value) -> updateRemoteCluster(key, value.v2(), value.v1()), + (namespace, value) -> {}); + clusterSettings.addAffixUpdateConsumer( + RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY, + RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS, + (key, value) -> updateRemoteCluster(key, value.v2(), value.v1()), + (namespace, value) -> {}); } + protected static InetSocketAddress parseSeedAddress(String remoteHost) { String host = remoteHost.substring(0, indexOfPortSeparator(remoteHost)); InetAddress hostAddress; @@ -186,4 +277,5 @@ public abstract class RemoteClusterAware extends AbstractComponent { public static String buildRemoteIndexName(String clusterAlias, String indexName) { return clusterAlias != null ? clusterAlias + REMOTE_CLUSTER_INDEX_SEPARATOR + indexName : indexName; } + } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 15cf7899dc0..6b190943465 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport; +import java.net.InetSocketAddress; import java.util.function.Supplier; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; @@ -35,6 +36,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -80,16 +82,18 @@ import java.util.stream.Collectors; final class RemoteClusterConnection extends AbstractComponent implements TransportConnectionListener, Closeable { private final TransportService transportService; + private final ConnectionManager connectionManager; private final ConnectionProfile remoteProfile; private final ConnectedNodes connectedNodes; private final String clusterAlias; private final int maxNumRemoteConnections; private final Predicate nodePredicate; + private final ThreadPool threadPool; + private volatile String proxyAddress; private volatile List> seedNodes; private volatile boolean skipUnavailable; private final ConnectHandler connectHandler; private SetOnce remoteClusterName = new SetOnce<>(); - private final ClusterName localClusterName; /** * Creates a new {@link RemoteClusterConnection} @@ -97,13 +101,21 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo * @param clusterAlias the configured alias of the cluster to connect to * @param seedNodes a list of seed nodes to discover eligible nodes from * @param transportService the local nodes transport service + * @param connectionManager the connection manager to use for this remote connection * @param maxNumRemoteConnections the maximum number of connections to the remote cluster * @param nodePredicate a predicate to 
filter eligible remote nodes to connect to */ RemoteClusterConnection(Settings settings, String clusterAlias, List> seedNodes, - TransportService transportService, int maxNumRemoteConnections, Predicate nodePredicate) { + TransportService transportService, ConnectionManager connectionManager, int maxNumRemoteConnections, + Predicate nodePredicate) { + this(settings, clusterAlias, seedNodes, transportService, connectionManager, maxNumRemoteConnections, nodePredicate, null); + } + + RemoteClusterConnection(Settings settings, String clusterAlias, List> seedNodes, + TransportService transportService, ConnectionManager connectionManager, int maxNumRemoteConnections, Predicate + nodePredicate, + String proxyAddress) { super(settings); - this.localClusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); this.transportService = transportService; this.maxNumRemoteConnections = maxNumRemoteConnections; this.nodePredicate = nodePredicate; @@ -122,14 +134,31 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo this.skipUnavailable = RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE .getConcreteSettingForNamespace(clusterAlias).get(settings); this.connectHandler = new ConnectHandler(); - transportService.addConnectionListener(this); + this.threadPool = transportService.threadPool; + this.connectionManager = connectionManager; + connectionManager.addListener(this); + // we register the transport service here as a listener to make sure we notify handlers on disconnect etc. + connectionManager.addListener(transportService); + this.proxyAddress = proxyAddress; + } + + private static DiscoveryNode maybeAddProxyAddress(String proxyAddress, DiscoveryNode node) { + if (proxyAddress == null || proxyAddress.isEmpty()) { + return node; + } else { + // resolve the proxy address lazily here + InetSocketAddress proxyInetAddress = RemoteClusterAware.parseSeedAddress(proxyAddress); + return new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), node + .getHostAddress(), new TransportAddress(proxyInetAddress), node.getAttributes(), node.getRoles(), node.getVersion()); + } } /** * Updates the list of seed nodes for this cluster connection */ - synchronized void updateSeedNodes(List> seedNodes, ActionListener connectListener) { + synchronized void updateSeedNodes(String proxyAddress, List> seedNodes, ActionListener connectListener) { this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes)); + this.proxyAddress = proxyAddress; connectHandler.connect(connectListener); } @@ -183,8 +212,9 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo private void fetchShardsInternal(ClusterSearchShardsRequest searchShardsRequest, final ActionListener listener) { - final DiscoveryNode node = connectedNodes.getAny(); - transportService.sendRequest(node, ClusterSearchShardsAction.NAME, searchShardsRequest, + final DiscoveryNode node = getAnyConnectedNode(); + Transport.Connection connection = connectionManager.getConnection(node); + transportService.sendRequest(connection, ClusterSearchShardsAction.NAME, searchShardsRequest, TransportRequestOptions.EMPTY, new TransportResponseHandler() { @Override @@ -219,12 +249,16 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo request.clear(); request.nodes(true); request.local(true); // run this on the node that gets the request it's as good as any other - final DiscoveryNode node = connectedNodes.getAny(); - transportService.sendRequest(node, 
ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, + final DiscoveryNode node = getAnyConnectedNode(); + Transport.Connection connection = connectionManager.getConnection(node); + transportService.sendRequest(connection, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, new TransportResponseHandler() { + @Override - public ClusterStateResponse newInstance() { - return new ClusterStateResponse(); + public ClusterStateResponse read(StreamInput in) throws IOException { + ClusterStateResponse response = new ClusterStateResponse(); + response.readFrom(in); + return response; } @Override @@ -261,14 +295,15 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo * If such node is not connected, the returned connection will be a proxy connection that redirects to it. */ Transport.Connection getConnection(DiscoveryNode remoteClusterNode) { - if (transportService.nodeConnected(remoteClusterNode)) { - return transportService.getConnection(remoteClusterNode); + if (connectionManager.nodeConnected(remoteClusterNode)) { + return connectionManager.getConnection(remoteClusterNode); } - DiscoveryNode discoveryNode = connectedNodes.getAny(); - Transport.Connection connection = transportService.getConnection(discoveryNode); + DiscoveryNode discoveryNode = getAnyConnectedNode(); + Transport.Connection connection = connectionManager.getConnection(discoveryNode); return new ProxyConnection(connection, remoteClusterNode); } + static final class ProxyConnection implements Transport.Connection { private final Transport.Connection proxyConnection; private final DiscoveryNode targetNode; @@ -317,33 +352,18 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo } Transport.Connection getConnection() { - return transportService.getConnection(getAnyConnectedNode()); + return connectionManager.getConnection(getAnyConnectedNode()); } @Override public void close() throws IOException { - connectHandler.close(); + IOUtils.close(connectHandler, connectionManager); } public boolean isClosed() { return connectHandler.isClosed(); } - private ConnectionProfile getRemoteProfile(ClusterName name) { - // we can only compare the cluster name to make a decision if we should use a remote profile - // we can't use a cluster UUID here since we could be connecting to that remote cluster before - // the remote node has joined its cluster and have a cluster UUID. The fact that we just lose a - // rather smallish optimization on the connection layer under certain situations where remote clusters - // have the same name as the local one is minor here. - // the alternative here is to complicate the remote infrastructure to also wait until we formed a cluster, - // gained a cluster UUID and then start connecting etc. we rather use this simplification in order to maintain simplicity - if (this.localClusterName.equals(name)) { - return null; - } else { - return remoteProfile; - } - } - /** * The connect handler manages node discovery and the actual connect to the remote cluster. * There is at most one connect job running at any time. If such a connect job is triggered @@ -387,7 +407,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo final boolean runConnect; final Collection> toNotify; final ActionListener listener = connectListener == null ? 
null : - ContextPreservingActionListener.wrapPreservingContext(connectListener, transportService.getThreadPool().getThreadContext()); + ContextPreservingActionListener.wrapPreservingContext(connectListener, threadPool.getThreadContext()); synchronized (queue) { if (listener != null && queue.offer(listener) == false) { listener.onFailure(new RejectedExecutionException("connect queue is full")); @@ -415,7 +435,6 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo } private void forkConnect(final Collection> toNotify) { - ThreadPool threadPool = transportService.getThreadPool(); ExecutorService executor = threadPool.executor(ThreadPool.Names.MANAGEMENT); executor.submit(new AbstractRunnable() { @Override @@ -452,22 +471,22 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo maybeConnect(); } }); - collectRemoteNodes(seedNodes.iterator(), transportService, listener); + collectRemoteNodes(seedNodes.iterator(), transportService, connectionManager, listener); } }); } private void collectRemoteNodes(Iterator> seedNodes, - final TransportService transportService, ActionListener listener) { + final TransportService transportService, final ConnectionManager manager, ActionListener listener) { if (Thread.currentThread().isInterrupted()) { listener.onFailure(new InterruptedException("remote connect thread got interrupted")); } try { if (seedNodes.hasNext()) { cancellableThreads.executeIO(() -> { - final DiscoveryNode seedNode = seedNodes.next().get(); + final DiscoveryNode seedNode = maybeAddProxyAddress(proxyAddress, seedNodes.next().get()); final TransportService.HandshakeResponse handshakeResponse; - Transport.Connection connection = transportService.openConnection(seedNode, + Transport.Connection connection = manager.openConnection(seedNode, ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null)); boolean success = false; try { @@ -480,9 +499,9 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo throw ex; } - final DiscoveryNode handshakeNode = handshakeResponse.getDiscoveryNode(); + final DiscoveryNode handshakeNode = maybeAddProxyAddress(proxyAddress, handshakeResponse.getDiscoveryNode()); if (nodePredicate.test(handshakeNode) && connectedNodes.size() < maxNumRemoteConnections) { - transportService.connectToNode(handshakeNode, getRemoteProfile(handshakeResponse.getClusterName())); + manager.connectToNode(handshakeNode, remoteProfile, transportService.connectionValidator(handshakeNode)); if (remoteClusterName.get() == null) { assert handshakeResponse.getClusterName().value() != null; remoteClusterName.set(handshakeResponse.getClusterName()); @@ -524,7 +543,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo // ISE if we fail the handshake with an version incompatible node if (seedNodes.hasNext()) { logger.debug(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); - collectRemoteNodes(seedNodes, transportService, listener); + collectRemoteNodes(seedNodes, transportService, manager, listener); } else { listener.onFailure(ex); } @@ -552,7 +571,6 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo /* This class handles the _state response from the remote cluster when sniffing nodes to connect to */ private class SniffClusterStateResponseHandler implements TransportResponseHandler { - private final TransportService transportService; private final 
Transport.Connection connection; private final ActionListener listener; private final Iterator> seedNodes; @@ -561,7 +579,6 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo SniffClusterStateResponseHandler(TransportService transportService, Transport.Connection connection, ActionListener listener, Iterator> seedNodes, CancellableThreads cancellableThreads) { - this.transportService = transportService; this.connection = connection; this.listener = listener; this.seedNodes = seedNodes; @@ -589,11 +606,12 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo cancellableThreads.executeIO(() -> { DiscoveryNodes nodes = response.getState().nodes(); Iterable nodesIter = nodes.getNodes()::valuesIt; - for (DiscoveryNode node : nodesIter) { + for (DiscoveryNode n : nodesIter) { + DiscoveryNode node = maybeAddProxyAddress(proxyAddress, n); if (nodePredicate.test(node) && connectedNodes.size() < maxNumRemoteConnections) { try { - transportService.connectToNode(node, getRemoteProfile(remoteClusterName.get())); // noop if node is - // connected + connectionManager.connectToNode(node, remoteProfile, + transportService.connectionValidator(node)); // noop if node is connected connectedNodes.add(node); } catch (ConnectTransportException | IllegalStateException ex) { // ISE if we fail the handshake with an version incompatible node @@ -609,7 +627,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo listener.onFailure(ex); // we got canceled - fail the listener and step out } catch (Exception ex) { logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); - collectRemoteNodes(seedNodes, transportService, listener); + collectRemoteNodes(seedNodes, transportService, connectionManager, listener); } } @@ -620,7 +638,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo IOUtils.closeWhileHandlingException(connection); } finally { // once the connection is closed lets try the next node - collectRemoteNodes(seedNodes, transportService, listener); + collectRemoteNodes(seedNodes, transportService, connectionManager, listener); } } @@ -652,7 +670,8 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo * Get the information about remote nodes to be rendered on {@code _remote/info} requests. 
*/ public RemoteConnectionInfo getConnectionInfo() { - List seedNodeAddresses = seedNodes.stream().map(node -> node.get().getAddress()).collect(Collectors.toList()); + List seedNodeAddresses = seedNodes.stream().map(node -> node.get().getAddress()).collect + (Collectors.toList()); TimeValue initialConnectionTimeout = RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); return new RemoteConnectionInfo(clusterAlias, seedNodeAddresses, maxNumRemoteConnections, connectedNodes.size(), initialConnectionTimeout, skipUnavailable); @@ -715,4 +734,8 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo } } } + + ConnectionManager getConnectionManager() { + return connectionManager; + } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 956a0d94179..0e8bd5cb28d 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -16,8 +16,10 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.transport; +import java.util.Collection; import java.util.function.Supplier; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -30,10 +32,10 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.core.internal.io.IOUtils; @@ -63,18 +65,39 @@ import static org.elasticsearch.common.settings.Setting.boolSetting; */ public final class RemoteClusterService extends RemoteClusterAware implements Closeable { + public static final Setting SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER = + Setting.intSetting("search.remote.connections_per_cluster", 3, 1, Setting.Property.NodeScope, Setting.Property.Deprecated); + /** * The maximum number of connections that will be established to a remote cluster. For instance if there is only a single * seed node, other nodes will be discovered up to the given number of nodes in this setting. The default is 3. 
*/ - public static final Setting REMOTE_CONNECTIONS_PER_CLUSTER = Setting.intSetting("search.remote.connections_per_cluster", - 3, 1, Setting.Property.NodeScope); + public static final Setting REMOTE_CONNECTIONS_PER_CLUSTER = + Setting.intSetting( + "cluster.remote.connections_per_cluster", + SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER, // the default needs to be three when fallback is removed + 1, + Setting.Property.NodeScope); + + public static final Setting SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING = + Setting.positiveTimeSetting( + "search.remote.initial_connect_timeout", + TimeValue.timeValueSeconds(30), + Setting.Property.NodeScope, + Setting.Property.Deprecated); /** * The initial connect timeout for remote cluster connections */ public static final Setting REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING = - Setting.positiveTimeSetting("search.remote.initial_connect_timeout", TimeValue.timeValueSeconds(30), Setting.Property.NodeScope); + Setting.positiveTimeSetting( + "cluster.remote.initial_connect_timeout", + SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, // the default needs to be thirty seconds when fallback is removed + TimeValue.timeValueSeconds(30), + Setting.Property.NodeScope); + + public static final Setting SEARCH_REMOTE_NODE_ATTRIBUTE = + Setting.simpleString("search.remote.node.attr", Setting.Property.NodeScope, Setting.Property.Deprecated); /** * The name of a node attribute to select nodes that should be connected to in the remote cluster. @@ -82,20 +105,46 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl * clusters. In that case {@code search.remote.node.attr: gateway} can be used to filter out other nodes in the remote cluster. * The value of the setting is expected to be a boolean, {@code true} for nodes that can become gateways, {@code false} otherwise. */ - public static final Setting REMOTE_NODE_ATTRIBUTE = Setting.simpleString("search.remote.node.attr", - Setting.Property.NodeScope); + public static final Setting REMOTE_NODE_ATTRIBUTE = + Setting.simpleString( + "cluster.remote.node.attr", + SEARCH_REMOTE_NODE_ATTRIBUTE, // no default is needed when fallback is removed, use simple string which gives empty + Setting.Property.NodeScope); + + public static final Setting SEARCH_ENABLE_REMOTE_CLUSTERS = + Setting.boolSetting("search.remote.connect", true, Setting.Property.NodeScope, Setting.Property.Deprecated); /** * If true connecting to remote clusters is supported on this node. If false this node will not establish * connections to any remote clusters configured. Search requests executed against this node (where this node is the coordinating node) * will fail if remote cluster syntax is used as an index pattern. 
The default is true */ - public static final Setting ENABLE_REMOTE_CLUSTERS = Setting.boolSetting("search.remote.connect", true, - Setting.Property.NodeScope); + public static final Setting ENABLE_REMOTE_CLUSTERS = + Setting.boolSetting( + "cluster.remote.connect", + SEARCH_ENABLE_REMOTE_CLUSTERS, // the default needs to be true when fallback is removed + Setting.Property.NodeScope); + + public static final Setting.AffixSetting SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE = + Setting.affixKeySetting( + "search.remote.", + "skip_unavailable", + key -> boolSetting(key, false, Setting.Property.Deprecated, Setting.Property.Dynamic, Setting.Property.NodeScope), + REMOTE_CLUSTERS_SEEDS); public static final Setting.AffixSetting REMOTE_CLUSTER_SKIP_UNAVAILABLE = - Setting.affixKeySetting("search.remote.", "skip_unavailable", - key -> boolSetting(key, false, Setting.Property.NodeScope, Setting.Property.Dynamic), REMOTE_CLUSTERS_SEEDS); + Setting.affixKeySetting( + "cluster.remote.", + "skip_unavailable", + key -> boolSetting( + key, + // the default needs to be false when fallback is removed + "_na_".equals(key) + ? SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(key) + : SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSetting(key.replaceAll("^cluster", "search")), + Setting.Property.Dynamic, + Setting.Property.NodeScope), + REMOTE_CLUSTERS_SEEDS); private static final Predicate DEFAULT_NODE_PREDICATE = (node) -> Version.CURRENT.isCompatible(node.getVersion()) && (node.isMasterNode() == false || node.isDataNode() || node.isIngestNode()); @@ -115,8 +164,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl * @param seeds a cluster alias to discovery node mapping representing the remote clusters seeds nodes * @param connectionListener a listener invoked once every configured cluster has been connected to */ - private synchronized void updateRemoteClusters(Map>> seeds, - ActionListener connectionListener) { + private synchronized void updateRemoteClusters(Map>>> seeds, + ActionListener connectionListener) { if (seeds.containsKey(LOCAL_CLUSTER_GROUP_KEY)) { throw new IllegalArgumentException("remote clusters must not have the empty string as its key"); } @@ -126,9 +175,12 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl } else { CountDown countDown = new CountDown(seeds.size()); remoteClusters.putAll(this.remoteClusters); - for (Map.Entry>> entry : seeds.entrySet()) { + for (Map.Entry>>> entry : seeds.entrySet()) { + List> seedList = entry.getValue().v2(); + String proxyAddress = entry.getValue().v1(); + RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey()); - if (entry.getValue().isEmpty()) { // with no seed nodes we just remove the connection + if (seedList.isEmpty()) { // with no seed nodes we just remove the connection try { IOUtils.close(remote); } catch (IOException e) { @@ -139,27 +191,28 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl } if (remote == null) { // this is a new cluster we have to add a new representation - remote = new RemoteClusterConnection(settings, entry.getKey(), entry.getValue(), transportService, numRemoteConnections, - getNodePredicate(settings)); + remote = new RemoteClusterConnection(settings, entry.getKey(), seedList, transportService, + new ConnectionManager(settings, transportService.transport, transportService.threadPool), numRemoteConnections, + getNodePredicate(settings), proxyAddress); remoteClusters.put(entry.getKey(), 
remote); } // now update the seed nodes no matter if it's new or already existing RemoteClusterConnection finalRemote = remote; - remote.updateSeedNodes(entry.getValue(), ActionListener.wrap( - response -> { - if (countDown.countDown()) { - connectionListener.onResponse(response); - } - }, - exception -> { - if (countDown.fastForward()) { - connectionListener.onFailure(exception); - } - if (finalRemote.isClosed() == false) { - logger.warn("failed to update seed list for cluster: " + entry.getKey(), exception); - } - })); + remote.updateSeedNodes(proxyAddress, seedList, ActionListener.wrap( + response -> { + if (countDown.countDown()) { + connectionListener.onResponse(response); + } + }, + exception -> { + if (countDown.fastForward()) { + connectionListener.onFailure(exception); + } + if (finalRemote.isClosed() == false) { + logger.warn("failed to update seed list for cluster: " + entry.getKey(), exception); + } + })); } } this.remoteClusters = Collections.unmodifiableMap(remoteClusters); @@ -193,7 +246,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl String clusterAlias = entry.getKey(); List originalIndices = entry.getValue(); originalIndicesMap.put(clusterAlias, - new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), indicesOptions)); + new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), indicesOptions)); } if (originalIndicesMap.containsKey(LOCAL_CLUSTER_GROUP_KEY) == false) { originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(Strings.EMPTY_ARRAY, indicesOptions)); @@ -225,38 +278,38 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl } final String[] indices = entry.getValue().indices(); ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices) - .indicesOptions(indicesOptions).local(true).preference(preference) - .routing(routing); + .indicesOptions(indicesOptions).local(true).preference(preference) + .routing(routing); remoteClusterConnection.fetchSearchShards(searchShardsRequest, - new ActionListener() { - @Override - public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { - searchShardsResponses.put(clusterName, clusterSearchShardsResponse); - if (responsesCountDown.countDown()) { - RemoteTransportException exception = transportException.get(); - if (exception == null) { - listener.onResponse(searchShardsResponses); - } else { - listener.onFailure(transportException.get()); + new ActionListener() { + @Override + public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { + searchShardsResponses.put(clusterName, clusterSearchShardsResponse); + if (responsesCountDown.countDown()) { + RemoteTransportException exception = transportException.get(); + if (exception == null) { + listener.onResponse(searchShardsResponses); + } else { + listener.onFailure(transportException.get()); + } } } - } - @Override - public void onFailure(Exception e) { - RemoteTransportException exception = new RemoteTransportException("error while communicating with remote cluster [" - + clusterName + "]", e); - if (transportException.compareAndSet(null, exception) == false) { - exception = transportException.accumulateAndGet(exception, (previous, current) -> { - current.addSuppressed(previous); - return current; - }); + @Override + public void onFailure(Exception e) { + RemoteTransportException exception = + new RemoteTransportException("error while communicating with remote cluster [" + 
clusterName + "]", e); + if (transportException.compareAndSet(null, exception) == false) { + exception = transportException.accumulateAndGet(exception, (previous, current) -> { + current.addSuppressed(previous); + return current; + }); + } + if (responsesCountDown.countDown()) { + listener.onFailure(exception); + } } - if (responsesCountDown.countDown()) { - listener.onFailure(exception); - } - } - }); + }); } } @@ -300,8 +353,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl @Override public void listenForUpdates(ClusterSettings clusterSettings) { super.listenForUpdates(clusterSettings); - clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, - (clusterAlias, value) -> {}); + clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {}); + clusterSettings.addAffixUpdateConsumer(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {}); } synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavailable) { @@ -311,22 +364,21 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl } } + @Override - protected void updateRemoteCluster(String clusterAlias, List addresses) { - updateRemoteCluster(clusterAlias, addresses, ActionListener.wrap((x) -> {}, (x) -> {})); + protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress) { + updateRemoteCluster(clusterAlias, addresses, proxyAddress, ActionListener.wrap((x) -> {}, (x) -> {})); } void updateRemoteCluster( final String clusterAlias, final List addresses, + final String proxyAddress, final ActionListener connectionListener) { - final List> nodes = addresses.stream().>map(address -> () -> { - final TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address)); - final String id = clusterAlias + "#" + transportAddress.toString(); - final Version version = Version.CURRENT.minimumCompatibilityVersion(); - return new DiscoveryNode(id, transportAddress, version); - }).collect(Collectors.toList()); - updateRemoteClusters(Collections.singletonMap(clusterAlias, nodes), connectionListener); + final List> nodes = addresses.stream().>map(address -> () -> + buildSeedNode(clusterAlias, address, Strings.hasLength(proxyAddress)) + ).collect(Collectors.toList()); + updateRemoteClusters(Collections.singletonMap(clusterAlias, new Tuple<>(proxyAddress, nodes)), connectionListener); } /** @@ -336,7 +388,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl void initializeRemoteClusters() { final TimeValue timeValue = REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); final PlainActionFuture future = new PlainActionFuture<>(); - Map>> seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings); + Map>>> seeds = RemoteClusterAware.buildRemoteClustersDynamicConfig(settings); updateRemoteClusters(seeds, future); try { future.get(timeValue.millis(), TimeUnit.MILLISECONDS); @@ -384,7 +436,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl } if (countDown.countDown()) { listener.onResponse((clusterAlias, nodeId) - -> clusterMap.getOrDefault(clusterAlias, nullFunction).apply(nodeId)); + -> clusterMap.getOrDefault(clusterAlias, nullFunction).apply(nodeId)); } } @@ -411,4 +463,9 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl } return new RemoteClusterAwareClient(settings, 
threadPool, transportService, clusterAlias); } + + Collection getConnections() { + return remoteClusters.values(); + } + } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 6d4ab80a892..2552007463b 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -207,6 +207,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final MeanMetric transmittedBytesMetric = new MeanMetric(); private volatile Map requestHandlers = Collections.emptyMap(); private final ResponseHandlers responseHandlers = new ResponseHandlers(); + private final TransportLogger transportLogger; private final BytesReference pingMessage; public TcpTransport(String transportName, Settings settings, ThreadPool threadPool, BigArrays bigArrays, @@ -221,6 +222,8 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings); this.networkService = networkService; this.transportName = transportName; + this.transportLogger = new TransportLogger(settings); + final Settings defaultFeatures = DEFAULT_FEATURES_SETTING.get(settings); if (defaultFeatures == null) { this.features = new String[0]; @@ -441,7 +444,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements try { PlainActionFuture connectFuture = PlainActionFuture.newFuture(); connectionFutures.add(connectFuture); - TcpChannel channel = initiateChannel(node.getAddress().address(), connectFuture); + TcpChannel channel = initiateChannel(node, connectFuture); logger.trace(() -> new ParameterizedMessage("Tcp transport client channel opened: {}", channel)); channels.add(channel); } catch (Exception e) { @@ -788,7 +791,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements // in case we are able to return data, serialize the exception content and send it back to the client if (channel.isOpen()) { BytesArray message = new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)); - final SendMetricListener closeChannel = new SendMetricListener(message.length()) { + final SendMetricListener listener = new SendMetricListener(message.length()) { @Override protected void innerInnerOnResponse(Void v) { CloseableChannel.closeChannel(channel); @@ -800,7 +803,14 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements CloseableChannel.closeChannel(channel); } }; - internalSendMessage(channel, message, closeChannel); + // We do not call internalSendMessage because we are not sending an elasticsearch + // binary message; we are just serializing an exception here, not formatting it + // as an elasticsearch transport message. + try { + channel.sendMessage(message, listener); + } catch (Exception ex) { + listener.onFailure(ex); + } } } else { logger.warn(() -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); @@ -841,12 +851,12 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements /** * Initiate a single tcp socket channel.
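For orientation while reading the `initiateChannel` and ping changes below: assuming the usual 'E','S' marker bytes and a big-endian four-byte payload length (which matches the `HEADER_SIZE` computed in the new `TransportLogger`), a keep-alive ping is simply a frame with a zero-length payload. This sketch is illustrative, not code from the patch; the class name is hypothetical:

```java
import java.nio.ByteBuffer;

final class PingFrameSketch {
    // Builds a 6-byte ping frame: two marker bytes plus a zero payload length.
    static byte[] pingFrame() {
        ByteBuffer buf = ByteBuffer.allocate(6);
        buf.put((byte) 'E').put((byte) 'S'); // TcpHeader.MARKER_BYTES_SIZE == 2
        buf.putInt(0);                       // TcpHeader.MESSAGE_LENGTH_SIZE == 4, zero payload == ping
        return buf.array();
    }

    // Mirrors the dispatch rule in the new inboundMessage method: pings are
    // trace-logged by TransportLogger but never handed to messageReceived.
    static boolean shouldDispatch(int decodedPayloadLength) {
        return decodedPayloadLength != 0;
    }
}
```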
* - * @param address address for the initiated connection + * @param node the node for the initiated connection * @param connectListener listener to be called when the connection is complete * @return the pending connection * @throws IOException if an I/O exception occurs while opening the channel */ - protected abstract TcpChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException; + protected abstract TcpChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException; /** * Called to tear down internal resources @@ -906,6 +916,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements * sends a message to the given channel, using the given callbacks. */ private void internalSendMessage(TcpChannel channel, BytesReference message, SendMetricListener listener) { + transportLogger.logOutboundMessage(channel, message); try { channel.sendMessage(message, listener); } catch (Exception ex) { @@ -1050,6 +1061,24 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements return new CompositeBytesReference(header, messageBody, zeroCopyBuffer); } + /** + * Handles an inbound message that has been decoded. + * + * @param channel the channel the message is from + * @param message the message + */ + public void inboundMessage(TcpChannel channel, BytesReference message) { + try { + transportLogger.logInboundMessage(channel, message); + // Message length of 0 is a ping + if (message.length() != 0) { + messageReceived(message, channel); + } + } catch (Exception e) { + onException(channel, e); + } + } + /** * Consumes bytes that are available from network reads. This method returns the number of bytes consumed * in this call. @@ -1067,15 +1096,8 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements if (message == null) { return 0; - } else if (message.length() == 0) { - // This is a ping and should not be handled. - return BYTES_NEEDED_FOR_MESSAGE_SIZE; } else { - try { - messageReceived(message, channel); - } catch (Exception e) { - onException(channel, e); - } + inboundMessage(channel, message); return message.length() + BYTES_NEEDED_FOR_MESSAGE_SIZE; } } @@ -1091,7 +1113,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements * @throws IllegalArgumentException if the message length is greater than the maximum allowed frame size. * This is dependent on the available memory. */ - public static BytesReference decodeFrame(BytesReference networkBytes) throws IOException { + static BytesReference decodeFrame(BytesReference networkBytes) throws IOException { int messageLength = readMessageLength(networkBytes); if (messageLength == -1) { return null; diff --git a/server/src/main/java/org/elasticsearch/transport/TransportLogger.java b/server/src/main/java/org/elasticsearch/transport/TransportLogger.java new file mode 100644 index 00000000000..3120620b053 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/TransportLogger.java @@ -0,0 +1,122 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.Compressor; +import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.compress.NotCompressedException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.internal.io.IOUtils; + +import java.io.IOException; + +public final class TransportLogger { + + private final Logger logger; + private static final int HEADER_SIZE = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; + + TransportLogger(Settings settings) { + logger = Loggers.getLogger(TransportLogger.class, settings); + } + + void logInboundMessage(TcpChannel channel, BytesReference message) { + if (logger.isTraceEnabled()) { + try { + String logMessage = format(channel, message, "READ"); + logger.trace(logMessage); + } catch (IOException e) { + logger.trace("an exception occurred formatting a READ trace message", e); + } + } + } + + void logOutboundMessage(TcpChannel channel, BytesReference message) { + if (logger.isTraceEnabled()) { + try { + BytesReference withoutHeader = message.slice(HEADER_SIZE, message.length() - HEADER_SIZE); + String logMessage = format(channel, withoutHeader, "WRITE"); + logger.trace(logMessage); + } catch (IOException e) { + logger.trace("an exception occurred formatting a WRITE trace message", e); + } + } + } + + private String format(TcpChannel channel, BytesReference message, String event) throws IOException { + final StringBuilder sb = new StringBuilder(); + sb.append(channel); + int messageLengthWithHeader = HEADER_SIZE + message.length(); + // This is a ping + if (message.length() == 0) { + sb.append(" [ping]").append(' ').append(event).append(": ").append(messageLengthWithHeader).append('B'); + } else { + boolean success = false; + StreamInput streamInput = message.streamInput(); + try { + final long requestId = streamInput.readLong(); + final byte status = streamInput.readByte(); + final boolean isRequest = TransportStatus.isRequest(status); + final String type = isRequest ? 
"request" : "response"; + final String version = Version.fromId(streamInput.readInt()).toString(); + sb.append(" [length: ").append(messageLengthWithHeader); + sb.append(", request id: ").append(requestId); + sb.append(", type: ").append(type); + sb.append(", version: ").append(version); + + if (isRequest) { + if (TransportStatus.isCompress(status)) { + Compressor compressor; + try { + final int bytesConsumed = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE; + compressor = CompressorFactory.compressor(message.slice(bytesConsumed, message.length() - bytesConsumed)); + } catch (NotCompressedException ex) { + throw new IllegalStateException(ex); + } + streamInput = compressor.streamInput(streamInput); + } + + try (ThreadContext context = new ThreadContext(Settings.EMPTY)) { + context.readHeaders(streamInput); + } + // now we decode the features + if (streamInput.getVersion().onOrAfter(Version.V_6_3_0)) { + streamInput.readStringArray(); + } + sb.append(", action: ").append(streamInput.readString()); + } + sb.append(']'); + sb.append(' ').append(event).append(": ").append(messageLengthWithHeader).append('B'); + success = true; + } finally { + if (success) { + IOUtils.close(streamInput); + } else { + IOUtils.closeWhileHandlingException(streamInput); + } + } + } + return sb.toString(); + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index fb14ae96dbf..e37ea81211a 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -28,6 +28,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -56,6 +57,7 @@ import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.UnknownHostException; import java.util.Arrays; import java.util.Collections; @@ -268,8 +270,9 @@ public class TransportService extends AbstractLifecycleComponent implements Tran @Override protected void doStop() { try { - connectionManager.close(); - transport.stop(); + IOUtils.close(connectionManager, remoteClusterService, transport::stop); + } catch (IOException e) { + throw new UncheckedIOException(e); } finally { // in case the transport is not connected to our local node (thus cleaned on node disconnect) // make sure to clean any leftover on going handles @@ -306,7 +309,7 @@ public class TransportService extends AbstractLifecycleComponent implements Tran @Override protected void doClose() throws IOException { - IOUtils.close(remoteClusterService, transport); + transport.close(); } /** @@ -364,14 +367,18 @@ public class TransportService extends AbstractLifecycleComponent implements Tran if (isLocalNode(node)) { return; } + connectionManager.connectToNode(node, connectionProfile, connectionValidator(node)); + } - connectionManager.connectToNode(node, connectionProfile, (newConnection, actualProfile) -> { + public CheckedBiConsumer connectionValidator(DiscoveryNode node) { + return (newConnection, actualProfile) -> { // We don't 
validate cluster names to allow for CCS connections. final DiscoveryNode remote = handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true).discoveryNode; if (validateConnections && node.equals(remote) == false) { throw new ConnectTransportException(node, "handshake failed. unexpected remote node " + remote); } - }); + }; + } /** @@ -562,8 +569,12 @@ public class TransportService extends AbstractLifecycleComponent implements Tran final TransportRequest request, final TransportRequestOptions options, TransportResponseHandler handler) { - - asyncSender.sendRequest(connection, action, request, options, handler); + try { + asyncSender.sendRequest(connection, action, request, options, handler); + } catch (NodeNotConnectedException ex) { + // the caller might not handle this so we invoke the handler + handler.handleException(ex); + } } /** diff --git a/server/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java b/server/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java index bce5965e50b..50c80b8e435 100644 --- a/server/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java +++ b/server/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java @@ -28,23 +28,26 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.CheckHits; import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.apache.lucene.search.grouping.CollapsingTopDocsCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; -import org.apache.lucene.util.TestUtil; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -54,7 +57,6 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import static org.hamcrest.core.IsEqual.equalTo; public class CollapsingTopDocsCollectorTests extends ESTestCase { private static class SegmentSearcher extends IndexSearcher { @@ -84,15 +86,12 @@ public class CollapsingTopDocsCollectorTests extends ESTestCase { } > void assertSearchCollapse(CollapsingDocValuesProducer dvProducers, boolean numeric) throws IOException { - assertSearchCollapse(dvProducers, numeric, true, true); - assertSearchCollapse(dvProducers, numeric, true, false); - assertSearchCollapse(dvProducers, numeric, false, true); - assertSearchCollapse(dvProducers, numeric, false, false); + assertSearchCollapse(dvProducers, numeric, true); + assertSearchCollapse(dvProducers, numeric, false); } private > void assertSearchCollapse(CollapsingDocValuesProducer dvProducers, - boolean numeric, boolean multivalued, - boolean trackMaxScores) throws IOException { + boolean numeric, boolean multivalued) 
throws IOException { final int numDocs = randomIntBetween(1000, 2000); int maxGroup = randomIntBetween(2, 500); final Directory dir = newDirectory(); @@ -123,29 +122,25 @@ public class CollapsingTopDocsCollectorTests extends ESTestCase { final CollapsingTopDocsCollector collapsingCollector; if (numeric) { collapsingCollector = - CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups, trackMaxScores); + CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups); } else { collapsingCollector = - CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups, trackMaxScores); + CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups); } TopFieldCollector topFieldCollector = - TopFieldCollector.create(sort, totalHits, true, trackMaxScores, trackMaxScores, true); - - searcher.search(new MatchAllDocsQuery(), collapsingCollector); - searcher.search(new MatchAllDocsQuery(), topFieldCollector); + TopFieldCollector.create(sort, totalHits, Integer.MAX_VALUE); + Query query = new MatchAllDocsQuery(); + searcher.search(query, collapsingCollector); + searcher.search(query, topFieldCollector); CollapseTopFieldDocs collapseTopFieldDocs = collapsingCollector.getTopDocs(); TopFieldDocs topDocs = topFieldCollector.topDocs(); assertEquals(collapseField.getField(), collapseTopFieldDocs.field); assertEquals(expectedNumGroups, collapseTopFieldDocs.scoreDocs.length); - assertEquals(totalHits, collapseTopFieldDocs.totalHits); + assertEquals(totalHits, collapseTopFieldDocs.totalHits.value); + assertEquals(TotalHits.Relation.EQUAL_TO, collapseTopFieldDocs.totalHits.relation); assertEquals(totalHits, topDocs.scoreDocs.length); - assertEquals(totalHits, topDocs.totalHits); - if (trackMaxScores) { - assertThat(collapseTopFieldDocs.getMaxScore(), equalTo(topDocs.getMaxScore())); - } else { - assertThat(collapseTopFieldDocs.getMaxScore(), equalTo(Float.NaN)); - } + assertEquals(totalHits, topDocs.totalHits.value); Set seen = new HashSet<>(); // collapse field is the last sort @@ -170,7 +165,6 @@ public class CollapsingTopDocsCollectorTests extends ESTestCase { assertTrue(seen.contains(fieldDoc.fields[collapseIndex])); } - // check merge final IndexReaderContext ctx = searcher.getTopReaderContext(); final SegmentSearcher[] subSearchers; @@ -196,27 +190,27 @@ public class CollapsingTopDocsCollectorTests extends ESTestCase { } final CollapseTopFieldDocs[] shardHits = new CollapseTopFieldDocs[subSearchers.length]; - final Weight weight = searcher.createNormalizedWeight(new MatchAllDocsQuery(), true); + final Weight weight = searcher.createWeight(searcher.rewrite(new MatchAllDocsQuery()), ScoreMode.COMPLETE, 1f); for (int shardIDX = 0; shardIDX < subSearchers.length; shardIDX++) { final SegmentSearcher subSearcher = subSearchers[shardIDX]; final CollapsingTopDocsCollector c; if (numeric) { - c = CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups, trackMaxScores); + c = CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups); } else { - c = CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups, trackMaxScores); + c = CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups); } subSearcher.search(weight, c); shardHits[shardIDX] = c.getTopDocs(); } CollapseTopFieldDocs mergedFieldDocs = CollapseTopFieldDocs.merge(sort, 0, expectedNumGroups, shardHits, 
true); - assertTopDocsEquals(mergedFieldDocs, collapseTopFieldDocs); + assertTopDocsEquals(query, mergedFieldDocs, collapseTopFieldDocs); w.close(); reader.close(); dir.close(); } - private static void assertTopDocsEquals(CollapseTopFieldDocs topDocs1, CollapseTopFieldDocs topDocs2) { - TestUtil.assertEquals(topDocs1, topDocs2); + private static void assertTopDocsEquals(Query query, CollapseTopFieldDocs topDocs1, CollapseTopFieldDocs topDocs2) { + CheckHits.checkEqual(query, topDocs1.scoreDocs, topDocs2.scoreDocs); assertArrayEquals(topDocs1.collapseValues, topDocs2.collapseValues); } @@ -384,7 +378,7 @@ public class CollapsingTopDocsCollectorTests extends ESTestCase { sortField.setMissingValue(Long.MAX_VALUE); Sort sort = new Sort(sortField); final CollapsingTopDocsCollector collapsingCollector = - CollapsingTopDocsCollector.createNumeric("group", sort, 10, false); + CollapsingTopDocsCollector.createNumeric("group", sort, 10); searcher.search(new MatchAllDocsQuery(), collapsingCollector); CollapseTopFieldDocs collapseTopFieldDocs = collapsingCollector.getTopDocs(); assertEquals(4, collapseTopFieldDocs.scoreDocs.length); @@ -420,7 +414,7 @@ public class CollapsingTopDocsCollectorTests extends ESTestCase { final IndexSearcher searcher = newSearcher(reader); Sort sort = new Sort(new SortField("group", SortField.Type.STRING_VAL)); final CollapsingTopDocsCollector collapsingCollector = - CollapsingTopDocsCollector.createKeyword("group", sort, 10, false); + CollapsingTopDocsCollector.createKeyword("group", sort, 10); searcher.search(new MatchAllDocsQuery(), collapsingCollector); CollapseTopFieldDocs collapseTopFieldDocs = collapsingCollector.getTopDocs(); assertEquals(4, collapseTopFieldDocs.scoreDocs.length); diff --git a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index 8f96936e43b..5b37b4bf481 100644 --- a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.QueryUtils; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.similarities.BM25Similarity; @@ -63,15 +64,12 @@ public class BlendedTermQueryTests extends ESTestCase { "generator", "foo fighers - generator", "foo fighters generator" }; final boolean omitNorms = random().nextBoolean(); + final boolean omitFreqs = random().nextBoolean(); FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); - ft.setIndexOptions(random().nextBoolean() ? IndexOptions.DOCS : IndexOptions.DOCS_AND_FREQS); + ft.setIndexOptions(omitFreqs ? IndexOptions.DOCS : IndexOptions.DOCS_AND_FREQS); ft.setOmitNorms(omitNorms); ft.freeze(); - FieldType ft1 = new FieldType(TextField.TYPE_NOT_STORED); - ft1.setIndexOptions(random().nextBoolean() ? 
IndexOptions.DOCS : IndexOptions.DOCS_AND_FREQS); - ft1.setOmitNorms(omitNorms); - ft1.freeze(); for (int i = 0; i < username.length; i++) { Document d = new Document(); d.add(new TextField("id", Integer.toString(i), Field.Store.YES)); @@ -83,8 +81,8 @@ public class BlendedTermQueryTests extends ESTestCase { for (int j = 0; j < iters; j++) { Document d = new Document(); d.add(new TextField("id", Integer.toString(username.length + j), Field.Store.YES)); - d.add(new Field("username", "foo fighters", ft1)); - d.add(new Field("song", "some bogus text to bump up IDF", ft1)); + d.add(new Field("username", "foo fighters", ft)); + d.add(new Field("song", "some bogus text to bump up IDF", ft)); w.addDocument(d); } w.commit(); @@ -167,7 +165,7 @@ public class BlendedTermQueryTests extends ESTestCase { BlendedTermQuery blendedTermQuery = BlendedTermQuery.dismaxBlendedQuery(terms.toArray(new Term[0]), random().nextFloat()); Set extracted = new HashSet<>(); IndexSearcher searcher = new IndexSearcher(new MultiReader()); - searcher.createNormalizedWeight(blendedTermQuery, false).extractTerms(extracted); + searcher.createWeight(searcher.rewrite(blendedTermQuery), ScoreMode.COMPLETE_NO_SCORES, 1f).extractTerms(extracted); assertThat(extracted.size(), equalTo(terms.size())); assertThat(extracted, containsInAnyOrder(terms.toArray(new Term[0]))); } diff --git a/server/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java b/server/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java index 2def2702d38..ec468fd8d9b 100644 --- a/server/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java @@ -19,7 +19,7 @@ package org.apache.lucene.queries; import org.apache.lucene.document.InetAddressPoint; -import org.apache.lucene.util.StringHelper; +import org.apache.lucene.util.FutureArrays; import org.elasticsearch.index.mapper.RangeFieldMapper; import java.net.InetAddress; @@ -44,7 +44,7 @@ public class InetAddressRandomBinaryDocValuesRangeQueryTests extends BaseRandomB byte[] bMin = InetAddressPoint.encode(min); InetAddress max = nextInetaddress(); byte[] bMax = InetAddressPoint.encode(max); - if (StringHelper.compare(bMin.length, bMin, 0, bMax, 0) > 0) { + if (FutureArrays.compareUnsigned(bMin, 0, bMin.length, bMax, 0, bMin.length) > 0) { return new IpRange(max, min); } return new IpRange(min, max); @@ -91,7 +91,7 @@ public class InetAddressRandomBinaryDocValuesRangeQueryTests extends BaseRandomB InetAddress v = (InetAddress)val; byte[] e = InetAddressPoint.encode(v); - if (StringHelper.compare(e.length, min, 0, e, 0) < 0) { + if (FutureArrays.compareUnsigned(min, 0, e.length, e, 0, e.length) < 0) { max = e; maxAddress = v; } else { @@ -111,7 +111,7 @@ public class InetAddressRandomBinaryDocValuesRangeQueryTests extends BaseRandomB InetAddress v = (InetAddress)val; byte[] e = InetAddressPoint.encode(v); - if (StringHelper.compare(e.length, max, 0, e, 0) > 0) { + if (FutureArrays.compareUnsigned(max, 0, e.length, e, 0, e.length) > 0) { min = e; minAddress = v; } else { @@ -123,22 +123,22 @@ public class InetAddressRandomBinaryDocValuesRangeQueryTests extends BaseRandomB @Override protected boolean isDisjoint(Range o) { IpRange other = (IpRange) o; - return StringHelper.compare(min.length, min, 0, other.max, 0) > 0 || - StringHelper.compare(max.length, max, 0, other.min, 0) < 0; + return 
FutureArrays.compareUnsigned(min, 0, min.length, other.max, 0, min.length) > 0 || + FutureArrays.compareUnsigned(max, 0, max.length, other.min, 0, max.length) < 0; } @Override protected boolean isWithin(Range o) { IpRange other = (IpRange)o; - return StringHelper.compare(min.length, min, 0, other.min, 0) >= 0 && - StringHelper.compare(max.length, max, 0, other.max, 0) <= 0; + return FutureArrays.compareUnsigned(min, 0, min.length, other.min, 0, min.length) >= 0 && + FutureArrays.compareUnsigned(max, 0, max.length, other.max, 0, max.length) <= 0; } @Override protected boolean contains(Range o) { IpRange other = (IpRange)o; - return StringHelper.compare(min.length, min, 0, other.min, 0) <= 0 && - StringHelper.compare(max.length, max, 0, other.max, 0) >= 0; + return FutureArrays.compareUnsigned(min, 0, min.length, other.min, 0, min.length) <= 0 && + FutureArrays.compareUnsigned(max, 0, max.length, other.max, 0, max.length) >= 0; } } diff --git a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java index 796553034fb..a6e676006fd 100644 --- a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java +++ b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java @@ -74,7 +74,7 @@ public class CustomUnifiedHighlighterTests extends ESTestCase { IndexSearcher searcher = newSearcher(reader); iw.close(); TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 1, Sort.INDEXORDER); - assertThat(topDocs.totalHits, equalTo(1L)); + assertThat(topDocs.totalHits.value, equalTo(1L)); String rawValue = Strings.arrayToDelimitedString(inputs, String.valueOf(MULTIVAL_SEP_CHAR)); CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, analyzer, null, new CustomPassageFormatter("", "", new DefaultEncoder()), locale, diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 669d1b33c07..b0c0814d1c0 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -42,8 +42,6 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.breaker.CircuitBreakingException; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -105,7 +103,6 @@ import java.nio.file.NotDirectoryException; import java.nio.file.Path; import java.nio.file.attribute.BasicFileAttributes; import java.util.Arrays; -import java.util.Base64; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -117,7 +114,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singleton; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.instanceOf; public class ExceptionSerializationTests extends ESTestCase { @@ -874,89 +870,12 @@ public class ExceptionSerializationTests extends ESTestCase { public void testShardLockObtainFailedException() throws IOException { ShardId shardId = new 
ShardId("foo", "_na_", 1); ShardLockObtainFailedException orig = new ShardLockObtainFailedException(shardId, "boom"); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); - if (version.before(Version.V_5_0_2)) { - version = Version.V_5_0_2; - } + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); ShardLockObtainFailedException ex = serialize(orig, version); assertEquals(orig.getMessage(), ex.getMessage()); assertEquals(orig.getShardId(), ex.getShardId()); } - public void testBWCShardLockObtainFailedException() throws IOException { - ShardId shardId = new ShardId("foo", "_na_", 1); - ShardLockObtainFailedException orig = new ShardLockObtainFailedException(shardId, "boom"); - Exception ex = serialize((Exception)orig, randomFrom(Version.V_5_0_0, Version.V_5_0_1)); - assertThat(ex, instanceOf(NotSerializableExceptionWrapper.class)); - assertEquals("shard_lock_obtain_failed_exception: [foo][1]: boom", ex.getMessage()); - } - - public void testBWCHeadersAndMetadata() throws IOException { - //this is a request serialized with headers only, no metadata as they were added in 5.3.0 - BytesReference decoded = new BytesArray(Base64.getDecoder().decode - ("AQ10ZXN0ICBtZXNzYWdlACYtb3JnLmVsYXN0aWNzZWFyY2guRXhjZXB0aW9uU2VyaWFsaXphdGlvblRlc3RzASBFeGNlcHRpb25TZXJpYWxpemF0aW9uVG" + - "VzdHMuamF2YQR0ZXN03wYkc3VuLnJlZmxlY3QuTmF0aXZlTWV0aG9kQWNjZXNzb3JJbXBsAR1OYXRpdmVNZXRob2RBY2Nlc3NvckltcGwuamF2Y" + - "QdpbnZva2Uw/v///w8kc3VuLnJlZmxlY3QuTmF0aXZlTWV0aG9kQWNjZXNzb3JJbXBsAR1OYXRpdmVNZXRob2RBY2Nlc3NvckltcGwuamF2YQZp" + - "bnZva2U+KHN1bi5yZWZsZWN0LkRlbGVnYXRpbmdNZXRob2RBY2Nlc3NvckltcGwBIURlbGVnYXRpbmdNZXRob2RBY2Nlc3NvckltcGwuamF2YQZ" + - "pbnZva2UrGGphdmEubGFuZy5yZWZsZWN0Lk1ldGhvZAELTWV0aG9kLmphdmEGaW52b2tl8QMzY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdG" + - "VzdGluZy5SYW5kb21pemVkUnVubmVyARVSYW5kb21pemVkUnVubmVyLmphdmEGaW52b2tlsQ01Y29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkd" + - "GVzdGluZy5SYW5kb21pemVkUnVubmVyJDgBFVJhbmRvbWl6ZWRSdW5uZXIuamF2YQhldmFsdWF0ZYsHNWNvbS5jYXJyb3RzZWFyY2gucmFuZG9t" + - "aXplZHRlc3RpbmcuUmFuZG9taXplZFJ1bm5lciQ5ARVSYW5kb21pemVkUnVubmVyLmphdmEIZXZhbHVhdGWvBzZjb20uY2Fycm90c2VhcmNoLnJ" + - "hbmRvbWl6ZWR0ZXN0aW5nLlJhbmRvbWl6ZWRSdW5uZXIkMTABFVJhbmRvbWl6ZWRSdW5uZXIuamF2YQhldmFsdWF0Zb0HOWNvbS5jYXJyb3RzZW" + - "FyY2gucmFuZG9taXplZHRlc3RpbmcucnVsZXMuU3RhdGVtZW50QWRhcHRlcgEVU3RhdGVtZW50QWRhcHRlci5qYXZhCGV2YWx1YXRlJDVvcmcuY" + - "XBhY2hlLmx1Y2VuZS51dGlsLlRlc3RSdWxlU2V0dXBUZWFyZG93bkNoYWluZWQkMQEhVGVzdFJ1bGVTZXR1cFRlYXJkb3duQ2hhaW5lZC5qYXZh" + - "CGV2YWx1YXRlMTBvcmcuYXBhY2hlLmx1Y2VuZS51dGlsLkFic3RyYWN0QmVmb3JlQWZ0ZXJSdWxlJDEBHEFic3RyYWN0QmVmb3JlQWZ0ZXJSdWx" + - "lLmphdmEIZXZhbHVhdGUtMm9yZy5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVUaHJlYWRBbmRUZXN0TmFtZSQxAR5UZXN0UnVsZVRocmVhZE" + - "FuZFRlc3ROYW1lLmphdmEIZXZhbHVhdGUwN29yZy5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVJZ25vcmVBZnRlck1heEZhaWx1cmVzJDEBI" + - "1Rlc3RSdWxlSWdub3JlQWZ0ZXJNYXhGYWlsdXJlcy5qYXZhCGV2YWx1YXRlQCxvcmcuYXBhY2hlLmx1Y2VuZS51dGlsLlRlc3RSdWxlTWFya0Zh" + - "aWx1cmUkMQEYVGVzdFJ1bGVNYXJrRmFpbHVyZS5qYXZhCGV2YWx1YXRlLzljb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGV" + - "zLlN0YXRlbWVudEFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSREY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdG" + - "luZy5UaHJlYWRMZWFrQ29udHJvbCRTdGF0ZW1lbnRSdW5uZXIBFlRocmVhZExlYWtDb250cm9sLmphdmEDcnVu7wI0Y29tLmNhcnJvdHNlYXJja" + - "C5yYW5kb21pemVkdGVzdGluZy5UaHJlYWRMZWFrQ29udHJvbAEWVGhyZWFkTGVha0NvbnRyb2wuamF2YRJmb3JrVGltZW91dGluZ1Rhc2urBjZj" + - 
"b20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLlRocmVhZExlYWtDb250cm9sJDMBFlRocmVhZExlYWtDb250cm9sLmphdmEIZXZhbHV" + - "hdGXOAzNjb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLlJhbmRvbWl6ZWRSdW5uZXIBFVJhbmRvbWl6ZWRSdW5uZXIuamF2YQ1ydW" + - "5TaW5nbGVUZXN0lAc1Y29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdGluZy5SYW5kb21pemVkUnVubmVyJDUBFVJhbmRvbWl6ZWRSdW5uZ" + - "XIuamF2YQhldmFsdWF0ZaIGNWNvbS5jYXJyb3RzZWFyY2gucmFuZG9taXplZHRlc3RpbmcuUmFuZG9taXplZFJ1bm5lciQ2ARVSYW5kb21pemVk" + - "UnVubmVyLmphdmEIZXZhbHVhdGXUBjVjb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLlJhbmRvbWl6ZWRSdW5uZXIkNwEVUmFuZG9" + - "taXplZFJ1bm5lci5qYXZhCGV2YWx1YXRl3wYwb3JnLmFwYWNoZS5sdWNlbmUudXRpbC5BYnN0cmFjdEJlZm9yZUFmdGVyUnVsZSQxARxBYnN0cm" + - "FjdEJlZm9yZUFmdGVyUnVsZS5qYXZhCGV2YWx1YXRlLTljb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLlN0YXRlbWVud" + - "EFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSQvb3JnLmFwYWNoZS5sdWNlbmUudXRpbC5UZXN0UnVsZVN0b3JlQ2xhc3NO" + - "YW1lJDEBG1Rlc3RSdWxlU3RvcmVDbGFzc05hbWUuamF2YQhldmFsdWF0ZSlOY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdGluZy5ydWx" + - "lcy5Ob1NoYWRvd2luZ09yT3ZlcnJpZGVzT25NZXRob2RzUnVsZSQxAShOb1NoYWRvd2luZ09yT3ZlcnJpZGVzT25NZXRob2RzUnVsZS5qYXZhCG" + - "V2YWx1YXRlKE5jb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLk5vU2hhZG93aW5nT3JPdmVycmlkZXNPbk1ldGhvZHNSd" + - "WxlJDEBKE5vU2hhZG93aW5nT3JPdmVycmlkZXNPbk1ldGhvZHNSdWxlLmphdmEIZXZhbHVhdGUoOWNvbS5jYXJyb3RzZWFyY2gucmFuZG9taXpl" + - "ZHRlc3RpbmcucnVsZXMuU3RhdGVtZW50QWRhcHRlcgEVU3RhdGVtZW50QWRhcHRlci5qYXZhCGV2YWx1YXRlJDljb20uY2Fycm90c2VhcmNoLnJ" + - "hbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLlN0YXRlbWVudEFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSQ5Y29tLmNhcnJvdH" + - "NlYXJjaC5yYW5kb21pemVkdGVzdGluZy5ydWxlcy5TdGF0ZW1lbnRBZGFwdGVyARVTdGF0ZW1lbnRBZGFwdGVyLmphdmEIZXZhbHVhdGUkM29yZ" + - "y5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVBc3NlcnRpb25zUmVxdWlyZWQkMQEfVGVzdFJ1bGVBc3NlcnRpb25zUmVxdWlyZWQuamF2YQhl" + - "dmFsdWF0ZTUsb3JnLmFwYWNoZS5sdWNlbmUudXRpbC5UZXN0UnVsZU1hcmtGYWlsdXJlJDEBGFRlc3RSdWxlTWFya0ZhaWx1cmUuamF2YQhldmF" + - "sdWF0ZS83b3JnLmFwYWNoZS5sdWNlbmUudXRpbC5UZXN0UnVsZUlnbm9yZUFmdGVyTWF4RmFpbHVyZXMkMQEjVGVzdFJ1bGVJZ25vcmVBZnRlck" + - "1heEZhaWx1cmVzLmphdmEIZXZhbHVhdGVAMW9yZy5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVJZ25vcmVUZXN0U3VpdGVzJDEBHVRlc3RSd" + - "WxlSWdub3JlVGVzdFN1aXRlcy5qYXZhCGV2YWx1YXRlNjljb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLlN0YXRlbWVu" + - "dEFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSREY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdGluZy5UaHJlYWR" + - "MZWFrQ29udHJvbCRTdGF0ZW1lbnRSdW5uZXIBFlRocmVhZExlYWtDb250cm9sLmphdmEDcnVu7wIQamF2YS5sYW5nLlRocmVhZAELVGhyZWFkLm" + - "phdmEDcnVu6QUABAdoZWFkZXIyAQZ2YWx1ZTIKZXMuaGVhZGVyMwEGdmFsdWUzB2hlYWRlcjEBBnZhbHVlMQplcy5oZWFkZXI0AQZ2YWx1ZTQAA" + - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + - "AAAAA")); - - try (StreamInput in = decoded.streamInput()) { - //randomize the version across released and unreleased ones - Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - in.setVersion(version); - ElasticsearchException exception = new ElasticsearchException(in); - assertEquals("test message", exception.getMessage()); - //the headers received as part of a single set get split based on their prefix - assertEquals(2, exception.getHeaderKeys().size()); - assertEquals("value1", 
exception.getHeader("header1").get(0)); - assertEquals("value2", exception.getHeader("header2").get(0)); - assertEquals(2, exception.getMetadataKeys().size()); - assertEquals("value3", exception.getMetadata("es.header3").get(0)); - assertEquals("value4", exception.getMetadata("es.header4").get(0)); - } - } - private static class UnknownException extends Exception { UnknownException(final String message, final Exception cause) { super(message, cause); diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 74303bfb6d8..c0d29e86fd6 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -36,8 +36,8 @@ import java.util.Locale; import java.util.Map; import java.util.Set; -import static org.elasticsearch.Version.V_5_3_0; -import static org.elasticsearch.Version.V_6_0_0_beta1; +import static org.elasticsearch.Version.V_6_3_0; +import static org.elasticsearch.Version.V_7_0_0_alpha1; import static org.elasticsearch.test.VersionUtils.allVersions; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; @@ -50,30 +50,30 @@ import static org.hamcrest.Matchers.sameInstance; public class VersionTests extends ESTestCase { public void testVersionComparison() throws Exception { - assertThat(V_5_3_0.before(V_6_0_0_beta1), is(true)); - assertThat(V_5_3_0.before(V_5_3_0), is(false)); - assertThat(V_6_0_0_beta1.before(V_5_3_0), is(false)); + assertThat(V_6_3_0.before(V_7_0_0_alpha1), is(true)); + assertThat(V_6_3_0.before(V_6_3_0), is(false)); + assertThat(V_7_0_0_alpha1.before(V_6_3_0), is(false)); - assertThat(V_5_3_0.onOrBefore(V_6_0_0_beta1), is(true)); - assertThat(V_5_3_0.onOrBefore(V_5_3_0), is(true)); - assertThat(V_6_0_0_beta1.onOrBefore(V_5_3_0), is(false)); + assertThat(V_6_3_0.onOrBefore(V_7_0_0_alpha1), is(true)); + assertThat(V_6_3_0.onOrBefore(V_6_3_0), is(true)); + assertThat(V_7_0_0_alpha1.onOrBefore(V_6_3_0), is(false)); - assertThat(V_5_3_0.after(V_6_0_0_beta1), is(false)); - assertThat(V_5_3_0.after(V_5_3_0), is(false)); - assertThat(V_6_0_0_beta1.after(V_5_3_0), is(true)); + assertThat(V_6_3_0.after(V_7_0_0_alpha1), is(false)); + assertThat(V_6_3_0.after(V_6_3_0), is(false)); + assertThat(V_7_0_0_alpha1.after(V_6_3_0), is(true)); - assertThat(V_5_3_0.onOrAfter(V_6_0_0_beta1), is(false)); - assertThat(V_5_3_0.onOrAfter(V_5_3_0), is(true)); - assertThat(V_6_0_0_beta1.onOrAfter(V_5_3_0), is(true)); + assertThat(V_6_3_0.onOrAfter(V_7_0_0_alpha1), is(false)); + assertThat(V_6_3_0.onOrAfter(V_6_3_0), is(true)); + assertThat(V_7_0_0_alpha1.onOrAfter(V_6_3_0), is(true)); assertTrue(Version.fromString("5.0.0-alpha2").onOrAfter(Version.fromString("5.0.0-alpha1"))); assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2"))); assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24"))); assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0"))); - assertThat(V_5_3_0, is(lessThan(V_6_0_0_beta1))); - assertThat(V_5_3_0.compareTo(V_5_3_0), is(0)); - assertThat(V_6_0_0_beta1, is(greaterThan(V_5_3_0))); + assertThat(V_6_3_0, is(lessThan(V_7_0_0_alpha1))); + assertThat(V_6_3_0.compareTo(V_6_3_0), is(0)); + assertThat(V_7_0_0_alpha1, is(greaterThan(V_6_3_0))); } public void testMin() { @@ -101,12 +101,12 @@ public class VersionTests extends ESTestCase { } public void testMinimumIndexCompatibilityVersion() { - 
assertEquals(Version.V_5_0_0, Version.V_6_0_0_beta1.minimumIndexCompatibilityVersion()); - assertEquals(Version.fromId(2000099), Version.V_5_0_0.minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(5000099), Version.V_6_0_0_beta1.minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(2000099), Version.fromId(5000099).minimumIndexCompatibilityVersion()); assertEquals(Version.fromId(2000099), - Version.V_5_1_1.minimumIndexCompatibilityVersion()); + Version.fromId(5010000).minimumIndexCompatibilityVersion()); assertEquals(Version.fromId(2000099), - Version.V_5_0_0_alpha1.minimumIndexCompatibilityVersion()); + Version.fromId(5000001).minimumIndexCompatibilityVersion()); } public void testVersionConstantPresent() { @@ -160,31 +160,38 @@ public class VersionTests extends ESTestCase { public void testIndexCreatedVersion() { // an actual index has a IndexMetaData.SETTING_INDEX_UUID - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_2, - Version.V_5_2_0, Version.V_6_0_0_beta1); + final Version version = Version.V_6_0_0_beta1; assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build())); } public void testMinCompatVersion() { - Version prerelease = VersionUtils.getFirstVersion(); - assertThat(prerelease.minimumCompatibilityVersion(), equalTo(prerelease)); Version major = Version.fromString("2.0.0"); assertThat(Version.fromString("2.0.0").minimumCompatibilityVersion(), equalTo(major)); assertThat(Version.fromString("2.2.0").minimumCompatibilityVersion(), equalTo(major)); assertThat(Version.fromString("2.3.0").minimumCompatibilityVersion(), equalTo(major)); - // from 6.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is + + Version major5x = Version.fromString("5.0.0"); + assertThat(Version.fromString("5.0.0").minimumCompatibilityVersion(), equalTo(major5x)); + assertThat(Version.fromString("5.2.0").minimumCompatibilityVersion(), equalTo(major5x)); + assertThat(Version.fromString("5.3.0").minimumCompatibilityVersion(), equalTo(major5x)); + + Version major56x = Version.fromString("5.6.0"); + assertThat(Version.V_6_5_0.minimumCompatibilityVersion(), equalTo(major56x)); + assertThat(Version.V_6_3_1.minimumCompatibilityVersion(), equalTo(major56x)); + + // from 7.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 
5.x is
 // released since we need to bump the supported minor in Version#minimumCompatibilityVersion()
-        Version lastVersion = Version.V_5_6_0; // TODO: remove this once min compat version is a constant instead of method
-        assertEquals(lastVersion.major, Version.V_6_0_0_beta1.minimumCompatibilityVersion().major);
+        Version lastVersion = Version.V_6_5_0; // TODO: remove this once min compat version is a constant instead of method
+        assertEquals(lastVersion.major, Version.V_7_0_0_alpha1.minimumCompatibilityVersion().major);
         assertEquals("did you miss to bump the minor in Version#minimumCompatibilityVersion()",
-            lastVersion.minor, Version.V_6_0_0_beta1.minimumCompatibilityVersion().minor);
-        assertEquals(0, Version.V_6_0_0_beta1.minimumCompatibilityVersion().revision);
+            lastVersion.minor, Version.V_7_0_0_alpha1.minimumCompatibilityVersion().minor);
+        assertEquals(0, Version.V_7_0_0_alpha1.minimumCompatibilityVersion().revision);
     }

     public void testToString() {
         // with 2.0.beta we lowercase
         assertEquals("2.0.0-beta1", Version.fromString("2.0.0-beta1").toString());
-        assertEquals("5.0.0-alpha1", Version.V_5_0_0_alpha1.toString());
+        assertEquals("5.0.0-alpha1", Version.fromId(5000001).toString());
         assertEquals("2.3.0", Version.fromString("2.3.0").toString());
         assertEquals("0.90.0.Beta1", Version.fromString("0.90.0.Beta1").toString());
         assertEquals("1.0.0.Beta1", Version.fromString("1.0.0.Beta1").toString());
@@ -201,9 +208,9 @@ public class VersionTests extends ESTestCase {

     public void testIsAlpha() {
-        assertTrue(new Version(5000001, org.apache.lucene.util.Version.LUCENE_6_0_0).isAlpha());
-        assertFalse(new Version(4000002, org.apache.lucene.util.Version.LUCENE_6_0_0).isAlpha());
-        assertTrue(new Version(4000002, org.apache.lucene.util.Version.LUCENE_6_0_0).isBeta());
+        assertTrue(new Version(5000001, org.apache.lucene.util.Version.LUCENE_7_0_0).isAlpha());
+        assertFalse(new Version(4000002, org.apache.lucene.util.Version.LUCENE_7_0_0).isAlpha());
+        assertTrue(new Version(4000002, org.apache.lucene.util.Version.LUCENE_7_0_0).isBeta());
         assertTrue(Version.fromString("5.0.0-alpha14").isAlpha());
         assertEquals(5000014, Version.fromString("5.0.0-alpha14").id);
         assertTrue(Version.fromId(5000015).isAlpha());
@@ -219,7 +226,6 @@ public class VersionTests extends ESTestCase {
         }
     }

-
     public void testParseVersion() {
         final int iters = scaledRandomIntBetween(100, 1000);
         for (int i = 0; i < iters; i++) {
@@ -334,11 +340,11 @@ public class VersionTests extends ESTestCase {

     public void testIsCompatible() {
         assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()));
-        assertTrue(isCompatible(Version.V_5_6_0, Version.V_6_0_0_alpha2));
-        assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha2));
-        assertFalse(isCompatible(Version.fromId(2000099), Version.V_5_0_0));
-        assertFalse(isCompatible(Version.fromString("6.0.0"), Version.fromString("7.0.0")));
-        assertFalse(isCompatible(Version.fromString("6.0.0-alpha1"), Version.fromString("7.0.0")));
+        assertTrue(isCompatible(Version.V_6_5_0, Version.V_7_0_0_alpha1));
+        assertFalse(isCompatible(Version.fromId(2000099), Version.V_7_0_0_alpha1));
+        assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_5_0));
+        assertFalse(isCompatible(Version.fromString("7.0.0"), Version.fromString("8.0.0")));
+        assertFalse(isCompatible(Version.fromString("7.0.0-alpha1"), Version.fromString("8.0.0")));

         final Version currentMajorVersion = Version.fromId(Version.CURRENT.major * 1000000 + 99);
         final Version currentOrNextMajorVersion;
@@ -373,8 +379,8 @@ public class VersionTests extends ESTestCase {
                    isCompatible(VersionUtils.getPreviousMinorVersion(), currentOrNextMajorVersion),
                    equalTo(isCompatible));

-        assertFalse(isCompatible(Version.V_5_0_0, Version.fromString("6.0.0")));
-        assertFalse(isCompatible(Version.V_5_0_0, Version.fromString("7.0.0")));
+        assertFalse(isCompatible(Version.fromId(5000099), Version.fromString("6.0.0")));
+        assertFalse(isCompatible(Version.fromId(5000099), Version.fromString("7.0.0")));

         Version a = randomVersion(random());
         Version b = randomVersion(random());
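With the V_5_* constants removed, the assertions above switch to raw numeric ids via Version.fromId. A small self-contained sketch of the id arithmetic these tests imply — my reading, not an authoritative statement of the scheme, is that build 99 marks a GA release while low build numbers mark alpha/beta prereleases:

    // Sketch of the packing implied by fromId(5000099) == "5.0.0" and
    // fromId(5000001) == "5.0.0-alpha1" in the VersionTests hunks above.
    public class VersionIdSketch {
        static int versionId(int major, int minor, int revision, int build) {
            return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
        }

        public static void main(String[] args) {
            System.out.println(versionId(5, 0, 0, 99)); // 5000099 -> 5.0.0 (GA)
            System.out.println(versionId(5, 0, 0, 1));  // 5000001 -> 5.0.0-alpha1
        }
    }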
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java
index 7bf43b828c0..3384efcf836 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java
@@ -49,7 +49,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;

-import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween;
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;

diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java
index 232259948fb..5f5fe54321b 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java
@@ -54,7 +54,7 @@ public class ClusterSearchShardsRequestTests extends ESTestCase {
             request.routing(routings);
         }

-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT);
+        Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT);
         try (BytesStreamOutput out = new BytesStreamOutput()) {
             out.setVersion(version);
             request.writeTo(out);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java
index 90eb7cdcfd4..f685be02141 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java
@@ -77,7 +77,7 @@ public class ClusterSearchShardsResponseTests extends ESTestCase {
         List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
         entries.addAll(searchModule.getNamedWriteables());
         NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);
-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT);
+        Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT);
         try(BytesStreamOutput out = new BytesStreamOutput()) {
             out.setVersion(version);
             clusterSearchShardsResponse.writeTo(out);
@@ -93,11 +93,7 @@ public class ClusterSearchShardsResponseTests extends ESTestCase {
                 assertEquals(clusterSearchShardsGroup.getShardId(), deserializedGroup.getShardId());
                 assertArrayEquals(clusterSearchShardsGroup.getShards(), deserializedGroup.getShards());
             }
-            if (version.onOrAfter(Version.V_5_1_1)) {
-                assertEquals(clusterSearchShardsResponse.getIndicesAndFilters(), deserialized.getIndicesAndFilters());
-            } else {
-                assertNull(deserialized.getIndicesAndFilters());
-            }
+            assertEquals(clusterSearchShardsResponse.getIndicesAndFilters(), deserialized.getIndicesAndFilters());
         }
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java
index 2ca71fabbc7..821c75c2ed7 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java
@@ -20,8 +20,11 @@ package org.elasticsearch.action.admin.cluster.storedscripts;

 import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.script.StoredScriptSource;
 import org.elasticsearch.test.ESTestCase;
@@ -48,4 +51,30 @@ public class PutStoredScriptRequestTests extends ESTestCase {
             }
         }
     }
+
+    public void testToXContent() throws IOException {
+        XContentType xContentType = randomFrom(XContentType.values());
+        XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
+        builder.startObject();
+        builder.startObject("script")
+            .field("lang", "painless")
+            .field("source", "Math.log(_score * 2) + params.multiplier")
+            .endObject();
+        builder.endObject();
+
+        BytesReference expectedRequestBody = BytesReference.bytes(builder);
+
+        PutStoredScriptRequest request = new PutStoredScriptRequest();
+        request.id("test1");
+        request.content(expectedRequestBody, xContentType);
+
+        XContentBuilder requestBuilder = XContentBuilder.builder(xContentType.xContent());
+        requestBuilder.startObject();
+        request.toXContent(requestBuilder, ToXContent.EMPTY_PARAMS);
+        requestBuilder.endObject();
+
+        BytesReference actualRequestBody = BytesReference.bytes(requestBuilder);
+
+        assertEquals(expectedRequestBody, actualRequestBody);
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java
index 0cb0063727f..c0685d5d17d 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java
@@ -19,10 +19,7 @@

 package org.elasticsearch.action.admin.indices.create;

-import org.elasticsearch.Version;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.test.AbstractStreamableXContentTestCase;
@@ -67,25 +64,6 @@ public class CreateIndexResponseTests extends AbstractStreamableXContentTestCase
         return CreateIndexResponse.fromXContent(parser);
     }

-    public void testSerializationWithOldVersion() throws IOException {
-        Version oldVersion = Version.V_5_4_0;
-        CreateIndexResponse response = new CreateIndexResponse(true, true, "foo");
-
-        try (BytesStreamOutput output = new BytesStreamOutput()) {
-            output.setVersion(oldVersion);
-            response.writeTo(output);
-
-            try (StreamInput in = output.bytes().streamInput()) {
-                in.setVersion(oldVersion);
-                CreateIndexResponse serialized = new CreateIndexResponse();
-                serialized.readFrom(in);
-                assertEquals(response.isShardsAcknowledged(), serialized.isShardsAcknowledged());
-                assertEquals(response.isAcknowledged(), serialized.isAcknowledged());
-                assertNull(serialized.index());
-            }
-        }
-    }
-
     public void testToXContent() {
         CreateIndexResponse response = new CreateIndexResponse(true, false, "index_name");
         String output = Strings.toString(response);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
index 0c3cc7e4b15..b9624e3073f 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
@@ -59,13 +59,10 @@ import org.elasticsearch.index.query.TermsQueryBuilder;
 import org.elasticsearch.index.seqno.SeqNoStats;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalSettingsPlugin;
 import org.elasticsearch.test.VersionUtils;

 import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.IntStream;
@@ -79,8 +76,8 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 public class ShrinkIndexIT extends ESIntegTestCase {

     @Override
-    protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(InternalSettingsPlugin.class);
+    protected boolean forbidPrivateIndexSettings() {
+        return false;
     }

     public void testCreateShrinkIndexToN() {
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java
index a0fd40a649e..4510956358f 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java
@@ -54,15 +54,12 @@ import org.elasticsearch.index.query.TermsQueryBuilder;
 import org.elasticsearch.index.seqno.SeqNoStats;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalSettingsPlugin;
 import org.elasticsearch.test.VersionUtils;

 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -83,8 +80,8 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 public class SplitIndexIT extends ESIntegTestCase {

     @Override
-    protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(InternalSettingsPlugin.class);
+    protected boolean forbidPrivateIndexSettings() {
+        return false;
     }

     public void testCreateSplitIndexToN() throws IOException {
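ShrinkIndexIT and SplitIndexIT no longer register InternalSettingsPlugin as a node plugin; instead they opt out of the new private-index-settings validation, apparently so the tests can keep applying internal index settings directly. A stand-alone sketch of the override (the test class name is a placeholder, not part of the change):

    import org.elasticsearch.test.ESIntegTestCase;

    public class MyResizeIT extends ESIntegTestCase {
        // Disable the check that rejects private/internal index settings in
        // test requests, mirroring the override added to both ITs above.
        @Override
        protected boolean forbidPrivateIndexSettings() {
            return false;
        }
    }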
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java
index b67c2e2954d..9bf4d9d32f6 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java
@@ -40,9 +40,11 @@ import org.elasticsearch.transport.TransportService;
 import org.junit.After;
 import org.junit.Before;

-import java.util.Collections;
 import java.util.concurrent.TimeUnit;

+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptySet;
+
 public class GetIndexActionTests extends ESSingleNodeTestCase {

     private TransportService transportService;
@@ -58,14 +60,14 @@ public class GetIndexActionTests extends ESSingleNodeTestCase {
     public void setUp() throws Exception {
         super.setUp();

-        settingsFilter = new SettingsModule(Settings.EMPTY, Collections.emptyList(), Collections.emptyList()).getSettingsFilter();
+        settingsFilter = new SettingsModule(Settings.EMPTY, emptyList(), emptyList(), emptySet()).getSettingsFilter();
         threadPool = new TestThreadPool("GetIndexActionTests");
         clusterService = getInstanceFromNode(ClusterService.class);
         indicesService = getInstanceFromNode(IndicesService.class);
         CapturingTransport capturingTransport = new CapturingTransport();
         transportService = capturingTransport.createCapturingTransportService(clusterService.getSettings(), threadPool,
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            boundAddress -> clusterService.localNode(), null, Collections.emptySet());
+            boundAddress -> clusterService.localNode(), null, emptySet());
         transportService.start();
         transportService.acceptIncomingRequests();
         getIndexAction = new GetIndexActionTests.TestTransportGetIndexAction();
@@ -106,7 +108,7 @@ public class GetIndexActionTests extends ESSingleNodeTestCase {

         TestTransportGetIndexAction() {
             super(Settings.EMPTY, GetIndexActionTests.this.transportService, GetIndexActionTests.this.clusterService,
-                GetIndexActionTests.this.threadPool, settingsFilter, new ActionFilters(Collections.emptySet()),
+                GetIndexActionTests.this.threadPool, settingsFilter, new ActionFilters(emptySet()),
                 new GetIndexActionTests.Resolver(Settings.EMPTY), indicesService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
         }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java
index 86c2b67be9c..5243ffd33b3 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java
@@ -19,20 +19,14 @@

 package org.elasticsearch.action.admin.indices.mapping.put;

-import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
-import org.elasticsearch.common.xcontent.yaml.YamlXContent;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.RandomCreateIndexGenerator;
 import org.elasticsearch.test.ESTestCase;
@@ -87,27 +81,6 @@ public class PutMappingRequestTests extends ESTestCase {
         assertEquals("mapping source must be pairs of fieldnames and properties definition.", e.getMessage());
     }

-    public void testPutMappingRequestSerialization() throws IOException {
-        PutMappingRequest request = new PutMappingRequest("foo");
-        String mapping = Strings.toString(YamlXContent.contentBuilder().startObject().field("foo", "bar").endObject());
-        request.source(mapping, XContentType.YAML);
-        assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), request.source());
-
-        final Version version = randomFrom(Version.CURRENT, Version.V_5_3_0, Version.V_5_3_1, Version.V_5_3_2, Version.V_5_4_0);
-        try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) {
-            bytesStreamOutput.setVersion(version);
-            request.writeTo(bytesStreamOutput);
-            try (StreamInput in = StreamInput.wrap(bytesStreamOutput.bytes().toBytesRef().bytes)) {
-                in.setVersion(version);
-                PutMappingRequest serialized = new PutMappingRequest();
-                serialized.readFrom(in);
-
-                String source = serialized.source();
-                assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), source);
-            }
-        }
-    }
-
     public void testToXContent() throws IOException {
         PutMappingRequest request = new PutMappingRequest("foo");
         request.type("my_type");
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponseTests.java
new file mode 100644
index 00000000000..1d63db7585e
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponseTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.segments;
+
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.TestShardRouting;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.engine.Segment;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Collections;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
+public class IndicesSegmentResponseTests extends ESTestCase {
+
+    public void testToXContentSerializationWithSortedFields() throws Exception {
+        ShardRouting shardRouting = TestShardRouting.newShardRouting("foo", 0, "node_id", true, ShardRoutingState.STARTED);
+        Segment segment = new Segment("my");
+
+        SortField sortField = new SortField("foo", SortField.Type.STRING);
+        sortField.setMissingValue(SortField.STRING_LAST);
+        segment.segmentSort = new Sort(sortField);
+
+        ShardSegments shardSegments = new ShardSegments(shardRouting, Collections.singletonList(segment));
+        IndicesSegmentResponse response =
+            new IndicesSegmentResponse(new ShardSegments[] { shardSegments }, 1, 1, 0, Collections.emptyList());
+        try (XContentBuilder builder = jsonBuilder()) {
+            response.toXContent(builder, ToXContent.EMPTY_PARAMS);
+        }
+    }
+}
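The new test feeds a segment whose index sort carries a missing-value sentinel through IndicesSegmentResponse.toXContent, guarding against a serialization failure for sorted indices. The sort shape it exercises, as a tiny self-contained Lucene example:

    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;

    public class SegmentSortSketch {
        public static void main(String[] args) {
            // A string sort whose missing-value marker is a SortField sentinel;
            // this is the case the segments API response must serialize.
            SortField sortField = new SortField("foo", SortField.Type.STRING);
            sortField.setMissingValue(SortField.STRING_LAST);
            System.out.println(new Sort(sortField));
        }
    }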
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java
index 03ccebba10d..85b85cf9e14 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java
@@ -42,6 +42,8 @@ import org.junit.Before;
 import java.util.Collections;
 import java.util.concurrent.TimeUnit;

+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptySet;
 import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;

 public class GetSettingsActionTests extends ESTestCase {
@@ -71,7 +73,7 @@ public class GetSettingsActionTests extends ESTestCase {
     public void setUp() throws Exception {
         super.setUp();

-        settingsFilter = new SettingsModule(Settings.EMPTY, Collections.emptyList(), Collections.emptyList()).getSettingsFilter();
+        settingsFilter = new SettingsModule(Settings.EMPTY, emptyList(), emptyList(), emptySet()).getSettingsFilter();
         threadPool = new TestThreadPool("GetSettingsActionTests");
         clusterService = createClusterService(threadPool);
         CapturingTransport capturingTransport = new CapturingTransport();
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java
index bd43182f007..ce60b14b3ef 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java
@@ -47,6 +47,7 @@ import java.util.Collections;
 import java.util.HashSet;

 import static java.util.Collections.emptyMap;
+import static org.hamcrest.Matchers.equalTo;

 public class TransportResizeActionTests extends ESTestCase {

@@ -92,6 +93,16 @@ public class TransportResizeActionTests extends ESTestCase {
             ).getMessage().startsWith("Can't merge index with more than [2147483519] docs - too many documents in shards "));

+        IllegalArgumentException softDeletesError = expectThrows(IllegalArgumentException.class, () -> {
+            ResizeRequest req = new ResizeRequest("target", "source");
+            req.getTargetIndexRequest().settings(Settings.builder().put("index.soft_deletes.enabled", false));
+            ClusterState clusterState = createClusterState("source", 8, 1,
+                Settings.builder().put("index.blocks.write", true).put("index.soft_deletes.enabled", true).build());
+            TransportResizeAction.prepareCreateIndexRequest(req, clusterState,
+                (i) -> new DocsStats(between(10, 1000), between(1, 10), between(1, 10000)), "source", "target");
+        });
+        assertThat(softDeletesError.getMessage(), equalTo("Can't disable [index.soft_deletes.enabled] setting on resize"));
+
         // create one that won't fail
         ClusterState clusterState = ClusterState.builder(createClusterState("source", randomIntBetween(2, 10), 0,
             Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java
index f0e9a57f7f3..892721f8a5c 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java
@@ -69,7 +69,7 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
             containsString("Failed to parse value [0] for setting [index.number_of_shards] must be >= 1"));
         assertThat(throwables.get(0).getMessage(),
             containsString("unknown value for [index.shard.check_on_startup] " +
-                "must be one of [true, false, fix, checksum] but was: blargh"));
+                "must be one of [true, false, checksum] but was: blargh"));
     }

     public void testIndexTemplateValidationAccumulatesValidationErrors() {
@@ -178,7 +182,11 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
                 null,
                 null,
                 null,
-                null, null, null, xContentRegistry);
+                null,
+                IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
+                null,
+                xContentRegistry,
+                true);
         MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService,
             new AliasValidator(Settings.EMPTY), null,
             new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS), xContentRegistry);
@@ -202,15 +206,16 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
         IndicesService indicesService = getInstanceFromNode(IndicesService.class);
         ClusterService clusterService = getInstanceFromNode(ClusterService.class);
         MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(
-            Settings.EMPTY,
-            clusterService,
-            indicesService,
-            null,
-            null,
-            null,
-            null,
-            null,
-            xContentRegistry());
+                Settings.EMPTY,
+                clusterService,
+                indicesService,
+                null,
+                null,
+                null,
+                null,
+                null,
+                xContentRegistry(),
+                true);
         MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(
             Settings.EMPTY, clusterService, createIndexService, new AliasValidator(Settings.EMPTY), indicesService,
             new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS), xContentRegistry());
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java
index c21e6b3c225..2d037d7c024 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java
@@ -18,25 +18,16 @@
  */
 package org.elasticsearch.action.admin.indices.template.put;

-import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.admin.indices.alias.Alias;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.common.xcontent.yaml.YamlXContent;
 import org.elasticsearch.test.AbstractXContentTestCase;

 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.util.Arrays;
-import java.util.Base64;
 import java.util.Collections;

 import static org.hamcrest.Matchers.containsString;
@@ -46,81 +37,6 @@ import static org.hamcrest.Matchers.nullValue;
 import static org.hamcrest.core.Is.is;

 public class PutIndexTemplateRequestTests extends AbstractXContentTestCase<PutIndexTemplateRequest> {
-
-    // bwc for #21009
-    public void testPutIndexTemplateRequest510() throws IOException {
-        PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest("test");
-        putRequest.patterns(Collections.singletonList("test*"));
-        putRequest.order(5);
-
-        PutIndexTemplateRequest multiPatternRequest = new PutIndexTemplateRequest("test");
-        multiPatternRequest.patterns(Arrays.asList("test*", "*test2", "*test3*"));
-        multiPatternRequest.order(5);
-
-        // These bytes were retrieved by Base64 encoding the result of the above with 5_0_0 code.
-        // Note: Instead of a list for the template, in 5_0_0 the element was provided as a string.
-        String putRequestBytes = "ADwDAAR0ZXN0BXRlc3QqAAAABQAAAAAAAA==";
-        BytesArray bytes = new BytesArray(Base64.getDecoder().decode(putRequestBytes));
-
-        try (StreamInput in = bytes.streamInput()) {
-            in.setVersion(Version.V_5_0_0);
-            PutIndexTemplateRequest readRequest = new PutIndexTemplateRequest();
-            readRequest.readFrom(in);
-            assertEquals(putRequest.patterns(), readRequest.patterns());
-            assertEquals(putRequest.order(), readRequest.order());
-
-            BytesStreamOutput output = new BytesStreamOutput();
-            output.setVersion(Version.V_5_0_0);
-            readRequest.writeTo(output);
-            assertEquals(bytes.toBytesRef(), output.bytes().toBytesRef());
-
-            // test that multi templates are reverse-compatible.
-            // for the bwc case, if multiple patterns, use only the first pattern seen.
-            output.reset();
-            multiPatternRequest.writeTo(output);
-            assertEquals(bytes.toBytesRef(), output.bytes().toBytesRef());
-        }
-    }
-
-    public void testPutIndexTemplateRequestSerializationXContent() throws IOException {
-        PutIndexTemplateRequest request = new PutIndexTemplateRequest("foo");
-        String mapping = Strings.toString(YamlXContent.contentBuilder().startObject().field("foo", "bar").endObject());
-        request.patterns(Collections.singletonList("foo"));
-        request.mapping("bar", mapping, XContentType.YAML);
-        assertNotEquals(mapping, request.mappings().get("bar"));
-        assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), request.mappings().get("bar"));
-
-        final Version version = randomFrom(Version.CURRENT, Version.V_5_3_0, Version.V_5_3_1, Version.V_5_3_2, Version.V_5_4_0);
-        try (BytesStreamOutput out = new BytesStreamOutput()) {
-            out.setVersion(version);
-            request.writeTo(out);
-
-            try (StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes)) {
-                in.setVersion(version);
-                PutIndexTemplateRequest serialized = new PutIndexTemplateRequest();
-                serialized.readFrom(in);
-                assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML),
-                    serialized.mappings().get("bar"));
-            }
-        }
-    }
-
-    public void testPutIndexTemplateRequestSerializationXContentBwc() throws IOException {
-        final byte[] data = Base64.getDecoder().decode("ADwDAANmb28IdGVtcGxhdGUAAAAAAAABA2Jhcg8tLS0KZm9vOiAiYmFyIgoAAAAAAAAAAAAAAAA=");
-        final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
-            Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
-        try (StreamInput in = StreamInput.wrap(data)) {
-            in.setVersion(version);
-            PutIndexTemplateRequest request = new PutIndexTemplateRequest();
-            request.readFrom(in);
-            String mapping = Strings.toString(YamlXContent.contentBuilder().startObject().field("foo", "bar").endObject());
-            assertNotEquals(mapping, request.mappings().get("bar"));
-            assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), request.mappings().get("bar"));
-            assertEquals("foo", request.name());
-            assertEquals("template", request.patterns().get(0));
-        }
-    }
-
     public void testValidateErrorMessage() throws Exception {
         PutIndexTemplateRequest request = new PutIndexTemplateRequest();
         ActionRequestValidationException withoutNameAndPattern = request.validate();
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
index 4c0dacc8a6e..7fdb12ff135 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
@@ -45,7 +45,6 @@ import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.ingest.IngestService;
-import org.elasticsearch.ingest.PipelineExecutionService;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -90,9 +89,6 @@ public class TransportBulkActionIngestTests extends ESTestCase {
     ClusterService clusterService;
     IngestService ingestService;

-    /** The ingest execution service we can capture calls to */
-    PipelineExecutionService executionService;
-
     /** Arguments to callbacks we want to capture, but which require generics, so we must use @Captor */
     @Captor
     ArgumentCaptor<BiConsumer<IndexRequest, Exception>> failureHandler;
@@ -207,8 +203,6 @@ public class TransportBulkActionIngestTests extends ESTestCase {
         }).when(clusterService).addStateApplier(any(ClusterStateApplier.class));
         // setup the mocked ingest service for capturing calls
         ingestService = mock(IngestService.class);
-        executionService = mock(PipelineExecutionService.class);
-        when(ingestService.getPipelineExecutionService()).thenReturn(executionService);
         action = new TestTransportBulkAction();
         singleItemBulkWriteAction = new TestSingleItemBulkWriteAction(action);
         reset(transportService); // call on construction of action
@@ -265,7 +259,7 @@ public class TransportBulkActionIngestTests extends ESTestCase {
         assertFalse(action.isExecuted); // haven't executed yet
         assertFalse(responseCalled.get());
         assertFalse(failureCalled.get());
-        verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture());
+        verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture(), any());
         completionHandler.getValue().accept(exception);
         assertTrue(failureCalled.get());

@@ -299,7 +293,7 @@ public class TransportBulkActionIngestTests extends ESTestCase {
         assertFalse(action.isExecuted); // haven't executed yet
         assertFalse(responseCalled.get());
         assertFalse(failureCalled.get());
-        verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture());
+        verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture(), any());
         completionHandler.getValue().accept(exception);
         assertTrue(failureCalled.get());

@@ -331,7 +325,7 @@ public class TransportBulkActionIngestTests extends ESTestCase {
         action.execute(null, bulkRequest, listener);

         // should not have executed ingest locally
-        verify(executionService, never()).executeBulkRequest(any(), any(), any());
+        verify(ingestService, never()).executeBulkRequest(any(), any(), any(), any());
         // but instead should have sent to a remote node with the transport service
         ArgumentCaptor<DiscoveryNode> node = ArgumentCaptor.forClass(DiscoveryNode.class);
         verify(transportService).sendRequest(node.capture(), eq(BulkAction.NAME), any(), remoteResponseHandler.capture());
@@ -375,7 +369,7 @@ public class TransportBulkActionIngestTests extends ESTestCase {
         singleItemBulkWriteAction.execute(null, indexRequest, listener);

         // should not have executed ingest locally
-        verify(executionService, never()).executeBulkRequest(any(), any(), any());
+        verify(ingestService, never()).executeBulkRequest(any(), any(), any(), any());
         // but instead should have sent to a remote node with the transport service
         ArgumentCaptor<DiscoveryNode> node = ArgumentCaptor.forClass(DiscoveryNode.class);
         verify(transportService).sendRequest(node.capture(), eq(BulkAction.NAME), any(), remoteResponseHandler.capture());
@@ -423,7 +417,7 @@ public class TransportBulkActionIngestTests extends ESTestCase {
         assertFalse(action.isExecuted); // haven't executed yet
         assertFalse(responseCalled.get());
         assertFalse(failureCalled.get());
-        verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture());
+        verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture(), any());
         completionHandler.getValue().accept(exception);
         assertTrue(failureCalled.get());

@@ -455,7 +449,7 @@ public class TransportBulkActionIngestTests extends ESTestCase {
         assertFalse(action.isExecuted); // haven't executed yet
         assertFalse(responseCalled.get());
         assertFalse(failureCalled.get());
-        verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture());
+        verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture(), any());
         completionHandler.getValue().accept(exception);
         assertTrue(failureCalled.get());

diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java
index b3824063242..90b730660dd 100644
--- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java
@@ -28,11 +28,15 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.test.AbstractStreamableXContentTestCase;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.function.Predicate;

+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength;
+
 public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTestCase<FieldCapabilitiesResponse> {
@@ -48,22 +52,46 @@ public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTe
     @Override
     protected FieldCapabilitiesResponse createTestInstance() {
-        Map<String, Map<String, FieldCapabilities>> responses = new HashMap<>();
+        if (randomBoolean()) {
+            // merged responses
+            Map<String, Map<String, FieldCapabilities>> responses = new HashMap<>();
+
+            String[] fields = generateRandomStringArray(5, 10, false, true);
+            assertNotNull(fields);
+
+            for (String field : fields) {
+                Map<String, FieldCapabilities> typesToCapabilities = new HashMap<>();
+                String[] types = generateRandomStringArray(5, 10, false, false);
+                assertNotNull(types);
+
+                for (String type : types) {
+                    typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field));
+                }
+                responses.put(field, typesToCapabilities);
+            }
+            return new FieldCapabilitiesResponse(responses);
+        } else {
+            // non-merged responses
+            List<FieldCapabilitiesIndexResponse> responses = new ArrayList<>();
+            int numResponse = randomIntBetween(0, 10);
+            for (int i = 0; i < numResponse; i++) {
+                responses.add(createRandomIndexResponse());
+            }
+            return new FieldCapabilitiesResponse(responses);
+        }
+    }
+
+
+    private FieldCapabilitiesIndexResponse createRandomIndexResponse() {
+        Map<String, FieldCapabilities> responses = new HashMap<>();

         String[] fields = generateRandomStringArray(5, 10, false, true);
         assertNotNull(fields);

         for (String field : fields) {
-            Map<String, FieldCapabilities> typesToCapabilities = new HashMap<>();
-            String[] types = generateRandomStringArray(5, 10, false, false);
-            assertNotNull(types);
-
-            for (String type : types) {
-                typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field));
-            }
-            responses.put(field, typesToCapabilities);
+            responses.put(field, FieldCapabilitiesTests.randomFieldCaps(field));
         }
-        return new FieldCapabilitiesResponse(responses);
+        return new FieldCapabilitiesIndexResponse(randomAsciiLettersOfLength(10), responses);
     }

     @Override
@@ -138,6 +166,11 @@ public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTe
             "}").replaceAll("\\s+", ""), generatedResponse);
     }

+    public void testEmptyResponse() throws IOException {
+        FieldCapabilitiesResponse testInstance = new FieldCapabilitiesResponse();
+        assertSerialization(testInstance);
+    }
+
     private static FieldCapabilitiesResponse createSimpleResponse() {
         Map<String, FieldCapabilities> titleCapabilities = new HashMap<>();
         titleCapabilities.put("text", new FieldCapabilities("title", "text", true, false));
diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java
index b0c6d717bb3..1711d168910 100644
--- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java
+++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java
@@ -31,8 +31,8 @@ import java.util.Map;

 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.ingest.CompoundProcessor;
 import org.elasticsearch.ingest.IngestDocument;
+import org.elasticsearch.ingest.IngestService;
 import org.elasticsearch.ingest.Pipeline;
-import org.elasticsearch.ingest.PipelineStore;
 import org.elasticsearch.ingest.Processor;
 import org.elasticsearch.ingest.TestProcessor;
 import org.elasticsearch.test.ESTestCase;
@@ -53,7 +53,7 @@ import static org.mockito.Mockito.when;

 public class SimulatePipelineRequestParsingTests extends ESTestCase {

-    private PipelineStore store;
+    private IngestService ingestService;

     @Before
     public void init() throws IOException {
@@ -62,9 +62,9 @@ public class SimulatePipelineRequestParsingTests extends ESTestCase {
         Pipeline pipeline = new Pipeline(SIMULATED_PIPELINE_ID, null, null, pipelineCompoundProcessor);
         Map<String, Processor.Factory> registry = Collections.singletonMap("mock_processor", (factories, tag, config) -> processor);
-        store = mock(PipelineStore.class);
-        when(store.get(SIMULATED_PIPELINE_ID)).thenReturn(pipeline);
-        when(store.getProcessorFactories()).thenReturn(registry);
+        ingestService = mock(IngestService.class);
+        when(ingestService.getPipeline(SIMULATED_PIPELINE_ID)).thenReturn(pipeline);
+        when(ingestService.getProcessorFactories()).thenReturn(registry);
     }

     public void testParseUsingPipelineStore() throws Exception {
@@ -94,7 +94,8 @@ public class SimulatePipelineRequestParsingTests extends ESTestCase {
             expectedDocs.add(expectedDoc);
         }

-        SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parseWithPipelineId(SIMULATED_PIPELINE_ID, requestContent, false, store);
+        SimulatePipelineRequest.Parsed actualRequest =
+            SimulatePipelineRequest.parseWithPipelineId(SIMULATED_PIPELINE_ID, requestContent, false, ingestService);
         assertThat(actualRequest.isVerbose(), equalTo(false));
         assertThat(actualRequest.getDocuments().size(), equalTo(numDocs));
         Iterator<Map<String, Object>> expectedDocsIterator = expectedDocs.iterator();
@@ -182,7 +183,7 @@ public class SimulatePipelineRequestParsingTests extends ESTestCase {

         requestContent.put(Fields.PIPELINE, pipelineConfig);

-        SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parse(requestContent, false, store);
+        SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parse(requestContent, false, ingestService);
         assertThat(actualRequest.isVerbose(), equalTo(false));
         assertThat(actualRequest.getDocuments().size(), equalTo(numDocs));
         Iterator<Map<String, Object>> expectedDocsIterator = expectedDocs.iterator();
@@ -208,7 +209,7 @@ public class SimulatePipelineRequestParsingTests extends ESTestCase {
         List<Map<String, Object>> docs = new ArrayList<>();
         requestContent.put(Fields.DOCS, docs);
         Exception e = expectThrows(IllegalArgumentException.class,
-            () -> SimulatePipelineRequest.parseWithPipelineId(null, requestContent, false, store));
+            () -> SimulatePipelineRequest.parseWithPipelineId(null, requestContent, false, ingestService));
         assertThat(e.getMessage(), equalTo("param [pipeline] is null"));
     }

@@ -218,7 +219,7 @@ public class SimulatePipelineRequestParsingTests extends ESTestCase {
         List<Map<String, Object>> docs = new ArrayList<>();
         requestContent.put(Fields.DOCS, docs);
         Exception e = expectThrows(IllegalArgumentException.class,
-            () -> SimulatePipelineRequest.parseWithPipelineId(pipelineId, requestContent, false, store));
+            () -> SimulatePipelineRequest.parseWithPipelineId(pipelineId, requestContent, false, ingestService));
         assertThat(e.getMessage(), equalTo("pipeline [" + pipelineId + "] does not exist"));
     }
 }
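Note the pattern shared by the parsing tests above and the TransportBulkActionIngestTests changes earlier: pipeline lookup (formerly PipelineStore) and bulk execution (formerly PipelineExecutionService) are now both addressed through IngestService. A minimal Mockito sketch of the new wiring, using only the accessors visible in these hunks; the pipeline id and the empty factory registry are illustrative placeholders:

    import java.util.Collections;
    import java.util.Map;

    import org.elasticsearch.ingest.IngestService;
    import org.elasticsearch.ingest.Pipeline;
    import org.elasticsearch.ingest.Processor;

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    class IngestServiceMockSketch {
        static IngestService mockIngestService(Pipeline pipeline) {
            Map<String, Processor.Factory> registry = Collections.emptyMap(); // placeholder registry
            IngestService ingestService = mock(IngestService.class);
            when(ingestService.getPipeline("my-pipeline")).thenReturn(pipeline); // placeholder id
            when(ingestService.getProcessorFactories()).thenReturn(registry);
            return ingestService;
        }
    }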
diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java
index 5cd82be8cb0..53c307c4308 100644
--- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java
@@ -19,7 +19,6 @@

 package org.elasticsearch.action.ingest;

-import org.elasticsearch.Version;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -28,7 +27,6 @@ import org.elasticsearch.test.ESTestCase;

 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
-import java.util.Base64;

 import static org.hamcrest.CoreMatchers.equalTo;

@@ -68,22 +66,4 @@ public class SimulatePipelineRequestTests extends ESTestCase {
         assertEquals(XContentType.JSON, serialized.getXContentType());
         assertEquals("{}", serialized.getSource().utf8ToString());
     }
-
-    public void testSerializationWithXContentBwc() throws IOException {
-        final byte[] data = Base64.getDecoder().decode("AAAAAnt9AAA=");
-        final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
-            Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
-        try (StreamInput in = StreamInput.wrap(data)) {
-            in.setVersion(version);
-            SimulatePipelineRequest request = new SimulatePipelineRequest(in);
-            assertEquals(XContentType.JSON, request.getXContentType());
-            assertEquals("{}", request.getSource().utf8ToString());
-
-            try (BytesStreamOutput out = new BytesStreamOutput()) {
-                out.setVersion(version);
-                request.writeTo(out);
-                assertArrayEquals(data, out.bytes().toBytesRef().bytes);
-            }
-        }
-    }
 }
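Everything from here through the search-phase tests applies one mechanical rename: the node setting search.remote.connect becomes cluster.remote.connect. A tiny sketch of the new spelling (the read-back is only for illustration):

    import org.elasticsearch.common.settings.Settings;

    public class RemoteConnectSettingSketch {
        public static void main(String[] args) {
            // Previously: Settings.builder().put("search.remote.connect", false)
            Settings settings = Settings.builder()
                .put("cluster.remote.connect", false) // node is not a remote-cluster client
                .build();
            System.out.println(settings.getAsBoolean("cluster.remote.connect", true));
        }
    }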
diff --git a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java
index 8b174196773..2a0fa6c7ce1 100644
--- a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java
@@ -33,7 +33,6 @@ import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.search.internal.AliasFilter;
 import org.elasticsearch.search.internal.ShardSearchTransportRequest;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.transport.Transport;

 import java.io.IOException;
@@ -61,7 +60,7 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
         final boolean shard2 = randomBoolean();

         SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("search.remote.connect", false).build(), null, null) {
+            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

             @Override
             public void sendCanMatch(Transport.Connection connection, ShardSearchTransportRequest request, SearchTask task,
@@ -110,17 +109,6 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
         }
     }

-    public void testOldNodesTriggerException() {
-        SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("search.remote.connect", false).build(), null, null);
-        DiscoveryNode node = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), VersionUtils.randomVersionBetween(random(),
-            VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(Version.V_5_6_0)));
-        SearchAsyncActionTests.MockConnection mockConnection = new SearchAsyncActionTests.MockConnection(node);
-        IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class,
-            () -> searchTransportService.sendCanMatch(mockConnection, null, null, null));
-        assertEquals("can_match is not supported on pre 5.6 nodes", illegalArgumentException.getMessage());
-    }
-
     public void testFilterWithFailure() throws InterruptedException {
         final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, System.nanoTime(),
             System::nanoTime);
@@ -131,7 +119,7 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
         lookup.put("node2", new SearchAsyncActionTests.MockConnection(replicaNode));
         final boolean shard1 = randomBoolean();
         SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("search.remote.connect", false).build(), null, null) {
+            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

             @Override
             public void sendCanMatch(Transport.Connection connection, ShardSearchTransportRequest request, SearchTask task,
@@ -198,7 +186,7 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {

         final SearchTransportService searchTransportService =
-            new SearchTransportService(Settings.builder().put("search.remote.connect", false).build(), null, null) {
+            new SearchTransportService(Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

                 @Override
                 public void sendCanMatch(
                     Transport.Connection connection,
diff --git a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java
index c1f729a12ca..fe9be2a06e2 100644
--- a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java
@@ -22,7 +22,9 @@ import org.apache.lucene.index.Term;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TotalHits;
 import org.apache.lucene.store.MockDirectoryWrapper;
+import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
@@ -60,7 +62,7 @@ public class DfsQueryPhaseTests extends ESTestCase {
         SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY,
             (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
         SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("search.remote.connect", false).build(), null, null) {
+            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

             @Override
             public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task,
@@ -68,13 +70,17 @@ public class DfsQueryPhaseTests extends ESTestCase {
                 if (request.id() == 1) {
                     QuerySearchResult queryResult = new QuerySearchResult(123,
                         new SearchShardTarget("node1", new Index("test", "na"), 0, null));
-                    queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]);
+                    queryResult.topDocs(new TopDocsAndMaxScore(
+                        new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
+                            new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]);
                     queryResult.size(2); // the size of the result set
                     listener.onResponse(queryResult);
                 } else if (request.id() == 2) {
                     QuerySearchResult queryResult = new QuerySearchResult(123,
                         new SearchShardTarget("node2", new Index("test", "na"), 0, null));
-                    queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]);
+                    queryResult.topDocs(new TopDocsAndMaxScore(
+                        new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F),
+                        new DocValueFormat[0]);
                     queryResult.size(2); // the size of the result set
                     listener.onResponse(queryResult);
                 } else {
@@ -97,12 +103,12 @@ public class DfsQueryPhaseTests extends ESTestCase {
         assertNotNull(responseRef.get());
         assertNotNull(responseRef.get().get(0));
         assertNull(responseRef.get().get(0).fetchResult());
-        assertEquals(1, responseRef.get().get(0).queryResult().topDocs().totalHits);
-        assertEquals(42, responseRef.get().get(0).queryResult().topDocs().scoreDocs[0].doc);
+        assertEquals(1, responseRef.get().get(0).queryResult().topDocs().topDocs.totalHits.value);
+        assertEquals(42, responseRef.get().get(0).queryResult().topDocs().topDocs.scoreDocs[0].doc);
         assertNotNull(responseRef.get().get(1));
         assertNull(responseRef.get().get(1).fetchResult());
-        assertEquals(1, responseRef.get().get(1).queryResult().topDocs().totalHits);
-        assertEquals(84, responseRef.get().get(1).queryResult().topDocs().scoreDocs[0].doc);
+        assertEquals(1, responseRef.get().get(1).queryResult().topDocs().topDocs.totalHits.value);
+        assertEquals(84, responseRef.get().get(1).queryResult().topDocs().topDocs.scoreDocs[0].doc);
         assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty());
         assertEquals(2, mockSearchPhaseContext.numSuccess.get());
     }
@@ -118,7 +124,7 @@ public class DfsQueryPhaseTests extends ESTestCase {
         SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY,
             (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
         SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("search.remote.connect", false).build(), null, null) {
+            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

             @Override
             public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task,
@@ -126,7 +132,9 @@ public class DfsQueryPhaseTests extends ESTestCase {
                 if (request.id() == 1) {
                     QuerySearchResult queryResult = new QuerySearchResult(123,
                         new SearchShardTarget("node1", new Index("test", "na"), 0, null));
-                    queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]);
+                    queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(
+                        new TotalHits(1, TotalHits.Relation.EQUAL_TO),
+                        new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]);
                     queryResult.size(2); // the size of the result set
                     listener.onResponse(queryResult);
                 } else if (request.id() == 2) {
@@ -151,8 +159,8 @@ public class DfsQueryPhaseTests extends ESTestCase {
         assertNotNull(responseRef.get());
         assertNotNull(responseRef.get().get(0));
         assertNull(responseRef.get().get(0).fetchResult());
-        assertEquals(1, responseRef.get().get(0).queryResult().topDocs().totalHits);
-        assertEquals(42, responseRef.get().get(0).queryResult().topDocs().scoreDocs[0].doc);
+        assertEquals(1, responseRef.get().get(0).queryResult().topDocs().topDocs.totalHits.value);
+        assertEquals(42, responseRef.get().get(0).queryResult().topDocs().topDocs.scoreDocs[0].doc);
         assertNull(responseRef.get().get(1));

         assertEquals(1, mockSearchPhaseContext.numSuccess.get());
@@ -175,7 +183,7 @@ public class DfsQueryPhaseTests extends ESTestCase {
         SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY,
             (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
         SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("search.remote.connect", false).build(), null, null) {
+            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

             @Override
             public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task,
@@ -183,7 +191,9 @@ public class DfsQueryPhaseTests extends ESTestCase {
                 if (request.id() == 1) {
                     QuerySearchResult queryResult = new QuerySearchResult(123,
                         new SearchShardTarget("node1", new Index("test", "na"), 0, null));
-                    queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]);
+                    queryResult.topDocs(new TopDocsAndMaxScore(
+                        new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
+                            new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]);
                     queryResult.size(2); // the size of the result set
                     listener.onResponse(queryResult);
                 } else if (request.id() == 2) {
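The DfsQueryPhaseTests hunks above and the FetchSearchPhaseTests changes further below share the other recurring migration: in Lucene 8, TopDocs no longer carries a max score, and the hit count becomes a TotalHits value plus a relation (the count may only be a lower bound), so Elasticsearch wraps query results in TopDocsAndMaxScore. A self-contained sketch of the new construction:

    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.search.TotalHits;
    import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;

    public class TopDocsMigrationSketch {
        public static void main(String[] args) {
            // Lucene 7 form: new TopDocs(1, new ScoreDoc[]{...}, 2.0F)
            TopDocs topDocs = new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
                new ScoreDoc[] { new ScoreDoc(42, 1.0F) });
            TopDocsAndMaxScore wrapped = new TopDocsAndMaxScore(topDocs, 2.0F);
            System.out.println(wrapped.topDocs.totalHits.value); // 1
        }
    }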
ExpandSearchPhaseTests extends ESTestCase { public void testSkipPhase() throws IOException { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); mockSearchPhaseContext.searchTransport = new SearchTransportService( - Settings.builder().put("search.remote.connect", false).build(), null, null) { + Settings.builder().put("cluster.remote.connect", false).build(), null, null) { @Override void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { @@ -218,7 +218,7 @@ public class ExpandSearchPhaseTests extends ESTestCase { public void testSkipExpandCollapseNoHits() throws IOException { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); mockSearchPhaseContext.searchTransport = new SearchTransportService( - Settings.builder().put("search.remote.connect", false).build(), null, null) { + Settings.builder().put("cluster.remote.connect", false).build(), null, null) { @Override void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { @@ -250,7 +250,7 @@ public class ExpandSearchPhaseTests extends ESTestCase { boolean version = randomBoolean(); mockSearchPhaseContext.searchTransport = new SearchTransportService( - Settings.builder().put("search.remote.connect", false).build(), null, null) { + Settings.builder().put("cluster.remote.connect", false).build(), null, null) { @Override void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index 7f4fbc91157..55ca24826fc 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -20,7 +20,9 @@ package org.elasticsearch.action.search; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.MockDirectoryWrapper; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.Index; @@ -55,7 +57,8 @@ public class FetchSearchPhaseTests extends ESTestCase { final int numHits; if (hasHits) { QuerySearchResult queryResult = new QuerySearchResult(); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 1.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 1.0F), new DocValueFormat[0]); queryResult.size(1); FetchSearchResult fetchResult = new FetchSearchResult(); fetchResult.hits(new SearchHits(new SearchHit[] {new SearchHit(42)}, 1, 1.0F)); @@ -94,19 +97,21 @@ public class FetchSearchPhaseTests extends ESTestCase { AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); 
         queryResult.size(resultSetSize); // the size of the result set
         queryResult.setShardIndex(0);
         results.consumeResult(queryResult);
         queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1, null));
-        queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]);
+        queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
+            new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize);
         queryResult.setShardIndex(1);
         results.consumeResult(queryResult);
         SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("search.remote.connect", false).build(), null, null) {
+            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
             @Override
             public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                          SearchActionListener listener) {
@@ -149,19 +154,21 @@ public class FetchSearchPhaseTests extends ESTestCase {
         AtomicReference responseRef = new AtomicReference<>();
         int resultSetSize = randomIntBetween(2, 10);
         QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null));
-        queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]);
+        queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
+            new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize); // the size of the result set
         queryResult.setShardIndex(0);
         results.consumeResult(queryResult);
         queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1, null));
-        queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]);
+        queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
+            new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize);
         queryResult.setShardIndex(1);
         results.consumeResult(queryResult);
         SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("search.remote.connect", false).build(), null, null) {
+            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
             @Override
             public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                          SearchActionListener listener) {
@@ -209,13 +216,14 @@ public class FetchSearchPhaseTests extends ESTestCase {
         AtomicReference responseRef = new AtomicReference<>();
         for (int i = 0; i < numHits; i++) {
             QuerySearchResult queryResult = new QuerySearchResult(i, new SearchShardTarget("node1", new Index("test", "na"), 0, null));
-            queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(i+1, i)}, i), new DocValueFormat[0]);
+            queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
+                new ScoreDoc[] {new ScoreDoc(i+1, i)}), i), new DocValueFormat[0]);
             queryResult.size(resultSetSize); // the size of the result set
             queryResult.setShardIndex(i);
             results.consumeResult(queryResult);
         }
         SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("search.remote.connect", false).build(), null, null) {
+            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
             @Override
             public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                          SearchActionListener listener) {
@@ -265,19 +273,21 @@ public class FetchSearchPhaseTests extends ESTestCase {
         AtomicReference responseRef = new AtomicReference<>();
         int resultSetSize = randomIntBetween(2, 10);
         QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null));
-        queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]);
+        queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
+            new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize); // the size of the result set
         queryResult.setShardIndex(0);
         results.consumeResult(queryResult);
         queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1, null));
-        queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]);
+        queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
+            new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize);
         queryResult.setShardIndex(1);
         results.consumeResult(queryResult);
         AtomicInteger numFetches = new AtomicInteger(0);
         SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("search.remote.connect", false).build(), null, null) {
+            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
             @Override
             public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                          SearchActionListener listener) {
@@ -319,19 +329,21 @@ public class FetchSearchPhaseTests extends ESTestCase {
         AtomicReference responseRef = new AtomicReference<>();
         int resultSetSize = 1;
         QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null));
-        queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]);
+        queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
+            new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize); // the size of the result set
         queryResult.setShardIndex(0);
         results.consumeResult(queryResult);
         queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1, null));
-        queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]);
+        queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
+            new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize);
         queryResult.setShardIndex(1);
         results.consumeResult(queryResult);
         SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("search.remote.connect", false).build(), null, null) {
+            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
             @Override
             public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                          SearchActionListener listener) {
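The other mechanical change running through these test files is a pure settings-key rename: `search.remote.connect` became `cluster.remote.connect`. A small sketch of the new key, assuming only the `Settings` builder shown in the hunks (the surrounding class is hypothetical):

```java
import org.elasticsearch.common.settings.Settings;

public class RemoteConnectSettingExample {
    public static void main(String[] args) {
        // The key moved from the search.remote.* namespace to cluster.remote.*.
        Settings settings = Settings.builder()
            .put("cluster.remote.connect", false) // previously "search.remote.connect"
            .build();
        System.out.println(settings.getAsBoolean("cluster.remote.connect", true)); // false
    }
}
```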
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java
index 1f7f6f4249b..95282e358e1 100644
--- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java
@@ -377,7 +377,7 @@ public class SearchAsyncActionTests extends ESTestCase {
         ArrayList unassigned = new ArrayList<>();
         ShardRouting routing = ShardRouting.newUnassigned(new ShardId(new Index(index, "_na_"), i), true,
-            RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"));
+            RecoverySource.EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"));
         routing = routing.initialize(primaryNode.getId(), i + "p", 0);
         routing.started();
         started.add(routing);
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
index 393c45fa572..b109e82beef 100644
--- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
@@ -22,6 +22,9 @@ package org.elasticsearch.action.search;
 import com.carrotsearch.randomizedtesting.RandomizedContext;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TotalHits;
+import org.apache.lucene.search.TotalHits.Relation;
+import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.util.BigArrays;
@@ -33,7 +36,7 @@ import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.search.aggregations.AggregationBuilders;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
+import org.elasticsearch.search.aggregations.metrics.InternalMax;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.fetch.FetchSearchResult;
 import org.elasticsearch.search.SearchHit;
@@ -187,11 +190,11 @@ public class SearchPhaseControllerTests extends ESTestCase {
         for (int shardIndex = 0; shardIndex < nShards; shardIndex++) {
             QuerySearchResult querySearchResult = new QuerySearchResult(shardIndex,
                 new SearchShardTarget("", new Index("", ""), shardIndex, null));
-            TopDocs topDocs = new TopDocs(0, new ScoreDoc[0], 0);
+            TopDocs topDocs = new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]);
+            float maxScore = 0;
             if (searchHitsSize > 0) {
                 int nDocs = randomIntBetween(0, searchHitsSize);
                 ScoreDoc[] scoreDocs = new ScoreDoc[nDocs];
-                float maxScore = 0F;
                 for (int i = 0; i < nDocs; i++) {
                     float score = useConstantScore ? 1.0F : Math.abs(randomFloat());
                     scoreDocs[i] = new ScoreDoc(i, score);
@@ -199,7 +202,7 @@ public class SearchPhaseControllerTests extends ESTestCase {
                         maxScore = score;
                     }
                 }
-                topDocs = new TopDocs(scoreDocs.length, scoreDocs, maxScore);
+                topDocs = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs);
             }
             List shardSuggestion = new ArrayList<>();
             for (CompletionSuggestion completionSuggestion : suggestions) {
@@ -208,19 +211,19 @@ public class SearchPhaseControllerTests extends ESTestCase {
                 final CompletionSuggestion.Entry completionEntry = new CompletionSuggestion.Entry(new Text(""), 0, 5);
                 suggestion.addTerm(completionEntry);
                 int optionSize = randomIntBetween(1, suggestion.getSize());
-                float maxScore = randomIntBetween(suggestion.getSize(), (int) Float.MAX_VALUE);
+                float maxScoreValue = randomIntBetween(suggestion.getSize(), (int) Float.MAX_VALUE);
                 for (int i = 0; i < optionSize; i++) {
-                    completionEntry.addOption(new CompletionSuggestion.Entry.Option(i, new Text(""), maxScore,
+                    completionEntry.addOption(new CompletionSuggestion.Entry.Option(i, new Text(""), maxScoreValue,
                         Collections.emptyMap()));
                     float dec = randomIntBetween(0, optionSize);
-                    if (dec <= maxScore) {
-                        maxScore -= dec;
+                    if (dec <= maxScoreValue) {
+                        maxScoreValue -= dec;
                     }
                 }
                 suggestion.setShardIndex(shardIndex);
                 shardSuggestion.add(suggestion);
             }
-            querySearchResult.topDocs(topDocs, null);
+            querySearchResult.topDocs(new TopDocsAndMaxScore(topDocs, maxScore), null);
             querySearchResult.size(searchHitsSize);
             querySearchResult.suggest(new Suggest(new ArrayList<>(shardSuggestion)));
             querySearchResult.setShardIndex(shardIndex);
@@ -232,7 +235,9 @@ public class SearchPhaseControllerTests extends ESTestCase {
     private int getTotalQueryHits(AtomicArray results) {
         int resultCount = 0;
         for (SearchPhaseResult shardResult : results.asList()) {
-            resultCount += shardResult.queryResult().topDocs().totalHits;
+            TopDocs topDocs = shardResult.queryResult().topDocs().topDocs;
+            assert topDocs.totalHits.relation == Relation.EQUAL_TO;
+            resultCount += topDocs.totalHits.value;
         }
         return resultCount;
     }
@@ -292,7 +297,8 @@ public class SearchPhaseControllerTests extends ESTestCase {
         request.setBatchedReduceSize(bufferSize);
         InitialSearchPhase.ArraySearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults(request, 3);
         QuerySearchResult result = new QuerySearchResult(0, new SearchShardTarget("node", new Index("a", "b"), 0, null));
-        result.topDocs(new TopDocs(0, new ScoreDoc[0], 0.0F), new DocValueFormat[0]);
+        result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN),
+            new DocValueFormat[0]);
         InternalAggregations aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", 1.0D, DocValueFormat.RAW,
             Collections.emptyList(), Collections.emptyMap())));
         result.aggregations(aggs);
@@ -300,7 +306,8 @@ public class SearchPhaseControllerTests extends ESTestCase {
         consumer.consumeResult(result);
         result = new QuerySearchResult(1, new SearchShardTarget("node", new Index("a", "b"), 0, null));
-        result.topDocs(new TopDocs(0, new ScoreDoc[0], 0.0F), new DocValueFormat[0]);
+        result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN),
+            new DocValueFormat[0]);
         aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", 3.0D, DocValueFormat.RAW,
             Collections.emptyList(), Collections.emptyMap())));
         result.aggregations(aggs);
@@ -308,7 +315,8 @@ public class SearchPhaseControllerTests extends ESTestCase {
         consumer.consumeResult(result);
         result = new QuerySearchResult(1, new SearchShardTarget("node", new Index("a", "b"), 0, null));
-        result.topDocs(new TopDocs(0, new ScoreDoc[0], 0.0F), new DocValueFormat[0]);
+        result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN),
+            new DocValueFormat[0]);
         aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", 2.0D, DocValueFormat.RAW,
             Collections.emptyList(), Collections.emptyMap())));
         result.aggregations(aggs);
@@ -347,7 +355,9 @@ public class SearchPhaseControllerTests extends ESTestCase {
             int number = randomIntBetween(1, 1000);
             max.updateAndGet(prev -> Math.max(prev, number));
             QuerySearchResult result = new QuerySearchResult(id, new SearchShardTarget("node", new Index("a", "b"), id, null));
-            result.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(0, number)}, number), new DocValueFormat[0]);
+            result.topDocs(new TopDocsAndMaxScore(
+                new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(0, number)}), number),
+                new DocValueFormat[0]);
             InternalAggregations aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", (double) number,
                 DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap())));
             result.aggregations(aggs);
@@ -384,7 +394,8 @@ public class SearchPhaseControllerTests extends ESTestCase {
             int number = randomIntBetween(1, 1000);
             max.updateAndGet(prev -> Math.max(prev, number));
             QuerySearchResult result = new QuerySearchResult(id, new SearchShardTarget("node", new Index("a", "b"), id, null));
-            result.topDocs(new TopDocs(1, new ScoreDoc[0], number), new DocValueFormat[0]);
+            result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), number),
+                new DocValueFormat[0]);
             InternalAggregations aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", (double) number,
                 DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap())));
             result.aggregations(aggs);
@@ -417,7 +428,8 @@ public class SearchPhaseControllerTests extends ESTestCase {
             int number = randomIntBetween(1, 1000);
             max.updateAndGet(prev -> Math.max(prev, number));
             QuerySearchResult result = new QuerySearchResult(id, new SearchShardTarget("node", new Index("a", "b"), id, null));
-            result.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(0, number)}, number), new DocValueFormat[0]);
+            result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
+                new ScoreDoc[] {new ScoreDoc(0, number)}), number), new DocValueFormat[0]);
             result.setShardIndex(id);
             result.size(1);
             consumer.consumeResult(result);
@@ -477,7 +489,8 @@ public class SearchPhaseControllerTests extends ESTestCase {
             for (int j = 0; j < docs.length; j++) {
                 docs[j] = new ScoreDoc(0, score--);
             }
-            result.topDocs(new TopDocs(3, docs, docs[0].score), new DocValueFormat[0]);
+            result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(3, TotalHits.Relation.EQUAL_TO), docs), docs[0].score),
+                new DocValueFormat[0]);
             result.setShardIndex(i);
             result.size(5);
             result.from(5);
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseExecutionExceptionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseExecutionExceptionTests.java
index e96a0975fd4..9fbf3704fff 100644
--- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseExecutionExceptionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseExecutionExceptionTests.java
@@ -26,7 +26,6 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContent;
-import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.Index;
@@ -38,8 +37,6 @@ import org.elasticsearch.test.ESTestCase;

 import java.io.IOException;

-import static java.util.Collections.singletonMap;
-import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.CoreMatchers.hasItem;
 import static org.hamcrest.Matchers.hasSize;
@@ -87,56 +84,6 @@ public class SearchPhaseExecutionExceptionTests extends ESTestCase {
             "}" +
             "}" +
             "]}", Strings.toString(exception));
-
-        // Failures are NOT grouped
-        ToXContent.MapParams params = new ToXContent.MapParams(singletonMap("group_shard_failures", "false"));
-        try (XContentBuilder builder = jsonBuilder()) {
-            builder.startObject();
-            exception.toXContent(builder, params);
-            builder.endObject();
-
-            assertEquals("{" +
-                "\"type\":\"search_phase_execution_exception\"," +
-                "\"reason\":\"all shards failed\"," +
-                "\"phase\":\"test\"," +
-                "\"grouped\":false," +
-                "\"failed_shards\":[" +
-                "{" +
-                "\"shard\":0," +
-                "\"index\":\"foo\"," +
-                "\"node\":\"node_1\"," +
-                "\"reason\":{" +
-                "\"type\":\"parsing_exception\"," +
-                "\"reason\":\"foobar\"," +
-                "\"line\":1," +
-                "\"col\":2" +
-                "}" +
-                "}," +
-                "{" +
-                "\"shard\":1," +
-                "\"index\":\"foo\"," +
-                "\"node\":\"node_2\"," +
-                "\"reason\":{" +
-                "\"type\":\"index_shard_closed_exception\"," +
-                "\"reason\":\"CurrentState[CLOSED] Closed\"," +
-                "\"index_uuid\":\"_na_\"," +
-                "\"shard\":\"1\"," +
-                "\"index\":\"foo\"" +
-                "}" +
-                "}," +
-                "{" +
-                "\"shard\":2," +
-                "\"index\":\"foo\"," +
-                "\"node\":\"node_3\"," +
-                "\"reason\":{" +
-                "\"type\":\"parsing_exception\"," +
-                "\"reason\":\"foobar\"," +
-                "\"line\":5," +
-                "\"col\":7" +
-                "}" +
-                "}" +
-                "]}", Strings.toString(builder));
-        }
     }

     public void testToAndFromXContent() throws IOException {
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java
index 87e66477a04..d6fbf59d941 100644
--- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java
@@ -19,7 +19,7 @@
 package org.elasticsearch.action.search;

-import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -47,13 +47,11 @@ import org.elasticsearch.search.suggest.Suggest;
 import org.elasticsearch.search.suggest.SuggestTests;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.InternalAggregationTestCase;
-import org.elasticsearch.test.VersionUtils;
 import org.junit.After;
 import org.junit.Before;

 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Base64;
 import java.util.Collections;
 import java.util.List;
@@ -183,7 +181,7 @@ public class SearchResponseTests extends ESTestCase {
         int numFailures = randomIntBetween(1, 5);
         ShardSearchFailure[] failures = new ShardSearchFailure[numFailures];
         for (int i = 0; i < failures.length; i++) {
-            failures[i] = ShardSearchFailureTests.createTestItem();
+            failures[i] = ShardSearchFailureTests.createTestItem(IndexMetaData.INDEX_UUID_NA_VALUE);
         }
         SearchResponse response = createTestItem(failures);
         XContentType xcontentType = randomFrom(XContentType.values());
@@ -290,27 +288,4 @@ public class SearchResponseTests extends ESTestCase {
             assertEquals(searchResponse.getClusters(), serialized.getClusters());
         }
     }
-
-    public void testSerializationBwc() throws IOException {
-        final byte[] data = Base64.getDecoder().decode("AAAAAAAAAAAAAgABBQUAAAoAAAAAAAAA");
-        final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_5, Version.V_6_0_0);
-        try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(data), namedWriteableRegistry)) {
-            in.setVersion(version);
-            SearchResponse deserialized = new SearchResponse();
-            deserialized.readFrom(in);
-            assertSame(SearchResponse.Clusters.EMPTY, deserialized.getClusters());
-
-            try (BytesStreamOutput out = new BytesStreamOutput()) {
-                out.setVersion(version);
-                deserialized.writeTo(out);
-                try (StreamInput in2 = new NamedWriteableAwareStreamInput(StreamInput.wrap(out.bytes().toBytesRef().bytes),
-                    namedWriteableRegistry)) {
-                    in2.setVersion(version);
-                    SearchResponse deserialized2 = new SearchResponse();
-                    deserialized2.readFrom(in2);
-                    assertSame(SearchResponse.Clusters.EMPTY, deserialized2.getClusters());
-                }
-            }
-        }
-    }
 }
diff --git a/server/src/test/java/org/elasticsearch/action/search/ShardSearchFailureTests.java b/server/src/test/java/org/elasticsearch/action/search/ShardSearchFailureTests.java
index bd892829c95..f62f874c9e2 100644
--- a/server/src/test/java/org/elasticsearch/action/search/ShardSearchFailureTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/ShardSearchFailureTests.java
@@ -30,6 +30,7 @@ import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.VersionUtils;

 import java.io.IOException;
@@ -38,7 +39,7 @@ import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;

 public class ShardSearchFailureTests extends ESTestCase {

-    public static ShardSearchFailure createTestItem() {
+    public static ShardSearchFailure createTestItem(String indexUuid) {
         String randomMessage = randomAlphaOfLengthBetween(3, 20);
         Exception ex = new ParsingException(0, 0, randomMessage , new IllegalArgumentException("some bad argument"));
         SearchShardTarget searchShardTarget = null;
@@ -47,7 +48,7 @@ public class ShardSearchFailureTests extends ESTestCase {
             String indexName = randomAlphaOfLengthBetween(5, 10);
             String clusterAlias = randomBoolean() ? randomAlphaOfLengthBetween(5, 10) : null;
             searchShardTarget = new SearchShardTarget(nodeId,
-                new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), randomInt()), clusterAlias, OriginalIndices.NONE);
+                new ShardId(new Index(indexName, indexUuid), randomInt()), clusterAlias, OriginalIndices.NONE);
         }
         return new ShardSearchFailure(ex, searchShardTarget);
     }
@@ -66,7 +67,7 @@ public class ShardSearchFailureTests extends ESTestCase {
     }

     private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException {
-        ShardSearchFailure response = createTestItem();
+        ShardSearchFailure response = createTestItem(IndexMetaData.INDEX_UUID_NA_VALUE);
         XContentType xContentType = randomFrom(XContentType.values());
         boolean humanReadable = randomBoolean();
         BytesReference originalBytes = toShuffledXContent(response, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
@@ -134,4 +135,15 @@ public class ShardSearchFailureTests extends ESTestCase {
             + "}", xContent.utf8ToString());
     }
+
+    public void testSerialization() throws IOException {
+        ShardSearchFailure testItem = createTestItem(randomAlphaOfLength(12));
+        ShardSearchFailure deserializedInstance = copyStreamable(testItem, writableRegistry(),
+            ShardSearchFailure::new, VersionUtils.randomVersion(random()));
+        assertEquals(testItem.index(), deserializedInstance.index());
+        assertEquals(testItem.shard(), deserializedInstance.shard());
+        assertEquals(testItem.shardId(), deserializedInstance.shardId());
+        assertEquals(testItem.reason(), deserializedInstance.reason());
+        assertEquals(testItem.status(), deserializedInstance.status());
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/AbstractBroadcastResponseTestCase.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/AbstractBroadcastResponseTestCase.java
index cec5e27f076..5bf48fa5897 100644
--- a/server/src/test/java/org/elasticsearch/action/support/broadcast/AbstractBroadcastResponseTestCase.java
+++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/AbstractBroadcastResponseTestCase.java
@@ -33,7 +33,6 @@ import org.elasticsearch.test.AbstractXContentTestCase;

 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;

 import static org.hamcrest.CoreMatchers.anyOf;
@@ -130,29 +129,6 @@ public abstract class AbstractBroadcastResponseTestCase
                 BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)));
-        assertThat(e.getMessage(), containsString("max virtual memory areas vm.max_map_count"));
-
-        maxMapCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));
-
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));
-
-        // nothing should happen if current vm.max_map_count is not
-        // available
-        maxMapCount.set(-1);
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));
-    }
-
     public void testClientJvmCheck() throws NodeValidationException {
         final AtomicReference vmName = new AtomicReference<>("Java HotSpot(TM) 32-Bit Client VM");
         final BootstrapCheck check = new BootstrapChecks.ClientJvmCheck() {
diff --git a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java
index c5b99a91ffa..9a964a97bd7 100644
--- a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java
+++ b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java
@@ -24,16 +24,21 @@ import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.core.LogEvent;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.util.Constants;
+import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.logging.ESLoggerFactory;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.MockLogAppender;

 import java.io.BufferedReader;
 import java.io.IOException;
 import java.nio.file.Path;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Predicate;

 import static org.hamcrest.CoreMatchers.equalTo;
@@ -45,6 +50,66 @@ import static org.mockito.Mockito.when;

 public class MaxMapCountCheckTests extends ESTestCase {

+    // initialize as if the max map count is under the limit, tests can override by setting maxMapCount before executing the check
+    private final AtomicLong maxMapCount = new AtomicLong(randomIntBetween(1, Math.toIntExact(BootstrapChecks.MaxMapCountCheck.LIMIT) - 1));
+    private final BootstrapChecks.MaxMapCountCheck check = new BootstrapChecks.MaxMapCountCheck() {
+        @Override
+        long getMaxMapCount() {
+            return maxMapCount.get();
+        }
+    };
+
+    private void assertFailure(final BootstrapCheck.BootstrapCheckResult result) {
+        assertTrue(result.isFailure());
+        assertThat(
+            result.getMessage(),
+            equalTo(
+                "max virtual memory areas vm.max_map_count [" + maxMapCount.get() + "] is too low, " +
+                    "increase to at least [" + BootstrapChecks.MaxMapCountCheck.LIMIT + "]"));
+    }
+
+    public void testMaxMapCountCheckBelowLimit() {
+        assertFailure(check.check(BootstrapChecksTests.defaultContext));
+    }
+
+    public void testMaxMapCountCheckBelowLimitAndMemoryMapAllowed() {
+        /*
+         * There are two ways that memory maps are allowed:
+         *  - by default
+         *  - mmapfs is explicitly allowed
+         * We want to test that if mmapfs is allowed then the max map count check is enforced.
+         */
+        final List settingsThatAllowMemoryMap = new ArrayList<>();
+        settingsThatAllowMemoryMap.add(Settings.EMPTY);
+        settingsThatAllowMemoryMap.add(Settings.builder().put("node.store.allow_mmapfs", true).build());
+
+        for (final Settings settingThatAllowsMemoryMap : settingsThatAllowMemoryMap) {
+            assertFailure(check.check(new BootstrapContext(settingThatAllowsMemoryMap, MetaData.EMPTY_META_DATA)));
+        }
+    }
+
+    public void testMaxMapCountCheckNotEnforcedIfMemoryMapNotAllowed() {
+        // nothing should happen if current vm.max_map_count is under the limit but mmapfs is not allowed
+        final Settings settings = Settings.builder().put("node.store.allow_mmapfs", false).build();
+        final BootstrapContext context = new BootstrapContext(settings, MetaData.EMPTY_META_DATA);
+        final BootstrapCheck.BootstrapCheckResult result = check.check(context);
+        assertTrue(result.isSuccess());
+    }
+
+    public void testMaxMapCountCheckAboveLimit() {
+        // nothing should happen if current vm.max_map_count exceeds the limit
+        maxMapCount.set(randomIntBetween(Math.toIntExact(BootstrapChecks.MaxMapCountCheck.LIMIT) + 1, Integer.MAX_VALUE));
+        final BootstrapCheck.BootstrapCheckResult result = check.check(BootstrapChecksTests.defaultContext);
+        assertTrue(result.isSuccess());
+    }
+
+    public void testMaxMapCountCheckMaxMapCountNotAvailable() {
+        // nothing should happen if current vm.max_map_count is not available
+        maxMapCount.set(-1);
+        final BootstrapCheck.BootstrapCheckResult result = check.check(BootstrapChecksTests.defaultContext);
+        assertTrue(result.isSuccess());
+    }
+
     public void testGetMaxMapCountOnLinux() {
         if (Constants.LINUX) {
             final BootstrapChecks.MaxMapCountCheck check = new BootstrapChecks.MaxMapCountCheck();
@@ -142,7 +207,7 @@ public class MaxMapCountCheckTests extends ESTestCase {
         }

         @Override
-        public void match(LogEvent event) {
+        public void match(final LogEvent event) {
             if (event.getLevel().equals(level) &&
                 event.getLoggerName().equals(loggerName) &&
                 event.getMessage() instanceof ParameterizedMessage) {
diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
index afc6b47483e..9baf2e1c956 100644
--- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
@@ -379,10 +379,10 @@ public class TransportClientNodesServiceTests extends ESTestCase {
             transportClientNodesService.addTransportAddresses(remoteService.getLocalDiscoNode().getAddress());
             assertEquals(1, transportClientNodesService.connectedNodes().size());
-            assertEquals(1, clientService.connectionManager().connectedNodeCount());
+            assertEquals(1, clientService.connectionManager().size());

             transportClientNodesService.doSample();
-            assertEquals(1, clientService.connectionManager().connectedNodeCount());
+            assertEquals(1, clientService.connectionManager().size());

             establishedConnections.clear();
             handler.blockRequest();
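The max-map-count tests moved above all lean on one seam: `getMaxMapCount()` is overridable, so a test can stand in for the `/proc/sys/vm/max_map_count` probe. A minimal sketch of that pattern, assuming the package-private hook shown in the hunk (it only compiles from within `org.elasticsearch.bootstrap`, as the tests do; the helper class itself is hypothetical):

```java
package org.elasticsearch.bootstrap;

import java.util.concurrent.atomic.AtomicLong;

// Illustrative helper, not part of the patch: builds a MaxMapCountCheck whose
// probe reads from an AtomicLong instead of /proc/sys/vm/max_map_count.
class StubbedMaxMapCountCheck {
    static BootstrapChecks.MaxMapCountCheck withMaxMapCount(final AtomicLong value) {
        return new BootstrapChecks.MaxMapCountCheck() {
            @Override
            long getMaxMapCount() {
                return value.get(); // -1 simulates "count could not be read"
            }
        };
    }
}
```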
diff --git a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java
index 473f5152e8f..b8c39c48f88 100644
--- a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java
@@ -134,7 +134,7 @@ public class NodeConnectionsServiceTests extends ESTestCase {

     private void assertConnectedExactlyToNodes(ClusterState state) {
         assertConnected(state.nodes());
-        assertThat(transportService.getConnectionManager().connectedNodeCount(), equalTo(state.nodes().getSize()));
+        assertThat(transportService.getConnectionManager().size(), equalTo(state.nodes().getSize()));
     }

     private void assertConnected(Iterable nodes) {
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java
new file mode 100644
index 00000000000..341022030b3
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class DiffableStringMapTests extends ESTestCase {
+
+    public void testDiffableStringMapDiff() {
+        Map m = new HashMap<>();
+        m.put("foo", "bar");
+        m.put("baz", "eggplant");
+        m.put("potato", "canon");
+        DiffableStringMap dsm = new DiffableStringMap(m);
+
+        Map m2 = new HashMap<>();
+        m2.put("foo", "not-bar");
+        m2.put("newkey", "yay");
+        m2.put("baz", "eggplant");
+        DiffableStringMap dsm2 = new DiffableStringMap(m2);
+
+        Diff diff = dsm2.diff(dsm);
+        assertThat(diff, instanceOf(DiffableStringMap.DiffableStringMapDiff.class));
+        DiffableStringMap.DiffableStringMapDiff dsmd = (DiffableStringMap.DiffableStringMapDiff) diff;
+
+        assertThat(dsmd.getDeletes(), containsInAnyOrder("potato"));
+        assertThat(dsmd.getDiffs().size(), equalTo(0));
+        Map upserts = new HashMap<>();
+        upserts.put("foo", "not-bar");
+        upserts.put("newkey", "yay");
+        assertThat(dsmd.getUpserts(), equalTo(upserts));
+
+        DiffableStringMap dsm3 = diff.apply(dsm);
+        assertThat(dsm3.get("foo"), equalTo("not-bar"));
+        assertThat(dsm3.get("newkey"), equalTo("yay"));
+        assertThat(dsm3.get("baz"), equalTo("eggplant"));
+        assertThat(dsm3.get("potato"), equalTo(null));
+    }
+
+    public void testRandomDiffing() {
+        Map m = new HashMap<>();
+        m.put("1", "1");
+        m.put("2", "2");
+        m.put("3", "3");
+        DiffableStringMap dsm = new DiffableStringMap(m);
+        DiffableStringMap expected = new DiffableStringMap(m);
+
+        for (int i = 0; i < randomIntBetween(5, 50); i++) {
+            if (randomBoolean() && expected.size() > 1) {
+                expected.remove(randomFrom(expected.keySet()));
+            } else if (randomBoolean()) {
+                expected.put(randomFrom(expected.keySet()), randomAlphaOfLength(4));
+            } else {
+                expected.put(randomAlphaOfLength(2), randomAlphaOfLength(4));
+            }
+            dsm = expected.diff(dsm).apply(dsm);
+        }
+        assertThat(expected, equalTo(dsm));
+    }
+
+    public void testSerialization() throws IOException {
+        Map m = new HashMap<>();
+        // Occasionally have an empty map
+        if (frequently()) {
+            m.put("foo", "bar");
+            m.put("baz", "eggplant");
+            m.put("potato", "canon");
+        }
+        DiffableStringMap dsm = new DiffableStringMap(m);
+
+        BytesStreamOutput bso = new BytesStreamOutput();
+        dsm.writeTo(bso);
+        DiffableStringMap deserialized = new DiffableStringMap(bso.bytes().streamInput());
+        assertThat(deserialized, equalTo(dsm));
+    }
+}
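The new `DiffableStringMapTests` pin down the delta semantics behind the string-to-string index customs introduced later in this patch: `newMap.diff(oldMap)` records upserts and deletes, and `apply(oldMap)` replays them. A sketch of that round trip, assuming the same package-level access the tests have (the tests construct `DiffableStringMap` directly from within `org.elasticsearch.cluster.metadata`):

```java
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.metadata.DiffableStringMap;

public class DiffableStringMapSketch {
    public static void main(String[] args) {
        Map<String, String> before = new HashMap<>();
        before.put("foo", "bar");
        before.put("potato", "canon");

        Map<String, String> after = new HashMap<>();
        after.put("foo", "not-bar"); // upsert
        // "potato" is absent from 'after', so the diff records a delete.

        DiffableStringMap old = new DiffableStringMap(before);
        Diff<DiffableStringMap> diff = new DiffableStringMap(after).diff(old);
        // Applying the diff to the old map reproduces the new one.
        System.out.println(diff.apply(old).get("foo"));    // not-bar
        System.out.println(diff.apply(old).get("potato")); // null (deleted)
    }
}
```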
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
index 744a29e843c..1aaec080307 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
@@ -56,11 +56,11 @@ import org.hamcrest.Matchers;
 import org.mockito.ArgumentCaptor;

 import java.io.IOException;
-import java.util.Map;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Collections;
 import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
 import java.util.function.Supplier;

 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
@@ -71,13 +71,13 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.startsWith;
 import static org.mockito.Matchers.anyObject;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.anyMap;
-import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;

 public class IndexCreationTaskTests extends ESTestCase {
@@ -127,14 +127,12 @@ public class IndexCreationTaskTests extends ESTestCase {
         addMatchingTemplate(builder -> builder
                 .putAlias(AliasMetaData.builder("alias1"))
                 .putMapping("mapping1", createMapping())
-                .putCustom("custom1", createCustom())
                 .settings(Settings.builder().put("key1", "value1"))
         );

         final ClusterState result = executeTask();

         assertThat(result.metaData().index("test").getAliases(), hasKey("alias1"));
-        assertThat(result.metaData().index("test").getCustoms(), hasKey("custom1"));
         assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("value1"));
         assertThat(getMappingsFromResponse(), Matchers.hasKey("mapping1"));
     }
@@ -142,41 +140,31 @@ public class IndexCreationTaskTests extends ESTestCase {
     public void testApplyDataFromRequest() throws Exception {
         setupRequestAlias(new Alias("alias1"));
         setupRequestMapping("mapping1", createMapping());
-        setupRequestCustom("custom1", createCustom());
         reqSettings.put("key1", "value1");

         final ClusterState result = executeTask();

         assertThat(result.metaData().index("test").getAliases(), hasKey("alias1"));
-        assertThat(result.metaData().index("test").getCustoms(), hasKey("custom1"));
         assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("value1"));
         assertThat(getMappingsFromResponse(), Matchers.hasKey("mapping1"));
     }

     public void testRequestDataHavePriorityOverTemplateData() throws Exception {
-        final IndexMetaData.Custom tplCustom = createCustom();
-        final IndexMetaData.Custom reqCustom = createCustom();
-        final IndexMetaData.Custom mergedCustom = createCustom();
-        when(reqCustom.mergeWith(tplCustom)).thenReturn(mergedCustom);
-
         final CompressedXContent tplMapping = createMapping("text");
         final CompressedXContent reqMapping = createMapping("keyword");

         addMatchingTemplate(builder -> builder
                 .putAlias(AliasMetaData.builder("alias1").searchRouting("fromTpl").build())
                 .putMapping("mapping1", tplMapping)
-                .putCustom("custom1", tplCustom)
                 .settings(Settings.builder().put("key1", "tplValue"))
         );

         setupRequestAlias(new Alias("alias1").searchRouting("fromReq"));
         setupRequestMapping("mapping1", reqMapping);
-        setupRequestCustom("custom1", reqCustom);
         reqSettings.put("key1", "reqValue");

         final ClusterState result = executeTask();

-        assertThat(result.metaData().index("test").getCustoms().get("custom1"), equalTo(mergedCustom));
         assertThat(result.metaData().index("test").getAliases().get("alias1").getSearchRouting(), equalTo("fromReq"));
         assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("reqValue"));
         assertThat(getMappingsFromResponse().get("mapping1").toString(), equalTo("{type={properties={field={type=keyword}}}}"));
@@ -272,14 +260,13 @@ public class IndexCreationTaskTests extends ESTestCase {
         addMatchingTemplate(builder -> builder
                 .putAlias(AliasMetaData.builder("alias1").searchRouting("fromTpl").build())
                 .putMapping("mapping1", createMapping())
-                .putCustom("custom1", createCustom())
                 .settings(Settings.builder().put("key1", "tplValue"))
         );

         final ClusterState result = executeTask();

         assertThat(result.metaData().index("test").getAliases(), not(hasKey("alias1")));
-        assertThat(result.metaData().index("test").getCustoms(), not(hasKey("custom1")));
+        assertThat(result.metaData().index("test").getCustomData(), not(hasKey("custom1")));
         assertThat(result.metaData().index("test").getSettings().keySet(), not(Matchers.contains("key1")));
         assertThat(getMappingsFromResponse(), not(Matchers.hasKey("mapping1")));
     }
@@ -296,7 +283,6 @@ public class IndexCreationTaskTests extends ESTestCase {
         Boolean writeIndex = randomBoolean() ? null : randomBoolean();
         setupRequestAlias(new Alias("alias1").writeIndex(writeIndex));
         setupRequestMapping("mapping1", createMapping());
-        setupRequestCustom("custom1", createCustom());
         reqSettings.put("key1", "value1");

         final ClusterState result = executeTask();
@@ -310,7 +296,6 @@ public class IndexCreationTaskTests extends ESTestCase {
             .numberOfShards(1).numberOfReplicas(0).build();
         idxBuilder.put("test2", existingWriteIndex);
         setupRequestMapping("mapping1", createMapping());
-        setupRequestCustom("custom1", createCustom());
         reqSettings.put("key1", "value1");
         setupRequestAlias(new Alias("alias1").writeIndex(true));
@@ -342,8 +327,8 @@ public class IndexCreationTaskTests extends ESTestCase {
             .numberOfReplicas(numReplicas);
     }

-    private IndexMetaData.Custom createCustom() {
-        return mock(IndexMetaData.Custom.class);
+    private Map createCustom() {
+        return Collections.singletonMap("a", "b");
     }

     private interface MetaDataBuilderConfigurator {
@@ -372,10 +357,6 @@ public class IndexCreationTaskTests extends ESTestCase {
         when(request.mappings()).thenReturn(Collections.singletonMap(mappingKey, mapping.string()));
     }

-    private void setupRequestCustom(String customKey, IndexMetaData.Custom custom) throws IOException {
-        when(request.customs()).thenReturn(Collections.singletonMap(customKey, custom));
-    }
-
     private CompressedXContent createMapping() throws IOException {
         return createMapping("text");
     }
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java
index 9e8a5e04f43..393f7f6b1d4 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java
@@ -23,7 +23,9 @@ import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition;
 import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition;
 import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition;
 import org.elasticsearch.action.admin.indices.rollover.RolloverInfo;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -45,6 +47,8 @@ import org.junit.Before;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Set;

 import static org.hamcrest.Matchers.is;
@@ -71,6 +75,9 @@ public class IndexMetaDataTests extends ESTestCase {
     public void testIndexMetaDataSerialization() throws IOException {
         Integer numShard = randomFrom(1, 2, 4, 8, 16);
         int numberOfReplicas = randomIntBetween(0, 10);
+        Map customMap = new HashMap<>();
+        customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10));
+        customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15));
         IndexMetaData metaData = IndexMetaData.builder("foo")
             .settings(Settings.builder()
                 .put("index.version.created", 1)
             .creationDate(randomLong())
             .primaryTerm(0, 2)
             .setRoutingNumShards(32)
+            .putCustom("my_custom", customMap)
             .putRolloverInfo(
                 new RolloverInfo(randomAlphaOfLength(5),
                     Arrays.asList(new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())),
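`IndexCreationTaskTests` and `IndexMetaDataTests` together show the replacement for the old `IndexMetaData.Custom` objects: customs are now plain string-to-string maps attached via `putCustom` and read back through `getCustomData()`. A sketch of the builder usage, assuming the server classes in the hunks above; the key and values are examples only:

```java
import java.util.Collections;
import java.util.Map;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;

public class IndexCustomDataSketch {
    public static void main(String[] args) {
        Map<String, String> customMap = Collections.singletonMap("a", "b");
        IndexMetaData metaData = IndexMetaData.builder("foo")
            .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
            .numberOfShards(1)
            .numberOfReplicas(0)
            .putCustom("my_custom", customMap) // plain Map<String, String> instead of a Custom object
            .build();
        // getCustomData() exposes the map (wrapped as a DiffableStringMap).
        System.out.println(metaData.getCustomData().get("my_custom").get("a")); // b
    }
}
```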
@@ -93,7 +101,8 @@ public class IndexMetaDataTests extends ESTestCase {
         builder.endObject();
         XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder));
         final IndexMetaData fromXContentMeta = IndexMetaData.fromXContent(parser);
-        assertEquals(metaData, fromXContentMeta);
+        assertEquals("expected: " + Strings.toString(metaData) + "\nactual  : " + Strings.toString(fromXContentMeta),
+            metaData, fromXContentMeta);
         assertEquals(metaData.hashCode(), fromXContentMeta.hashCode());

         assertEquals(metaData.getNumberOfReplicas(), fromXContentMeta.getNumberOfReplicas());
@@ -103,6 +112,11 @@ public class IndexMetaDataTests extends ESTestCase {
         assertEquals(metaData.getCreationDate(), fromXContentMeta.getCreationDate());
         assertEquals(metaData.getRoutingFactor(), fromXContentMeta.getRoutingFactor());
         assertEquals(metaData.primaryTerm(0), fromXContentMeta.primaryTerm(0));
+        ImmutableOpenMap.Builder expectedCustomBuilder = ImmutableOpenMap.builder();
+        expectedCustomBuilder.put("my_custom", new DiffableStringMap(customMap));
+        ImmutableOpenMap expectedCustom = expectedCustomBuilder.build();
+        assertEquals(metaData.getCustomData(), expectedCustom);
+        assertEquals(metaData.getCustomData(), fromXContentMeta.getCustomData());

         final BytesStreamOutput out = new BytesStreamOutput();
         metaData.writeTo(out);
@@ -119,6 +133,8 @@ public class IndexMetaDataTests extends ESTestCase {
             assertEquals(metaData.getRoutingFactor(), deserialized.getRoutingFactor());
             assertEquals(metaData.primaryTerm(0), deserialized.primaryTerm(0));
             assertEquals(metaData.getRolloverInfos(), deserialized.getRolloverInfos());
+            assertEquals(deserialized.getCustomData(), expectedCustom);
+            assertEquals(metaData.getCustomData(), deserialized.getCustomData());
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java
index 6d489f5feb3..5fc07642354 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java
@@ -18,12 +18,9 @@
  */
 package org.elasticsearch.cluster.metadata;

-import org.elasticsearch.Version;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.DeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@@ -35,62 +32,15 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.test.ESTestCase;

-import java.io.IOException;
 import java.util.Arrays;
-import java.util.Base64;
 import java.util.Collections;

 import static java.util.Collections.singletonMap;
-import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.Matchers.contains;

 public class IndexTemplateMetaDataTests extends ESTestCase {

-    // bwc for #21009
-    public void testIndexTemplateMetaData510() throws IOException {
-        IndexTemplateMetaData metaData = IndexTemplateMetaData.builder("foo")
-            .patterns(Collections.singletonList("bar"))
-            .order(1)
-            .settings(Settings.builder()
-                .put("setting1", "value1")
-                .put("setting2", "value2"))
-            .putAlias(newAliasMetaDataBuilder("alias-bar1")).build();
-
-        IndexTemplateMetaData multiMetaData = IndexTemplateMetaData.builder("foo")
-            .patterns(Arrays.asList("bar", "foo"))
-            .order(1)
-            .settings(Settings.builder()
-                .put("setting1", "value1")
-                .put("setting2", "value2"))
-            .putAlias(newAliasMetaDataBuilder("alias-bar1")).build();
-
-        // These bytes were retrieved by Base64 encoding the result of the above with 5_0_0 code
-        String templateBytes = "A2ZvbwAAAAEDYmFyAghzZXR0aW5nMQEGdmFsdWUxCHNldHRpbmcyAQZ2YWx1ZTIAAQphbGlhcy1iYXIxAAAAAAA=";
-        BytesArray bytes = new BytesArray(Base64.getDecoder().decode(templateBytes));
-
-        try (StreamInput in = bytes.streamInput()) {
-            in.setVersion(Version.V_5_0_0);
-            IndexTemplateMetaData readMetaData = IndexTemplateMetaData.readFrom(in);
-            assertEquals(0, in.available());
-            assertEquals(metaData.getName(), readMetaData.getName());
-            assertEquals(metaData.getPatterns(), readMetaData.getPatterns());
-            assertTrue(metaData.aliases().containsKey("alias-bar1"));
-            assertEquals(1, metaData.aliases().size());
-
-            BytesStreamOutput output = new BytesStreamOutput();
-            output.setVersion(Version.V_5_0_0);
-            readMetaData.writeTo(output);
-            assertEquals(bytes.toBytesRef(), output.bytes().toBytesRef());
-
-            // test that multi templates are reverse-compatible.
-            // for the bwc case, if multiple patterns, use only the first pattern seen.
-            output.reset();
-            multiMetaData.writeTo(output);
-            assertEquals(bytes.toBytesRef(), output.bytes().toBytesRef());
-        }
-    }
-
     public void testIndexTemplateMetaDataXContentRoundTrip() throws Exception {

         ToXContent.Params params = new ToXContent.MapParams(singletonMap("reduce_mappings", "true"));
@@ -128,13 +78,13 @@ public class IndexTemplateMetaDataTests extends ESTestCase {
     public void testValidateInvalidIndexPatterns() throws Exception {
         final IllegalArgumentException emptyPatternError = expectThrows(IllegalArgumentException.class, () -> {
             new IndexTemplateMetaData(randomRealisticUnicodeOfLengthBetween(5, 10), randomInt(), randomInt(),
-                Collections.emptyList(), Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of());
+                Collections.emptyList(), Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of());
         });
         assertThat(emptyPatternError.getMessage(), equalTo("Index patterns must not be null or empty; got []"));

         final IllegalArgumentException nullPatternError = expectThrows(IllegalArgumentException.class, () -> {
             new IndexTemplateMetaData(randomRealisticUnicodeOfLengthBetween(5, 10), randomInt(), randomInt(),
-                null, Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of());
+                null, Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of());
         });
         assertThat(nullPatternError.getMessage(), equalTo("Index patterns must not be null or empty; got null"));
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
index 24f5a696561..abb34f80eac 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
@@ -261,6 +261,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
             .put("index.version.upgraded", upgraded)
             .put("index.similarity.default.type", "BM25")
.put("index.analysis.analyzer.default.tokenizer", "keyword") + .put("index.soft_deletes.enabled", "true") .build(); runPrepareResizeIndexSettingsTest( indexSettings, @@ -277,6 +278,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { assertThat(settings.get("index.allocation.max_retries"), equalTo("1")); assertThat(settings.getAsVersion("index.version.created", null), equalTo(version)); assertThat(settings.getAsVersion("index.version.upgraded", null), equalTo(upgraded)); + assertThat(settings.get("index.soft_deletes.enabled"), equalTo("true")); }); } @@ -337,6 +339,15 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { } + public void testDoNotOverrideSoftDeletesSettingOnResize() { + runPrepareResizeIndexSettingsTest( + Settings.builder().put("index.soft_deletes.enabled", "false").build(), + Settings.builder().put("index.soft_deletes.enabled", "true").build(), + Collections.emptyList(), + randomBoolean(), + settings -> assertThat(settings.get("index.soft_deletes.enabled"), equalTo("true"))); + } + private void runPrepareResizeIndexSettingsTest( final Settings sourceSettings, final Settings requestSettings, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java index e329e70134c..c1e341fd5bc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java @@ -147,7 +147,7 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_CREATION_DATE, 1) .put(IndexMetaData.SETTING_INDEX_UUID, "BOOM") - .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_5_0_0_beta1) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_6_0_0_alpha1) .put(indexSettings) .build(); return IndexMetaData.builder(name).settings(build).build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java index 1e46c2c4286..865059c3379 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java @@ -16,12 +16,15 @@ * specific language governing permissions and limitations * under the License. 
  */
+
 package org.elasticsearch.cluster.metadata;

 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateTaskExecutor;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -31,6 +34,7 @@ import java.util.Collection;
 import java.util.Collections;

 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;

 public class MetaDataMappingServiceTests extends ESSingleNodeTestCase {
@@ -47,8 +51,18 @@ public class MetaDataMappingServiceTests extends ESSingleNodeTestCase {
         final ClusterService clusterService = getInstanceFromNode(ClusterService.class);
         // TODO - it will be nice to get a random mapping generator
         final PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest().type("type");
-        request.source("{ \"properties\" { \"field\": { \"type\": \"text\" }}}");
-        mappingService.putMappingExecutor.execute(clusterService.state(), Collections.singletonList(request));
+        request.indices(new Index[] {indexService.index()});
+        request.source("{ \"properties\": { \"field\": { \"type\": \"text\" }}}");
+        final ClusterStateTaskExecutor.ClusterTasksResult result =
+            mappingService.putMappingExecutor.execute(clusterService.state(), Collections.singletonList(request));
+        // the task completed successfully
+        assertThat(result.executionResults.size(), equalTo(1));
+        assertTrue(result.executionResults.values().iterator().next().isSuccess());
+        // the task really was a mapping update
+        assertThat(
+            indexService.mapperService().documentMapper("type").mappingSource(),
+            not(equalTo(result.resultingState.metaData().index("test").mapping("type").source())));
+        // since we never committed the cluster state update, the in-memory state is unchanged
         assertThat(indexService.mapperService().documentMapper("type").mappingSource(), equalTo(currentMapping));
     }
@@ -69,4 +83,35 @@ public class MetaDataMappingServiceTests extends ESSingleNodeTestCase {
         assertSame(result, result2);
     }
+
+    public void testMappingVersion() throws Exception {
+        final IndexService indexService = createIndex("test", client().admin().indices().prepareCreate("test").addMapping("type"));
+        final long previousVersion = indexService.getMetaData().getMappingVersion();
+        final MetaDataMappingService mappingService = getInstanceFromNode(MetaDataMappingService.class);
+        final ClusterService clusterService = getInstanceFromNode(ClusterService.class);
+        final PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest().type("type");
+        request.indices(new Index[] {indexService.index()});
+        request.source("{ \"properties\": { \"field\": { \"type\": \"text\" }}}");
+        final ClusterStateTaskExecutor.ClusterTasksResult result =
+            mappingService.putMappingExecutor.execute(clusterService.state(), Collections.singletonList(request));
+        assertThat(result.executionResults.size(), equalTo(1));
+        assertTrue(result.executionResults.values().iterator().next().isSuccess());
+        assertThat(result.resultingState.metaData().index("test").getMappingVersion(), equalTo(1 + previousVersion));
+    }
+
+    public void testMappingVersionUnchanged() throws Exception {
+        final IndexService indexService = createIndex("test", client().admin().indices().prepareCreate("test").addMapping("type"));
client().admin().indices().prepareCreate("test").addMapping("type")); + final long previousVersion = indexService.getMetaData().getMappingVersion(); + final MetaDataMappingService mappingService = getInstanceFromNode(MetaDataMappingService.class); + final ClusterService clusterService = getInstanceFromNode(ClusterService.class); + final PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest().type("type"); + request.indices(new Index[] {indexService.index()}); + request.source("{ \"properties\": {}}"); + final ClusterStateTaskExecutor.ClusterTasksResult<PutMappingClusterStateUpdateRequest> result = + mappingService.putMappingExecutor.execute(clusterService.state(), Collections.singletonList(request)); + assertThat(result.executionResults.size(), equalTo(1)); + assertTrue(result.executionResults.values().iterator().next().isSuccess()); + assertThat(result.resultingState.metaData().index("test").getMappingVersion(), equalTo(previousVersion)); + } + } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java index 9d82e9e1cdc..da50e99705d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java @@ -109,6 +109,38 @@ public class MetaDataTests extends ESTestCase { } } + public void testFindAliasWithExclusion() { + MetaData metaData = MetaData.builder().put( + IndexMetaData.builder("index") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .putAlias(AliasMetaData.builder("alias1").build()) + .putAlias(AliasMetaData.builder("alias2").build()) + ).build(); + List<AliasMetaData> aliases = + metaData.findAliases(new GetAliasesRequest().aliases("*", "-alias1"), new String[] {"index"}).get("index"); + assertThat(aliases.size(), equalTo(1)); + assertThat(aliases.get(0).alias(), equalTo("alias2")); + } + + public void testFindAliasWithExclusionAndOverride() { + MetaData metaData = MetaData.builder().put( + IndexMetaData.builder("index") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .putAlias(AliasMetaData.builder("aa").build()) + .putAlias(AliasMetaData.builder("ab").build()) + .putAlias(AliasMetaData.builder("bb").build()) + ).build(); + List<AliasMetaData> aliases = + metaData.findAliases(new GetAliasesRequest().aliases("a*", "-*b", "b*"), new String[] {"index"}).get("index"); + assertThat(aliases.size(), equalTo(2)); + assertThat(aliases.get(0).alias(), equalTo("aa")); + assertThat(aliases.get(1).alias(), equalTo("bb")); + } + public void testIndexAndAliasWithSameName() { IndexMetaData.Builder builder = IndexMetaData.builder("index") .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java index c1861572d83..86dbeabd1d7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.cluster.routing; -import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource; +import org.elasticsearch.cluster.routing.RecoverySource.ExistingStoreRecoverySource; import
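`testFindAliasWithExclusionAndOverride` pins down the semantics of mixed alias patterns: expressions are evaluated left to right, a pattern prefixed with `-` removes whatever the earlier patterns have matched, and a later inclusive pattern can add names back, so `a*`, `-*b`, `b*` resolves to `aa` and `bb`. A self-contained sketch of that resolution order, with `*` wildcards translated to regexes instead of the real `MetaData#findAliases` machinery:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Left-to-right resolution of alias patterns with "-" exclusions, as exercised by
// testFindAliasWithExclusionAndOverride. Wildcard matching is simplified here;
// the real implementation lives in MetaData#findAliases.
final class AliasPatternSketch {
    static List<String> resolve(List<String> expressions, Set<String> aliases) {
        Set<String> result = new LinkedHashSet<>();
        for (String expression : expressions) {
            boolean exclude = expression.startsWith("-");
            String pattern = exclude ? expression.substring(1) : expression;
            String regex = pattern.replace("*", ".*");
            for (String alias : aliases) {
                if (alias.matches(regex)) {
                    if (exclude) {
                        result.remove(alias);   // "-" removes matches collected so far...
                    } else {
                        result.add(alias);      // ...and a later pattern may re-add them.
                    }
                }
            }
        }
        return new ArrayList<>(result);
    }

    public static void main(String[] args) {
        Set<String> aliases = new LinkedHashSet<>(Arrays.asList("aa", "ab", "bb"));
        // Prints [aa, bb]: "a*" adds aa and ab, "-*b" removes ab, and "b*" adds bb.
        System.out.println(resolve(Arrays.asList("a*", "-*b", "b*"), aliases));
    }
}
```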
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentFactory; @@ -37,7 +37,7 @@ import static org.hamcrest.Matchers.nullValue; public class AllocationIdTests extends ESTestCase { public void testShardToStarted() { logger.info("-- create unassigned shard"); - ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); assertThat(shard.allocationId(), nullValue()); logger.info("-- initialize the shard"); @@ -57,7 +57,7 @@ public class AllocationIdTests extends ESTestCase { public void testSuccessfulRelocation() { logger.info("-- build started shard"); - ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); shard = shard.initialize("node1", null, -1); shard = shard.moveToStarted(); @@ -80,7 +80,7 @@ public class AllocationIdTests extends ESTestCase { public void testCancelRelocation() { logger.info("-- build started shard"); - ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); shard = shard.initialize("node1", null, -1); shard = shard.moveToStarted(); @@ -100,7 +100,7 @@ public class AllocationIdTests extends ESTestCase { public void testMoveToUnassigned() { logger.info("-- build started shard"); - ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); shard = shard.initialize("node1", null, -1); shard = shard.moveToStarted(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/GroupShardsIteratorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/GroupShardsIteratorTests.java index f2571fce339..66eabd4cbd9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/GroupShardsIteratorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/GroupShardsIteratorTests.java @@ -77,7 +77,7 @@ public class GroupShardsIteratorTests extends ESTestCase { public ShardRouting newRouting(Index index, int id, boolean started) { ShardRouting shardRouting = ShardRouting.newUnassigned(new ShardId(index, id), true, - RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + RecoverySource.EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); shardRouting = 
ShardRoutingHelper.initialize(shardRouting, "some node"); if (started) { shardRouting = ShardRoutingHelper.moveToStarted(shardRouting); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 90173455c3b..9b2db5b34b1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; @@ -55,6 +56,7 @@ import java.util.List; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; @@ -64,6 +66,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.not; @@ -83,18 +86,9 @@ public class PrimaryAllocationIT extends ESIntegTestCase { .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false).build(); } - private void createStaleReplicaScenario() throws Exception { - logger.info("--> starting 3 nodes, 1 master, 2 data"); - String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); - internalCluster().startDataOnlyNodes(2); - - assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() - .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get()); - ensureGreen(); - logger.info("--> indexing..."); + private void createStaleReplicaScenario(String master) throws Exception { client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); refresh(); - ClusterState state = client().admin().cluster().prepareState().all().get().getState(); List shards = state.routingTable().allShards("test"); assertThat(shards.size(), equalTo(2)); @@ -140,7 +134,13 @@ public class PrimaryAllocationIT extends ESIntegTestCase { } public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception { - createStaleReplicaScenario(); + logger.info("--> starting 3 nodes, 1 master, 2 data"); + String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); + internalCluster().startDataOnlyNodes(2); + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get()); + ensureGreen(); + createStaleReplicaScenario(master); logger.info("--> starting node that reuses data folder with the up-to-date primary shard"); internalCluster().startDataOnlyNode(Settings.EMPTY); @@ -176,9 +176,17 @@ public class 
PrimaryAllocationIT extends ESIntegTestCase { } public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { - boolean useStaleReplica = randomBoolean(); // if true, use stale replica, otherwise a completely empty copy - createStaleReplicaScenario(); + logger.info("--> starting 3 nodes, 1 master, 2 data"); + String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); + internalCluster().startDataOnlyNodes(2); + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get()); + ensureGreen(); + Set<String> historyUUIDs = Arrays.stream(client().admin().indices().prepareStats("test").clear().get().getShards()) + .map(shard -> shard.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY)).collect(Collectors.toSet()); + createStaleReplicaScenario(master); + boolean useStaleReplica = randomBoolean(); // if true, use stale replica, otherwise a completely empty copy logger.info("--> explicitly promote old primary shard"); final String idxName = "test"; ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> storeStatuses = client().admin().indices().prepareShardStores(idxName).get().getStoreStatuses().get(idxName); @@ -213,6 +221,11 @@ public class PrimaryAllocationIT extends ESIntegTestCase { ClusterState state = client().admin().cluster().prepareState().get().getState(); assertEquals(Collections.singleton(state.routingTable().index(idxName).shard(0).primary.allocationId().getId()), state.metaData().index(idxName).inSyncAllocationIds(0)); + + Set<String> newHistoryUUIDs = Arrays.stream(client().admin().indices().prepareStats("test").clear().get().getShards()) + .map(shard -> shard.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY)).collect(Collectors.toSet()); + assertThat(newHistoryUUIDs, everyItem(not(isIn(historyUUIDs)))); + assertThat(newHistoryUUIDs, hasSize(1)); } public void testForcePrimaryShardIfAllocationDecidersSayNoAfterIndexCreation() throws ExecutionException, InterruptedException { @@ -392,6 +405,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase { assertThat(shard.getLocalCheckpoint(), equalTo(numDocs + moreDocs)); } }, 30, TimeUnit.SECONDS); + internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 8038d9b5e18..d4645208071 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -228,7 +228,7 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase { } final String id = String.format(Locale.ROOT, "node_%03d", nodeIdGenerator.incrementAndGet()); return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(), roles, - VersionUtils.randomVersionBetween(random(), Version.V_5_6_0, null)); + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1, null)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 1fa1ff3a154..787789d410f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -576,7 +576,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { // add a single node clusterState = ClusterState.builder(clusterState).nodes( DiscoveryNodes.builder() - .add(newNode("node1-5.x", Version.V_5_6_0))) + .add(newNode("node1-5.x", Version.fromId(5060099)))) .build(); clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); @@ -590,7 +590,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { // add another 5.6 node clusterState = ClusterState.builder(clusterState).nodes( DiscoveryNodes.builder(clusterState.nodes()) - .add(newNode("node2-5.x", Version.V_5_6_0))) + .add(newNode("node2-5.x", Version.fromId(5060099)))) .build(); // start the shards, should have 1 primary and 1 replica available diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java index 79473759f8f..86e8887688f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing.allocation; -import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -27,48 +26,170 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.DiscoveryNodes.Builder; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.hamcrest.Matchers; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING; +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING; +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING; +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING; 
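The `testCluster*Filters` cases added below all delegate to a single helper and differ only in the filter settings and the node attributes, which makes the intended semantics easy to read off: every `require` filter must match, at least one `include` filter must match (when any are configured), no `exclude` filter may match, and filter values may contain `*` wildcards. A sketch of a matcher with those semantics, inferred from the expected allocations rather than taken from the actual `DiscoveryNodeFilters` implementation:

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class AllocationFilterSketch {
    final Map<String, List<String>> require = new HashMap<>();
    final Map<String, List<String>> include = new HashMap<>();
    final Map<String, List<String>> exclude = new HashMap<>();

    static boolean matches(Map<String, String> nodeAttrs, String attr, List<String> values) {
        String actual = nodeAttrs.get(attr);
        if (actual == null) {
            return false; // a node without the attribute can never match a filter on it
        }
        // filter values may contain simple wildcards such as "*excl*"
        return values.stream().anyMatch(v -> actual.matches(v.replace("*", ".*")));
    }

    boolean canAllocateTo(Map<String, String> nodeAttrs) {
        boolean ok = require.entrySet().stream().allMatch(e -> matches(nodeAttrs, e.getKey(), e.getValue()));
        if (!include.isEmpty()) {
            // one matching include filter is enough, as testClusterIncludeFiltersMultipleAttributes shows
            ok &= include.entrySet().stream().anyMatch(e -> matches(nodeAttrs, e.getKey(), e.getValue()));
        }
        // a single exclude match disqualifies the node
        ok &= exclude.entrySet().stream().noneMatch(e -> matches(nodeAttrs, e.getKey(), e.getValue()));
        return ok;
    }

    public static void main(String[] args) {
        AllocationFilterSketch filters = new AllocationFilterSketch();
        filters.include.put("tag1", Arrays.asList("value1", "value2"));
        Map<String, String> node1 = new HashMap<>();
        node1.put("tag1", "value1");
        Map<String, String> node3 = new HashMap<>();
        node3.put("tag1", "value3");
        System.out.println(filters.canAllocateTo(node1)); // true
        System.out.println(filters.canAllocateTo(node3)); // false, filtered out
    }
}
```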
import static org.hamcrest.Matchers.equalTo; public class FilterRoutingTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(FilterRoutingTests.class); - public void testClusterFilters() { - AllocationService strategy = createAllocationService(Settings.builder() - .put("cluster.routing.allocation.include.tag1", "value1,value2") - .put("cluster.routing.allocation.exclude.tag1", "value3,value4") - .build()); + public void testClusterIncludeFiltersSingleAttribute() { + testClusterFilters(Settings.builder().put(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag1", "value1,value2"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "value1"))) + .add(newNode("node2", attrMap("tag1", "value2"))) + .add(newNode("node3", attrMap("tag1", "value3"))) + .add(newNode("node4", attrMap("tag1", "value4")))); + } + + public void testClusterIncludeFiltersMultipleAttributes() { + testClusterFilters(Settings.builder() + .put(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag1", "value1") + .put(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag2", "value2"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "value1"))) + .add(newNode("node2", attrMap("tag2", "value2"))) + .add(newNode("node3", attrMap("tag1", "value3"))) + .add(newNode("node4", attrMap("tag2", "value4")))); + } + + public void testClusterIncludeFiltersOptionalAttribute() { + testClusterFilters(Settings.builder().put(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag1", "value1,value2"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "value1"))) + .add(newNode("node2", attrMap("tag1", "value2"))) + .add(newNode("node3", attrMap())) + .add(newNode("node4", attrMap()))); + } + + public void testClusterIncludeFiltersWildcards() { + testClusterFilters(Settings.builder() + .put(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag1", "*incl*") + .put(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag2", "*incl*"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "do_include_this"))) + .add(newNode("node2", attrMap("tag2", "also_include_this"))) + .add(newNode("node3", attrMap("tag1", "exclude_this"))) + .add(newNode("node4", attrMap("tag2", "also_exclude_this")))); + } + + public void testClusterExcludeFiltersSingleAttribute() { + testClusterFilters(Settings.builder().put(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag1", "value3,value4"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "value1"))) + .add(newNode("node2", attrMap("tag1", "value2"))) + .add(newNode("node3", attrMap("tag1", "value3"))) + .add(newNode("node4", attrMap("tag1", "value4")))); + } + + public void testClusterExcludeFiltersMultipleAttributes() { + testClusterFilters(Settings.builder() + .put(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag1", "value3") + .put(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag2", "value4"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "value1"))) + .add(newNode("node2", attrMap("tag2", "value2"))) + .add(newNode("node3", attrMap("tag1", "value3"))) + .add(newNode("node4", attrMap("tag2", "value4")))); + } + + public void testClusterExcludeFiltersOptionalAttribute() { + testClusterFilters(Settings.builder().put(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag1", "value3,value4"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap())) + .add(newNode("node2", attrMap())) + .add(newNode("node3", attrMap("tag1", "value3"))) + .add(newNode("node4", 
attrMap("tag1", "value4")))); + } + + public void testClusterExcludeFiltersWildcards() { + testClusterFilters(Settings.builder() + .put(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag1", "*excl*") + .put(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag2", "*excl*"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "do_include_this"))) + .add(newNode("node2", attrMap("tag2", "also_include_this"))) + .add(newNode("node3", attrMap("tag1", "exclude_this"))) + .add(newNode("node4", attrMap("tag2", "also_exclude_this")))); + } + + public void testClusterIncludeAndExcludeFilters() { + testClusterFilters(Settings.builder() + .put(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag1", "*incl*") + .put(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag2", "*excl*"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "do_include_this"))) + .add(newNode("node2", attrMap("tag1", "also_include_this", "tag2", "ok_by_tag2"))) + .add(newNode("node3", attrMap("tag1", "included_by_tag1", "tag2", "excluded_by_tag2"))) + .add(newNode("node4", attrMap("tag1", "excluded_by_tag1", "tag2", "included_by_tag2")))); + } + + public void testClusterRequireFilters() { + testClusterFilters(Settings.builder() + .put(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "tag1", "req1") + .put(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "tag2", "req2"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "req1", "tag2", "req2"))) + .add(newNode("node2", attrMap("tag1", "req1", "tag2", "req2"))) + .add(newNode("node3", attrMap("tag1", "req1"))) + .add(newNode("node4", attrMap("tag1", "other", "tag2", "req2")))); + } + + private static Map attrMap(String... keysValues) { + if (keysValues.length == 0) { + return emptyMap(); + } + if (keysValues.length == 2) { + return singletonMap(keysValues[0], keysValues[1]); + } + Map result = new HashMap<>(); + for (int i = 0; i < keysValues.length; i += 2) { + result.put(keysValues[i], keysValues[i + 1]); + } + return result; + } + + /** + * A test that creates a 2p1r index and which expects the given allocation service's settings only to allocate the shards of this index + * to `node1` and `node2`. 
+ */ + private void testClusterFilters(Settings.Builder allocationServiceSettings, DiscoveryNodes.Builder nodes) { + final AllocationService strategy = createAllocationService(allocationServiceSettings.build()); logger.info("Building initial routing table"); - MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1)) - .build(); + final MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1)) + .build(); - RoutingTable initialRoutingTable = RoutingTable.builder() - .addAsNew(metaData.index("test")) - .build(); + final RoutingTable initialRoutingTable = RoutingTable.builder() + .addAsNew(metaData.index("test")) + .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(initialRoutingTable).nodes(nodes).build(); - logger.info("--> adding four nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .add(newNode("node1", singletonMap("tag1", "value1"))) - .add(newNode("node2", singletonMap("tag1", "value2"))) - .add(newNode("node3", singletonMap("tag1", "value3"))) - .add(newNode("node4", singletonMap("tag1", "value4"))) - ).build(); + logger.info("--> rerouting"); clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); @@ -79,41 +200,99 @@ public class FilterRoutingTests extends ESAllocationTestCase { clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); logger.info("--> make sure shards are only allocated on tag1 with value1 and value2"); - List startedShards = clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED); + final List startedShards = clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED); assertThat(startedShards.size(), equalTo(4)); for (ShardRouting startedShard : startedShards) { assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node2"))); } } - public void testIndexFilters() { + public void testIndexIncludeFilters() { + testIndexFilters( + Settings.builder().put(INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag1", "value1,value2"), + Settings.builder().put(INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag1", "value1,value4"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "value1"))) + .add(newNode("node2", attrMap("tag1", "value2"))) + .add(newNode("node3", attrMap("tag1", "value3"))) + .add(newNode("node4", attrMap("tag1", "value4"))) + .add(newNode("node5", attrMap())) + ); + } + + public void testIndexExcludeFilters() { + testIndexFilters( + Settings.builder().put(INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag1", "value3,value4"), + Settings.builder().put(INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag1", "value2,value3"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap())) + .add(newNode("node2", attrMap("tag1", "value2"))) + .add(newNode("node3", attrMap("tag1", "value3"))) + .add(newNode("node4", attrMap("tag1", "value4")))); + } + + public void 
testIndexIncludeThenExcludeFilters() { + testIndexFilters( + Settings.builder().put(INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag1", "value1,value2"), + Settings.builder().put(INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag1", "value2,value3") + .putNull(INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag1"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "value1"))) + .add(newNode("node2", attrMap("tag1", "value2"))) + .add(newNode("node3", attrMap("tag1", "value3"))) + .add(newNode("node4", attrMap()))); + } + + public void testIndexExcludeThenIncludeFilters() { + testIndexFilters( + Settings.builder().put(INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag1", "value3,value4"), + Settings.builder().put(INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag1", "value1,value4") + .putNull(INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag1"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "value1"))) + .add(newNode("node2", attrMap())) + .add(newNode("node3", attrMap("tag1", "value3"))) + .add(newNode("node4", attrMap("tag1", "value4")))); + } + + public void testIndexRequireFilters() { + testIndexFilters( + Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "tag1", "value1") + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "tag2", "value2"), + Settings.builder() + .putNull(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "tag2") + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "tag3", "value3"), + DiscoveryNodes.builder() + .add(newNode("node1", attrMap("tag1", "value1", "tag2", "value2", "tag3", "value3"))) + .add(newNode("node2", attrMap("tag1", "value1", "tag2", "value2", "tag3", "other"))) + .add(newNode("node3", attrMap("tag1", "other", "tag2", "value2", "tag3", "other"))) + .add(newNode("node4", attrMap("tag1", "value1", "tag2", "other", "tag3", "value3"))) + .add(newNode("node5", attrMap("tag2", "value2", "tag3", "value3"))) + .add(newNode("node6", attrMap()))); + } + + /** + * A test that creates a 2p1r index and expects the given index allocation settings only to allocate the shards to `node1` and `node2`; + * on updating the index allocation settings the shards should be relocated to nodes `node1` and `node4`. 
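`testIndexIncludeThenExcludeFilters` and `testIndexExcludeThenIncludeFilters` above also rely on `putNull`: when the index settings are updated, a null value must erase the previous rule, otherwise the stale include or exclude filter would keep constraining allocation alongside the new one. A small sketch of that merge behaviour, using a plain map as a hypothetical stand-in for `Settings.builder().put(existing).put(update)`:

```java
import java.util.HashMap;
import java.util.Map;

final class SettingsMergeSketch {
    static Map<String, String> merge(Map<String, String> existing, Map<String, String> update) {
        Map<String, String> merged = new HashMap<>(existing);
        update.forEach((key, value) -> {
            if (value == null) {
                merged.remove(key); // putNull clears the previous include/exclude rule
            } else {
                merged.put(key, value);
            }
        });
        return merged;
    }

    public static void main(String[] args) {
        Map<String, String> existing = new HashMap<>();
        existing.put("index.routing.allocation.include.tag1", "value1,value2");
        Map<String, String> update = new HashMap<>();
        update.put("index.routing.allocation.include.tag1", null); // putNull
        update.put("index.routing.allocation.exclude.tag1", "value2,value3");
        // Prints only the exclude rule: the include rule is gone, so the subsequent
        // reroute relocates shards off the now-excluded nodes.
        System.out.println(merge(existing, update));
    }
}
```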
+ */ + private void testIndexFilters(Settings.Builder initialIndexSettings, Settings.Builder updatedIndexSettings, Builder nodesBuilder) { AllocationService strategy = createAllocationService(Settings.builder() - .build()); + .build()); logger.info("Building initial routing table"); - MetaData initialMetaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT) - .put("index.number_of_shards", 2) - .put("index.number_of_replicas", 1) - .put("index.routing.allocation.include.tag1", "value1,value2") - .put("index.routing.allocation.exclude.tag1", "value3,value4") - .build())) - .build(); + final MetaData initialMetaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT) + .put("index.number_of_shards", 2).put("index.number_of_replicas", 1).put(initialIndexSettings.build()))).build(); - RoutingTable initialRoutingTable = RoutingTable.builder() - .addAsNew(initialMetaData.index("test")) - .build(); + final RoutingTable initialRoutingTable = RoutingTable.builder() + .addAsNew(initialMetaData.index("test")) + .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(initialMetaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(initialMetaData).routingTable(initialRoutingTable).nodes(nodesBuilder).build(); - logger.info("--> adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .add(newNode("node1", singletonMap("tag1", "value1"))) - .add(newNode("node2", singletonMap("tag1", "value2"))) - .add(newNode("node3", singletonMap("tag1", "value3"))) - .add(newNode("node4", singletonMap("tag1", "value4"))) - ).build(); + logger.info("--> rerouting"); clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); @@ -132,13 +311,11 @@ public class FilterRoutingTests extends ESAllocationTestCase { logger.info("--> switch between value2 and value4, shards should be relocating"); - IndexMetaData existingMetaData = clusterState.metaData().index("test"); - MetaData updatedMetaData = MetaData.builder() - .put(IndexMetaData.builder(existingMetaData).settings(Settings.builder().put(existingMetaData.getSettings()) - .put("index.routing.allocation.include.tag1", "value1,value4") - .put("index.routing.allocation.exclude.tag1", "value2,value3") - .build())) - .build(); + final IndexMetaData existingMetaData = clusterState.metaData().index("test"); + final MetaData updatedMetaData + = MetaData.builder().put(IndexMetaData.builder(existingMetaData).settings(Settings.builder() + .put(existingMetaData.getSettings()).put(updatedIndexSettings.build()).build())).build(); + clusterState = ClusterState.builder(clusterState).metaData(updatedMetaData).build(); clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2)); @@ -160,16 +337,17 @@ public class FilterRoutingTests extends ESAllocationTestCase { logger.info("Building initial routing table"); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) - 
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) - .build(); + .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) + .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) + .build(); RoutingTable initialRoutingTable = RoutingTable.builder() - .addAsNew(metaData.index("test1")) - .addAsNew(metaData.index("test2")) - .build(); + .addAsNew(metaData.index("test1")) + .addAsNew(metaData.index("test2")) + .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding two nodes and performing rerouting"); DiscoveryNode node1 = newNode("node1", singletonMap("tag1", "value1")); @@ -187,9 +365,9 @@ public class FilterRoutingTests extends ESAllocationTestCase { logger.info("--> disable allocation for node1 and reroute"); strategy = createAllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", "1") - .put("cluster.routing.allocation.exclude.tag1", "value1") - .build()); + .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), "1") + .put(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "tag1", "value1") + .build()); logger.info("--> move shards from node1 to node2"); clusterState = strategy.reroute(clusterState, "reroute"); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java index 2022ecb945b..eeec65f0e2e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.shrink.ResizeAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -39,7 +38,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.ResizeAllocationDeci import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.gateway.TestGatewayAllocator; import java.util.Arrays; @@ -136,8 +134,8 @@ public class ResizeAllocationDeciderTests extends ESAllocationTestCase { ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); RoutingAllocation routingAllocation = new RoutingAllocation(null, null, clusterState, null, 0); - ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, 0), null, true, RecoverySource - .LocalShardsRecoverySource.INSTANCE, ShardRoutingState.UNASSIGNED); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, 0), null, true, ShardRoutingState.UNASSIGNED, + RecoverySource.LocalShardsRecoverySource.INSTANCE); assertEquals(Decision.ALWAYS, 
resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); assertEquals(Decision.ALWAYS, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), routingAllocation)); @@ -166,8 +164,8 @@ public class ResizeAllocationDeciderTests extends ESAllocationTestCase { RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); int shardId = randomIntBetween(0, 3); int sourceShardId = IndexMetaData.selectSplitShard(shardId, clusterState.metaData().index("source"), 4).id(); - ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, RecoverySource - .LocalShardsRecoverySource.INSTANCE, ShardRoutingState.UNASSIGNED); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, ShardRoutingState.UNASSIGNED, + RecoverySource.LocalShardsRecoverySource.INSTANCE); assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), routingAllocation)); @@ -206,8 +204,8 @@ public class ResizeAllocationDeciderTests extends ESAllocationTestCase { RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); int shardId = randomIntBetween(0, 3); int sourceShardId = IndexMetaData.selectSplitShard(shardId, clusterState.metaData().index("source"), 4).id(); - ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, RecoverySource - .LocalShardsRecoverySource.INSTANCE, ShardRoutingState.UNASSIGNED); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, ShardRoutingState.UNASSIGNED, + RecoverySource.LocalShardsRecoverySource.INSTANCE); assertEquals(Decision.YES, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); String allowedNode = clusterState.getRoutingTable().index("source").shard(sourceShardId).primaryShard().currentNodeId(); @@ -243,46 +241,4 @@ public class ResizeAllocationDeciderTests extends ESAllocationTestCase { routingAllocation).getExplanation()); } } - - public void testAllocateOnOldNode() { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, - VersionUtils.getPreviousVersion(ResizeAction.COMPATIBILITY_VERSION)); - ClusterState clusterState = createInitialClusterState(true, version); - MetaData.Builder metaBuilder = MetaData.builder(clusterState.metaData()); - metaBuilder.put(IndexMetaData.builder("target").settings(settings(Version.CURRENT) - .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME.getKey(), "source") - .put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, IndexMetaData.INDEX_UUID_NA_VALUE)) - .numberOfShards(4).numberOfReplicas(0)); - MetaData metaData = metaBuilder.build(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); - routingTableBuilder.addAsNew(metaData.index("target")); - - clusterState = ClusterState.builder(clusterState) - .routingTable(routingTableBuilder.build()) - .metaData(metaData).build(); - Index idx = clusterState.metaData().index("target").getIndex(); - - - ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); - RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); - int shardId = 
randomIntBetween(0, 3); - int sourceShardId = IndexMetaData.selectSplitShard(shardId, clusterState.metaData().index("source"), 4).id(); - ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, RecoverySource - .LocalShardsRecoverySource.INSTANCE, ShardRoutingState.UNASSIGNED); - assertEquals(Decision.YES, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); - - assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), - routingAllocation)); - assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), - routingAllocation)); - - routingAllocation.debugDecision(true); - assertEquals("source primary is active", resizeAllocationDecider.canAllocate(shardRouting, routingAllocation).getExplanation()); - assertEquals("node [node1] is too old to split a shard", - resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), - routingAllocation).getExplanation()); - assertEquals("node [node2] is too old to split a shard", - resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), - routingAllocation).getExplanation()); - } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java index d32ebe62ec1..01586d9c495 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java @@ -399,7 +399,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { final boolean primary = randomBoolean(); final ShardRouting unassigned = ShardRouting.newUnassigned(new ShardId(index, shard), primary, primary ? 
- RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : + RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") ); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 10fc358e4d4..ce53c14807c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -29,9 +29,9 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RecoverySource.EmptyStoreRecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.LocalShardsRecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource; -import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; @@ -69,7 +69,7 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { final Index index = metaData.index("test").getIndex(); - ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); DiscoveryNode node_0 = new DiscoveryNode("node_0", buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT); DiscoveryNode node_1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Collections.emptyMap(), @@ -125,22 +125,22 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { .build(); final IndexMetaData indexMetaData = metaData.index("test"); - ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 0), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 0), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_0 = ShardRoutingHelper.initialize(test_0, node_0.getId()); test_0 = ShardRoutingHelper.moveToStarted(test_0); shardRoutingMap.put(test_0, "/node0/least"); - ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 1), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 1), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_1 = ShardRoutingHelper.initialize(test_1, node_1.getId()); test_1 = ShardRoutingHelper.moveToStarted(test_1); shardRoutingMap.put(test_1, "/node1/least"); - 
ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 2), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 2), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_2 = ShardRoutingHelper.initialize(test_2, node_1.getId()); test_2 = ShardRoutingHelper.moveToStarted(test_2); shardRoutingMap.put(test_2, "/node1/most"); - ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 3), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 3), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_3 = ShardRoutingHelper.initialize(test_3, node_1.getId()); test_3 = ShardRoutingHelper.moveToStarted(test_3); // Intentionally not in the shardRoutingMap. We want to test what happens when we don't know where it is. @@ -286,16 +286,19 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { metaBuilder.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put("index.uuid", "1234")) .numberOfShards(4).numberOfReplicas(0)); metaBuilder.put(IndexMetaData.builder("target").settings(settings(Version.CURRENT).put("index.uuid", "5678") - .put("index.shrink.source.name", "test").put("index.shrink.source.uuid", "1234")).numberOfShards(1).numberOfReplicas(0)); + .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY, "test").put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, "1234")) + .numberOfShards(1) + .numberOfReplicas(0)); metaBuilder.put(IndexMetaData.builder("target2").settings(settings(Version.CURRENT).put("index.uuid", "9101112") - .put("index.shrink.source.name", "test").put("index.shrink.source.uuid", "1234")).numberOfShards(2).numberOfReplicas(0)); + .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY, "test").put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, "1234")) + .numberOfShards(2).numberOfReplicas(0)); MetaData metaData = metaBuilder.build(); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); routingTableBuilder.addAsNew(metaData.index("test")); routingTableBuilder.addAsNew(metaData.index("target")); routingTableBuilder.addAsNew(metaData.index("target2")); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .metaData(metaData).routingTable(routingTableBuilder.build()).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build(); AllocationService allocationService = createAllocationService(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))) @@ -330,7 +333,6 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { assertEquals(100L, DiskThresholdDecider.getExpectedShardSize(test_1, allocation, 0)); assertEquals(10L, DiskThresholdDecider.getExpectedShardSize(test_0, allocation, 0)); - ShardRouting target = ShardRouting.newUnassigned(new ShardId(new Index("target", "5678"), 0), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, 
"foo")); assertEquals(1110L, DiskThresholdDecider.getExpectedShardSize(target, allocation, 0)); @@ -350,12 +352,9 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { .build(); allocationService.reroute(clusterState, "foo"); - RoutingAllocation allocationWithMissingSourceIndex = new RoutingAllocation(null, clusterStateWithMissingSourceIndex.getRoutingNodes(), clusterStateWithMissingSourceIndex, info, 0); - assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target, allocationWithMissingSourceIndex, 42L)); assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target2, allocationWithMissingSourceIndex, 42L)); } - } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index b1fa8346e2c..ba6fe5b9a5a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -42,8 +42,8 @@ import org.elasticsearch.test.gateway.TestGatewayAllocator; import java.util.Arrays; import java.util.Collections; -import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_SHRINK_SOURCE_NAME; -import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_SHRINK_SOURCE_UUID; +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_RESIZE_SOURCE_NAME; +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_RESIZE_SOURCE_UUID; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; @@ -151,8 +151,8 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase { .putInSyncAllocationIds(1, Collections.singleton("aid1")) .build(); metaData.put(sourceIndex, false); - indexSettings.put(INDEX_SHRINK_SOURCE_UUID.getKey(), sourceIndex.getIndexUUID()); - indexSettings.put(INDEX_SHRINK_SOURCE_NAME.getKey(), sourceIndex.getIndex().getName()); + indexSettings.put(INDEX_RESIZE_SOURCE_UUID.getKey(), sourceIndex.getIndexUUID()); + indexSettings.put(INDEX_RESIZE_SOURCE_NAME.getKey(), sourceIndex.getIndex().getName()); } else { sourceIndex = null; } diff --git a/server/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/server/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index cdcaf4a1b9c..633e043ddd1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -22,6 +22,7 @@ package org.elasticsearch.cluster.settings; import org.apache.logging.log4j.Level; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.logging.ESLoggerFactory; @@ -285,7 +286,7 @@ public class ClusterSettingsIT extends ESIntegTestCase { .get(); fail("bogus value"); } catch (IllegalArgumentException ex) { - 
assertEquals(ex.getMessage(), "Failed to parse value [-1] for setting [discovery.zen.publish_timeout] must be >= 0s"); + assertEquals(ex.getMessage(), "failed to parse value [-1] for setting [discovery.zen.publish_timeout], must be >= [0ms]"); } assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L)); @@ -380,4 +381,34 @@ public class ClusterSettingsIT extends ESIntegTestCase { } } + public void testUserMetadata() { + String key = "cluster.metadata." + randomAlphaOfLengthBetween(5, 20); + String value = randomRealisticUnicodeOfCodepointLengthBetween(5, 50); + String updatedValue = randomRealisticUnicodeOfCodepointLengthBetween(5, 50); + logger.info("Attempting to store [{}]: [{}], then update to [{}]", key, value, updatedValue); + + final Settings settings = Settings.builder().put(key, value).build(); + final Settings updatedSettings = Settings.builder().put(key, updatedValue).build(); + if (randomBoolean()) { + logger.info("Using persistent settings"); + + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).execute().actionGet(); + ClusterStateResponse state = client().admin().cluster().prepareState().execute().actionGet(); + assertEquals(value, state.getState().getMetaData().persistentSettings().get(key)); + + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(updatedSettings).execute().actionGet(); + ClusterStateResponse updatedState = client().admin().cluster().prepareState().execute().actionGet(); + assertEquals(updatedValue, updatedState.getState().getMetaData().persistentSettings().get(key)); + } else { + logger.info("Using transient settings"); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet(); + ClusterStateResponse state = client().admin().cluster().prepareState().execute().actionGet(); + assertEquals(value, state.getState().getMetaData().transientSettings().get(key)); + + client().admin().cluster().prepareUpdateSettings().setTransientSettings(updatedSettings).execute().actionGet(); + ClusterStateResponse updatedState = client().admin().cluster().prepareState().execute().actionGet(); + assertEquals(updatedValue, updatedState.getState().getMetaData().transientSettings().get(key)); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java index 87f98389231..b4a24cfc4fc 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java @@ -98,6 +98,11 @@ public class GeoHashTests extends ESTestCase { } } + public void testNorthPoleBoundingBox() { + Rectangle bbox = GeoHashUtils.bbox("zzbxfpgzupbx"); // Bounding box with maximum precision touching north pole + assertEquals(90.0, bbox.maxLat, 0.0000001); // Should be 90 degrees + } + public void testInvalidGeohashes() { IllegalArgumentException ex; diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index 1551a315b26..cea83bfbccf 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -71,7 +71,15 @@ public class JavaJodaTimeDuellingTests extends ESTestCase { public void testDuellingFormatsValidParsing() { assertSameDate("1522332219", "epoch_second"); + 
assertSameDate("0", "epoch_second"); + assertSameDate("1", "epoch_second"); + assertSameDate("-1", "epoch_second"); + assertSameDate("-1522332219", "epoch_second"); assertSameDate("1522332219321", "epoch_millis"); + assertSameDate("0", "epoch_millis"); + assertSameDate("1", "epoch_millis"); + assertSameDate("-1", "epoch_millis"); + assertSameDate("-1522332219321", "epoch_millis"); assertSameDate("20181126", "basic_date"); assertSameDate("20181126T121212.123Z", "basic_date_time"); diff --git a/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index 753aedea01e..b677247f266 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -33,18 +33,24 @@ import org.apache.lucene.index.NoDeletionPolicy; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.SoftDeletesRetentionMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.Bits; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.io.StringReader; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -53,6 +59,8 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import static org.hamcrest.Matchers.equalTo; + public class LuceneTests extends ESTestCase { public void testWaitForIndex() throws Exception { final MockDirectoryWrapper dir = newMockDirectory(); @@ -198,10 +206,10 @@ public class LuceneTests extends ESTestCase { assertEquals(3, open.maxDoc()); IndexSearcher s = new IndexSearcher(open); - assertEquals(s.search(new TermQuery(new Term("id", "1")), 1).totalHits, 1); - assertEquals(s.search(new TermQuery(new Term("id", "2")), 1).totalHits, 1); - assertEquals(s.search(new TermQuery(new Term("id", "3")), 1).totalHits, 1); - assertEquals(s.search(new TermQuery(new Term("id", "4")), 1).totalHits, 0); + assertEquals(s.search(new TermQuery(new Term("id", "1")), 1).totalHits.value, 1); + assertEquals(s.search(new TermQuery(new Term("id", "2")), 1).totalHits.value, 1); + assertEquals(s.search(new TermQuery(new Term("id", "3")), 1).totalHits.value, 1); + assertEquals(s.search(new TermQuery(new Term("id", "4")), 1).totalHits.value, 0); for (String file : dir.listAll()) { assertFalse("unexpected file: " + file, file.equals("segments_3") || file.startsWith("_2")); @@ -374,7 +382,7 @@ public class LuceneTests extends ESTestCase { try (DirectoryReader reader = DirectoryReader.open(w)) { IndexSearcher searcher = newSearcher(reader); - Weight termWeight = new TermQuery(new Term("foo", "bar")).createWeight(searcher, false, 1f); + Weight termWeight = new TermQuery(new Term("foo", "bar")).createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, 1f); assertEquals(1, reader.leaves().size()); 
LeafReaderContext leafReaderContext = searcher.getIndexReader().leaves().get(0); Bits bits = Lucene.asSequentialAccessBits(leafReaderContext.reader().maxDoc(), termWeight.scorerSupplier(leafReaderContext)); @@ -406,4 +414,88 @@ // add assume's here if needed for certain platforms, but we should know if it does not work. assertTrue("MMapDirectory does not support unmapping: " + MMapDirectory.UNMAP_NOT_SUPPORTED_REASON, MMapDirectory.UNMAP_SUPPORTED); } + + public void testWrapAllDocsLive() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) + .setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, newMergePolicy())); + IndexWriter writer = new IndexWriter(dir, config); + int numDocs = between(1, 10); + Set<String> liveDocs = new HashSet<>(); + for (int i = 0; i < numDocs; i++) { + String id = Integer.toString(i); + Document doc = new Document(); + doc.add(new StringField("id", id, Store.YES)); + writer.addDocument(doc); + liveDocs.add(id); + } + for (int i = 0; i < numDocs; i++) { + if (randomBoolean()) { + String id = Integer.toString(i); + Document doc = new Document(); + doc.add(new StringField("id", "v2-" + id, Store.YES)); + if (randomBoolean()) { + doc.add(Lucene.newSoftDeletesField()); + } + writer.softUpdateDocument(new Term("id", id), doc, Lucene.newSoftDeletesField()); + liveDocs.add("v2-" + id); + } + } + try (DirectoryReader unwrapped = DirectoryReader.open(writer)) { + DirectoryReader reader = Lucene.wrapAllDocsLive(unwrapped); + assertThat(reader.numDocs(), equalTo(liveDocs.size())); + IndexSearcher searcher = new IndexSearcher(reader); + Set<String> actualDocs = new HashSet<>(); + TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE); + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + actualDocs.add(reader.document(scoreDoc.doc).get("id")); + } + assertThat(actualDocs, equalTo(liveDocs)); + } + IOUtils.close(writer, dir); + } + + public void testWrapLiveDocsNotExposeAbortedDocuments() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) + .setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, newMergePolicy())); + IndexWriter writer = new IndexWriter(dir, config); + int numDocs = between(1, 10); + List<String> liveDocs = new ArrayList<>(); + for (int i = 0; i < numDocs; i++) { + String id = Integer.toString(i); + Document doc = new Document(); + doc.add(new StringField("id", id, Store.YES)); + if (randomBoolean()) { + doc.add(Lucene.newSoftDeletesField()); + } + writer.addDocument(doc); + liveDocs.add(id); + } + int abortedDocs = between(1, 10); + for (int i = 0; i < abortedDocs; i++) { + try { + Document doc = new Document(); + doc.add(new StringField("id", "aborted-" + i, Store.YES)); + StringReader reader = new StringReader(""); + doc.add(new TextField("other", reader)); + reader.close(); // the closed reader makes indexing hit a non-aborting exception, so the document is dropped but still consumes a doc ID + writer.addDocument(doc); + fail("index should have failed"); + } catch (Exception ignored) { } + } + try (DirectoryReader unwrapped = DirectoryReader.open(writer)) { + DirectoryReader reader = Lucene.wrapAllDocsLive(unwrapped); + assertThat(reader.maxDoc(), equalTo(numDocs + abortedDocs)); + assertThat(reader.numDocs(), equalTo(liveDocs.size())); + IndexSearcher searcher = new IndexSearcher(reader); + List<String>
actualDocs = new ArrayList<>(); + TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE); + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + actualDocs.add(reader.document(scoreDoc.doc).get("id")); + } + assertThat(actualDocs, equalTo(liveDocs)); + } + IOUtils.close(writer, dir); + } } diff --git a/server/src/test/java/org/elasticsearch/common/lucene/search/function/MinScoreScorerTests.java b/server/src/test/java/org/elasticsearch/common/lucene/search/function/MinScoreScorerTests.java index 6ebb604725d..e9685c03bc4 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/search/function/MinScoreScorerTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/search/function/MinScoreScorerTests.java @@ -19,9 +19,14 @@ package org.elasticsearch.common.lucene.search.function; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -36,7 +41,7 @@ public class MinScoreScorerTests extends LuceneTestCase { return new DocIdSetIterator() { int i = -1; - + @Override public int nextDoc() throws IOException { if (i + 1 == docs.length) { @@ -45,17 +50,17 @@ } return docs[++i]; } } - + @Override public int docID() { return i < 0 ? -1 : i == docs.length ? NO_MORE_DOCS : docs[i]; } - + @Override public long cost() { return docs.length; } - + @Override public int advance(int target) throws IOException { return slowAdvance(target); @@ -63,9 +68,36 @@ }; } + private static Weight fakeWeight() { + return new Weight(new MatchAllDocsQuery()) { + @Override + public void extractTerms(Set<Term> terms) { + + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + return null; + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + return null; + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return false; + } + }; + } + private static Scorer scorer(int maxDoc, final int[] docs, final float[] scores, final boolean twoPhase) { final DocIdSetIterator iterator = twoPhase ?
DocIdSetIterator.all(maxDoc) : iterator(docs); - return new Scorer(null) { + return new Scorer(fakeWeight()) { + + int lastScoredDoc = -1; + public DocIdSetIterator iterator() { if (twoPhase) { return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator()); @@ -77,12 +109,12 @@ public class MinScoreScorerTests extends LuceneTestCase { public TwoPhaseIterator twoPhaseIterator() { if (twoPhase) { return new TwoPhaseIterator(iterator) { - + @Override public boolean matches() throws IOException { return Arrays.binarySearch(docs, iterator.docID()) >= 0; } - + @Override public float matchCost() { return 10; @@ -100,9 +132,16 @@ public class MinScoreScorerTests extends LuceneTestCase { @Override public float score() throws IOException { + assertNotEquals("score() called twice on doc " + docID(), lastScoredDoc, docID()); + lastScoredDoc = docID(); final int idx = Arrays.binarySearch(docs, docID()); return scores[idx]; } + + @Override + public float getMaxScore(int upTo) throws IOException { + return Float.MAX_VALUE; + } }; } @@ -125,7 +164,7 @@ public class MinScoreScorerTests extends LuceneTestCase { } Scorer scorer = scorer(maxDoc, docs, scores, twoPhase); final float minScore = random().nextFloat(); - Scorer minScoreScorer = new MinScoreScorer(null, scorer, minScore); + Scorer minScoreScorer = new MinScoreScorer(fakeWeight(), scorer, minScore); int doc = -1; while (doc != DocIdSetIterator.NO_MORE_DOCS) { final int target; @@ -147,7 +186,7 @@ public class MinScoreScorerTests extends LuceneTestCase { assertEquals(DocIdSetIterator.NO_MORE_DOCS, doc); } else { assertEquals(docs[idx], doc); - assertEquals(scores[idx], scorer.score(), 0f); + assertEquals(scores[idx], minScoreScorer.score(), 0f); } } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 2376d566340..0ee1d2e9c4a 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting.Property; @@ -31,6 +32,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -52,6 +54,7 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.sameInstance; public class ScopedSettingsTests extends ESTestCase { @@ -170,7 +173,7 @@ public class ScopedSettingsTests extends ESTestCase { IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> service.validate(Settings.builder().put("foo.test.bar", 7).build(), true)); - assertEquals("Missing required setting [foo.test.name] for setting [foo.test.bar]", iae.getMessage()); + assertEquals("missing required 
setting [foo.test.name] for setting [foo.test.bar]", iae.getMessage()); service.validate(Settings.builder() .put("foo.test.name", "test") @@ -180,6 +183,127 @@ service.validate(Settings.builder().put("foo.test.bar", 7).build(), false); } + public void testDependentSettingsWithFallback() { + Setting.AffixSetting<String> nameFallbackSetting = + Setting.affixKeySetting("fallback.", "name", k -> Setting.simpleString(k, Property.Dynamic, Property.NodeScope)); + Setting.AffixSetting<String> nameSetting = Setting.affixKeySetting( + "foo.", + "name", + k -> Setting.simpleString( + k, + "_na_".equals(k) + ? nameFallbackSetting.getConcreteSettingForNamespace(k) + : nameFallbackSetting.getConcreteSetting(k.replaceAll("^foo", "fallback")), + Property.Dynamic, + Property.NodeScope)); + Setting.AffixSetting<Integer> barSetting = + Setting.affixKeySetting("foo.", "bar", k -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope), nameSetting); + + final AbstractScopedSettings service = + new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(nameFallbackSetting, nameSetting, barSetting))); + + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> service.validate(Settings.builder().put("foo.test.bar", 7).build(), true)); + assertThat(e, hasToString(containsString("missing required setting [foo.test.name] for setting [foo.test.bar]"))); + + service.validate(Settings.builder().put("foo.test.name", "test").put("foo.test.bar", 7).build(), true); + service.validate(Settings.builder().put("fallback.test.name", "test").put("foo.test.bar", 7).build(), true); + } + + public void testTupleAffixUpdateConsumer() { + String prefix = randomAlphaOfLength(3) + "foo."; + String intSuffix = randomAlphaOfLength(3); + String listSuffix = randomAlphaOfLength(4); + Setting.AffixSetting<Integer> intSetting = Setting.affixKeySetting(prefix, intSuffix, + (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); + Setting.AffixSetting<List<Integer>> listSetting = Setting.affixKeySetting(prefix, listSuffix, + (k) -> Setting.listSetting(k, Arrays.asList("1"), Integer::parseInt, Property.Dynamic, Property.NodeScope)); + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(intSetting, listSetting))); + Map<String, Tuple<List<Integer>, Integer>> results = new HashMap<>(); + Function<String, String> listBuilder = g -> (prefix + g + "." + listSuffix); + Function<String, String> intBuilder = g -> (prefix + g + "."
+ intSuffix); + String group1 = randomAlphaOfLength(3); + String group2 = randomAlphaOfLength(4); + String group3 = randomAlphaOfLength(5); + BiConsumer<String, Tuple<List<Integer>, Integer>> listConsumer = results::put; + + service.addAffixUpdateConsumer(listSetting, intSetting, listConsumer, (s, k) -> { + if (k.v1().isEmpty() && k.v2() == 2) { + throw new IllegalArgumentException("boom"); + } + }); + assertEquals(0, results.size()); + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putList(listBuilder.apply(group2), "18", "19", "20") + .build()); + assertEquals(2, results.get(group1).v2().intValue()); + assertEquals(7, results.get(group2).v2().intValue()); + assertEquals(Arrays.asList(16, 17), results.get(group1).v1()); + assertEquals(Arrays.asList(18, 19, 20), results.get(group2).v1()); + assertEquals(2, results.size()); + + results.clear(); + + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putNull(listBuilder.apply(group2)) // removed + .build()); + + assertNull(group1 + " wasn't changed", results.get(group1)); + assertEquals(1, results.get(group2).v1().size()); + assertEquals(Arrays.asList(1), results.get(group2).v1()); + assertEquals(7, results.get(group2).v2().intValue()); + assertEquals(1, results.size()); + results.clear(); + + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putList(listBuilder.apply(group3), "5", "6") // added + .build()); + assertNull(group1 + " wasn't changed", results.get(group1)); + assertNull(group2 + " wasn't changed", results.get(group2)); + + assertEquals(2, results.get(group3).v1().size()); + assertEquals(Arrays.asList(5, 6), results.get(group3).v1()); + assertEquals(1, results.get(group3).v2().intValue()); + assertEquals(1, results.size()); + results.clear(); + + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 4) // modified + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putList(listBuilder.apply(group3), "5", "6") + .build()); + assertNull(group2 + " wasn't changed", results.get(group2)); + assertNull(group3 + " wasn't changed", results.get(group3)); + + assertEquals(2, results.get(group1).v1().size()); + assertEquals(Arrays.asList(16, 17), results.get(group1).v1()); + assertEquals(4, results.get(group1).v2().intValue()); + assertEquals(1, results.size()); + results.clear(); + + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) // modified to trip validator + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1)) // modified to trip validator + .putList(listBuilder.apply(group3), "5", "6") + .build()) + ); + assertEquals("boom", iae.getMessage()); + assertEquals(0, results.size()); + } + public void testAddConsumerAffix() { Setting.AffixSetting<Integer> intSetting = Setting.affixKeySetting("foo.", "bar", (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); @@ -885,19 +1009,166 @@ IllegalArgumentException.class, () -> { final Settings settings = Settings.builder().put("index.internal", "internal").build(); - indexScopedSettings.validate(settings, false, /*
validateInternalIndex */ true); + indexScopedSettings.validate(settings, false, /* validateInternalOrPrivateIndex */ true); }); final String message = "can not update internal setting [index.internal]; this setting is managed via a dedicated API"; assertThat(e, hasToString(containsString(message))); } + public void testPrivateIndexSettingsFailsValidation() { + final Setting<String> indexInternalSetting = Setting.simpleString("index.private", Property.PrivateIndex, Property.IndexScope); + final IndexScopedSettings indexScopedSettings = + new IndexScopedSettings(Settings.EMPTY, Collections.singleton(indexInternalSetting)); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> { + final Settings settings = Settings.builder().put("index.private", "private").build(); + indexScopedSettings.validate(settings, false, /* validateInternalOrPrivateIndex */ true); + }); + final String message = "can not update private setting [index.private]; this setting is managed by Elasticsearch"; + assertThat(e, hasToString(containsString(message))); + } + public void testInternalIndexSettingsSkipValidation() { final Setting<String> internalIndexSetting = Setting.simpleString("index.internal", Property.InternalIndex, Property.IndexScope); - final IndexScopedSettings indexScopedSettings = + final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(Settings.EMPTY, Collections.singleton(internalIndexSetting)); // nothing should happen, validation should not throw an exception final Settings settings = Settings.builder().put("index.internal", "internal").build(); - indexScopedSettings.validate(settings, false, /* validateInternalIndex */ false); + indexScopedSettings.validate(settings, false, /* validateInternalOrPrivateIndex */ false); + } + + public void testPrivateIndexSettingsSkipValidation() { + final Setting<String> internalIndexSetting = Setting.simpleString("index.private", Property.PrivateIndex, Property.IndexScope); + final IndexScopedSettings indexScopedSettings = + new IndexScopedSettings(Settings.EMPTY, Collections.singleton(internalIndexSetting)); + // nothing should happen, validation should not throw an exception + final Settings settings = Settings.builder().put("index.private", "private").build(); + indexScopedSettings.validate(settings, false, /* validateInternalOrPrivateIndex */ false); + } + + public void testUpgradeSetting() { + final Setting<String> oldSetting = Setting.simpleString("foo.old", Property.NodeScope); + final Setting<String> newSetting = Setting.simpleString("foo.new", Property.NodeScope); + final Setting<String> remainingSetting = Setting.simpleString("foo.remaining", Property.NodeScope); + + final AbstractScopedSettings service = + new ClusterSettings( + Settings.EMPTY, + new HashSet<>(Arrays.asList(oldSetting, newSetting, remainingSetting)), + Collections.singleton(new SettingUpgrader<String>() { + + @Override + public Setting<String> getSetting() { + return oldSetting; + } + + @Override + public String getKey(final String key) { + return "foo.new"; + } + + @Override + public String getValue(final String value) { + return "new." + value; + } + + })); + + final Settings settings = + Settings.builder() + .put("foo.old", randomAlphaOfLength(8)) + .put("foo.remaining", randomAlphaOfLength(8)) + .build(); + final Settings upgradedSettings = service.upgradeSettings(settings); + assertFalse(oldSetting.exists(upgradedSettings)); + assertTrue(newSetting.exists(upgradedSettings)); + assertThat(newSetting.get(upgradedSettings), equalTo("new."
+ oldSetting.get(settings))); + assertTrue(remainingSetting.exists(upgradedSettings)); + assertThat(remainingSetting.get(upgradedSettings), equalTo(remainingSetting.get(settings))); + } + + public void testUpgradeSettingsNoChangesPreservesInstance() { + final Setting<String> oldSetting = Setting.simpleString("foo.old", Property.NodeScope); + final Setting<String> newSetting = Setting.simpleString("foo.new", Property.NodeScope); + final Setting<String> remainingSetting = Setting.simpleString("foo.remaining", Property.NodeScope); + + final AbstractScopedSettings service = + new ClusterSettings( + Settings.EMPTY, + new HashSet<>(Arrays.asList(oldSetting, newSetting, remainingSetting)), + Collections.singleton(new SettingUpgrader<String>() { + + @Override + public Setting<String> getSetting() { + return oldSetting; + } + + @Override + public String getKey(final String key) { + return "foo.new"; + } + + })); + + final Settings settings = Settings.builder().put("foo.remaining", randomAlphaOfLength(8)).build(); + final Settings upgradedSettings = service.upgradeSettings(settings); + assertThat(upgradedSettings, sameInstance(settings)); + } + + public void testUpgradeComplexSetting() { + final Setting.AffixSetting<String> oldSetting = + Setting.affixKeySetting("foo.old.", "suffix", key -> Setting.simpleString(key, Property.NodeScope)); + final Setting.AffixSetting<String> newSetting = + Setting.affixKeySetting("foo.new.", "suffix", key -> Setting.simpleString(key, Property.NodeScope)); + final Setting.AffixSetting<String> remainingSetting = + Setting.affixKeySetting("foo.remaining.", "suffix", key -> Setting.simpleString(key, Property.NodeScope)); + + final AbstractScopedSettings service = + new ClusterSettings( + Settings.EMPTY, + new HashSet<>(Arrays.asList(oldSetting, newSetting, remainingSetting)), + Collections.singleton(new SettingUpgrader<String>() { + + @Override + public Setting<String> getSetting() { + return oldSetting; + } + + @Override + public String getKey(final String key) { + return key.replaceFirst("^foo\\.old", "foo\\.new"); + } + + @Override + public String getValue(final String value) { + return "new." + value; + } + + })); + + final int count = randomIntBetween(1, 8); + final List<String> concretes = new ArrayList<>(count); + final Settings.Builder builder = Settings.builder(); + for (int i = 0; i < count; i++) { + final String concrete = randomAlphaOfLength(8); + concretes.add(concrete); + builder.put("foo.old." + concrete + ".suffix", randomAlphaOfLength(8)); + builder.put("foo.remaining." + concrete + ".suffix", randomAlphaOfLength(8)); + } + final Settings settings = builder.build(); + final Settings upgradedSettings = service.upgradeSettings(settings); + for (final String concrete : concretes) { + assertFalse(oldSetting.getConcreteSettingForNamespace(concrete).exists(upgradedSettings)); + assertTrue(newSetting.getConcreteSettingForNamespace(concrete).exists(upgradedSettings)); + assertThat( + newSetting.getConcreteSettingForNamespace(concrete).get(upgradedSettings), + equalTo("new."
+ oldSetting.getConcreteSettingForNamespace(concrete).get(settings))); + assertTrue(remainingSetting.getConcreteSettingForNamespace(concrete).exists(upgradedSettings)); + assertThat( + remainingSetting.getConcreteSettingForNamespace(concrete).get(upgradedSettings), + equalTo(remainingSetting.getConcreteSettingForNamespace(concrete).get(settings))); + } } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index aa2452633fb..d394d71317c 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -180,6 +180,13 @@ } } + public void testValidateStringSetting() { + Settings settings = Settings.builder().putList("foo.bar", Arrays.asList("bla-a", "bla-b")).build(); + Setting<String> stringSetting = Setting.simpleString("foo.bar", Property.NodeScope); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> stringSetting.get(settings)); + assertEquals("Found list type value for setting [foo.bar] but but did not expect a list for it.", e.getMessage()); + } + private static final Setting<String> FOO_BAR_SETTING = new Setting<>( "foo.bar", "foobar", @@ -462,6 +469,26 @@ } + public void testListSettingsDeprecated() { + final Setting<List<String>> deprecatedListSetting = + Setting.listSetting( + "foo.deprecated", + Collections.singletonList("foo.deprecated"), + Function.identity(), + Property.Deprecated, + Property.NodeScope); + final Setting<List<String>> nonDeprecatedListSetting = + Setting.listSetting( + "foo.non_deprecated", Collections.singletonList("foo.non_deprecated"), Function.identity(), Property.NodeScope); + final Settings settings = Settings.builder() + .put("foo.deprecated", "foo.deprecated1,foo.deprecated2") + .put("foo.non_deprecated", "foo.non_deprecated1,foo.non_deprecated2") + .build(); + deprecatedListSetting.get(settings); + nonDeprecatedListSetting.get(settings); + assertSettingDeprecationsAndWarnings(new Setting[]{deprecatedListSetting}); + } + public void testListSettings() { Setting<List<String>> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), Property.Dynamic, Property.NodeScope); @@ -735,13 +762,20 @@ assertThat(e, hasToString(containsString("non-index-scoped setting [foo.bar] can not have property [NotCopyableOnResize]"))); } - public void testRejectNonIndexScopedIndexInternalSetting() { + public void testRejectNonIndexScopedInternalIndexSetting() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> Setting.simpleString("foo.bar", Property.InternalIndex)); assertThat(e, hasToString(containsString("non-index-scoped setting [foo.bar] can not have property [InternalIndex]"))); } + public void testRejectNonIndexScopedPrivateIndexSetting() { + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> Setting.simpleString("foo.bar", Property.PrivateIndex)); + assertThat(e, hasToString(containsString("non-index-scoped setting [foo.bar] can not have property [PrivateIndex]"))); + } + public void testTimeValue() { final TimeValue random = TimeValue.parseTimeValue(randomTimeValue(), "test"); @@ -786,7 +820,7 @@ public void testSettingsGroupUpdater() { Setting<Integer> intSetting = Setting.intSetting("prefix.foo", 1,
Property.NodeScope, Property.Dynamic); Setting<Integer> intSetting2 = Setting.intSetting("prefix.same", 1, Property.NodeScope, Property.Dynamic); - AbstractScopedSettings.SettingUpdater<Settings> updater = Setting.groupedSettingsUpdater(s -> {}, logger, + AbstractScopedSettings.SettingUpdater<Settings> updater = Setting.groupedSettingsUpdater(s -> {}, Arrays.asList(intSetting, intSetting2)); Settings current = Settings.builder().put("prefix.foo", 123).put("prefix.same", 5555).build(); @@ -797,7 +831,7 @@ public void testSettingsGroupUpdaterRemoval() { Setting<Integer> intSetting = Setting.intSetting("prefix.foo", 1, Property.NodeScope, Property.Dynamic); Setting<Integer> intSetting2 = Setting.intSetting("prefix.same", 1, Property.NodeScope, Property.Dynamic); - AbstractScopedSettings.SettingUpdater<Settings> updater = Setting.groupedSettingsUpdater(s -> {}, logger, + AbstractScopedSettings.SettingUpdater<Settings> updater = Setting.groupedSettingsUpdater(s -> {}, Arrays.asList(intSetting, intSetting2)); Settings current = Settings.builder().put("prefix.same", 5555).build(); @@ -812,7 +846,7 @@ Setting.AffixSetting<String> affixSetting = Setting.affixKeySetting("prefix.foo.", "suffix", key -> Setting.simpleString(key, Property.NodeScope, Property.Dynamic)); - AbstractScopedSettings.SettingUpdater<Settings> updater = Setting.groupedSettingsUpdater(s -> {}, logger, + AbstractScopedSettings.SettingUpdater<Settings> updater = Setting.groupedSettingsUpdater(s -> {}, Arrays.asList(intSetting, prefixKeySetting, affixSetting)); Settings.Builder currentSettingsBuilder = Settings.builder() @@ -858,4 +892,30 @@ assertThat(affixSetting.getNamespaces(Settings.builder().put("prefix.infix.suffix", "anything").build()), hasSize(1)); assertThat(affixSetting.getNamespaces(Settings.builder().put("prefix.infix.suffix.anything", "anything").build()), hasSize(1)); } + + public void testExists() { + final Setting<String> fooSetting = Setting.simpleString("foo", Property.NodeScope); + assertFalse(fooSetting.exists(Settings.EMPTY)); + assertTrue(fooSetting.exists(Settings.builder().put("foo", "bar").build())); + } + + public void testExistsWithFallback() { + final int count = randomIntBetween(1, 16); + Setting<String> current = Setting.simpleString("fallback0", Property.NodeScope); + for (int i = 1; i < count; i++) { + final Setting<String> next = + new Setting<>(new Setting.SimpleKey("fallback" + i), current, Function.identity(), Property.NodeScope); + current = next; + } + final Setting<String> fooSetting = new Setting<>(new Setting.SimpleKey("foo"), current, Function.identity(), Property.NodeScope); + assertFalse(fooSetting.exists(Settings.EMPTY)); + if (randomBoolean()) { + assertTrue(fooSetting.exists(Settings.builder().put("foo", "bar").build())); + } else { + final String setting = "fallback" + randomIntBetween(0, count - 1); + assertFalse(fooSetting.exists(Settings.builder().put(setting, "bar").build())); + assertTrue(fooSetting.existsOrFallbackExists(Settings.builder().put(setting, "bar").build())); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java index 6a2be8217a6..c6182eac8f6 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Setting.Property; import java.util.Arrays;
+import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsString; public class SettingsModuleTests extends ModuleTestCase { @@ -103,14 +104,14 @@ public class SettingsModuleTests extends ModuleTestCase { try { new SettingsModule(settings, Arrays.asList(Setting.boolSetting("foo.bar", true, Property.NodeScope), Setting.boolSetting("bar.foo", true, Property.NodeScope, Property.Filtered), - Setting.boolSetting("bar.baz", true, Property.NodeScope)), Arrays.asList("foo.*", "bar.foo")); + Setting.boolSetting("bar.baz", true, Property.NodeScope)), Arrays.asList("foo.*", "bar.foo"), emptySet()); fail(); } catch (IllegalArgumentException ex) { assertEquals("filter [bar.foo] has already been registered", ex.getMessage()); } SettingsModule module = new SettingsModule(settings, Arrays.asList(Setting.boolSetting("foo.bar", true, Property.NodeScope), Setting.boolSetting("bar.foo", true, Property.NodeScope, Property.Filtered), - Setting.boolSetting("bar.baz", true, Property.NodeScope)), Arrays.asList("foo.*")); + Setting.boolSetting("bar.baz", true, Property.NodeScope)), Arrays.asList("foo.*"), emptySet()); assertInstanceBinding(module, Settings.class, (s) -> s == settings); assertInstanceBinding(module, SettingsFilter.class, (s) -> s.filter(settings).size() == 1); assertInstanceBinding(module, SettingsFilter.class, (s) -> s.filter(settings).keySet().contains("bar.baz")); diff --git a/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java b/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java new file mode 100644 index 00000000000..839b96e6418 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.settings; + +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.After; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.function.BiConsumer; +import java.util.function.Function; + +import static org.hamcrest.Matchers.equalTo; + +public class UpgradeSettingsIT extends ESSingleNodeTestCase { + + @After + public void cleanup() throws Exception { + client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + .get(); + } + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return Collections.singletonList(UpgradeSettingsPlugin.class); + } + + public static class UpgradeSettingsPlugin extends Plugin { + + static final Setting<String> oldSetting = Setting.simpleString("foo.old", Setting.Property.Dynamic, Setting.Property.NodeScope); + static final Setting<String> newSetting = Setting.simpleString("foo.new", Setting.Property.Dynamic, Setting.Property.NodeScope); + + public UpgradeSettingsPlugin() { + + } + + @Override + public List<Setting<?>> getSettings() { + return Arrays.asList(oldSetting, newSetting); + } + + @Override + public List<SettingUpgrader<?>> getSettingUpgraders() { + return Collections.singletonList(new SettingUpgrader<String>() { + + @Override + public Setting<String> getSetting() { + return oldSetting; + } + + @Override + public String getKey(final String key) { + return "foo.new"; + } + + @Override + public String getValue(final String value) { + return "new." + value; + } + }); + } + } + + public void testUpgradePersistentSettingsOnUpdate() { + runUpgradeSettingsOnUpdateTest((settings, builder) -> builder.setPersistentSettings(settings), MetaData::persistentSettings); + } + + public void testUpgradeTransientSettingsOnUpdate() { + runUpgradeSettingsOnUpdateTest((settings, builder) -> builder.setTransientSettings(settings), MetaData::transientSettings); + } + + private void runUpgradeSettingsOnUpdateTest( + final BiConsumer<Settings, ClusterUpdateSettingsRequestBuilder> consumer, + final Function<MetaData, Settings> settingsFunction) { + final String value = randomAlphaOfLength(8); + final ClusterUpdateSettingsRequestBuilder builder = + client() + .admin() + .cluster() + .prepareUpdateSettings(); + consumer.accept(Settings.builder().put("foo.old", value).build(), builder); + builder.get(); + + final ClusterStateResponse response = client() + .admin() + .cluster() + .prepareState() + .clear() + .setMetaData(true) + .get(); + + assertFalse(UpgradeSettingsPlugin.oldSetting.exists(settingsFunction.apply(response.getState().metaData()))); + assertTrue(UpgradeSettingsPlugin.newSetting.exists(settingsFunction.apply(response.getState().metaData()))); + assertThat(UpgradeSettingsPlugin.newSetting.get(settingsFunction.apply(response.getState().metaData())), equalTo("new." + value)); + } + +} diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java new file mode 100644 index 00000000000..f96674cf7a4 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.time; + +import org.elasticsearch.test.ESTestCase; + +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeParseException; +import java.time.temporal.TemporalAccessor; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class DateFormattersTests extends ESTestCase { + + // the epoch milli parser is a bit special, as it does not use a date formatter, see comments in DateFormatters + public void testEpochMilliParser() { + CompoundDateTimeFormatter formatter = DateFormatters.forPattern("epoch_millis"); + + DateTimeParseException e = expectThrows(DateTimeParseException.class, () -> formatter.parse("invalid")); + assertThat(e.getMessage(), containsString("invalid number")); + + // different zone, should still yield the same output, as epoch is time zone independent + ZoneId zoneId = randomZone(); + CompoundDateTimeFormatter zonedFormatter = formatter.withZone(zoneId); + assertThat(zonedFormatter.printer.getZone(), is(zoneId)); + + // test with negative and non-negative values + assertThatSameDateTime(formatter, zonedFormatter, randomNonNegativeLong() * -1); + assertThatSameDateTime(formatter, zonedFormatter, randomNonNegativeLong()); + assertThatSameDateTime(formatter, zonedFormatter, 0); + assertThatSameDateTime(formatter, zonedFormatter, -1); + assertThatSameDateTime(formatter, zonedFormatter, 1); + + // format() output should be equal as well + assertSameFormat(formatter, randomNonNegativeLong() * -1); + assertSameFormat(formatter, randomNonNegativeLong()); + assertSameFormat(formatter, 0); + assertSameFormat(formatter, -1); + assertSameFormat(formatter, 1); + } + + private void assertThatSameDateTime(CompoundDateTimeFormatter formatter, CompoundDateTimeFormatter zonedFormatter, long millis) { + String millisAsString = String.valueOf(millis); + ZonedDateTime formatterZonedDateTime = DateFormatters.toZonedDateTime(formatter.parse(millisAsString)); + ZonedDateTime zonedFormatterZonedDateTime = DateFormatters.toZonedDateTime(zonedFormatter.parse(millisAsString)); + assertThat(formatterZonedDateTime.toInstant().toEpochMilli(), is(zonedFormatterZonedDateTime.toInstant().toEpochMilli())); + } + + private void assertSameFormat(CompoundDateTimeFormatter formatter, long millis) { + String millisAsString = String.valueOf(millis); + TemporalAccessor accessor = formatter.parse(millisAsString); + assertThat(millisAsString, is(formatter.format(accessor))); + } +} diff --git a/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java b/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java index e193ea34498..feaa7c4a0ae 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java +++
b/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.unit; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.hamcrest.MatcherAssert; @@ -319,9 +318,4 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase resize.invoke(bigArrays, array, newSize)); + assertTrue(e.getCause() instanceof CircuitBreakingException); assertEquals(array.ramBytesUsed(), hcbs.getBreaker(CircuitBreaker.REQUEST).getUsed()); array.close(); assertEquals(0, hcbs.getBreaker(CircuitBreaker.REQUEST).getUsed()); diff --git a/server/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java b/server/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java deleted file mode 100644 index 76dd8e343a2..00000000000 --- a/server/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.util; - -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.routing.AllocationId; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.shard.ShardStateMetaData; -import org.elasticsearch.test.ESTestCase; - -import java.io.BufferedWriter; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; - -@LuceneTestCase.SuppressFileSystems("ExtrasFS") -public class IndexFolderUpgraderTests extends ESTestCase { - - /** - * tests custom data paths are upgraded - */ - public void testUpgradeCustomDataPath() throws IOException { - Path customPath = createTempDir(); - final Settings nodeSettings = Settings.builder() - .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build(); - try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { - final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID()); - Settings settings = Settings.builder() - .put(nodeSettings) - .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) - .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build(); - int numIdxFiles = randomIntBetween(1, 5); - int numTranslogFiles = randomIntBetween(1, 5); - IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings); - writeIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles); - IndexFolderUpgrader helper = new IndexFolderUpgrader(settings, nodeEnv); - helper.upgrade(indexSettings.getIndex().getName()); - checkIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles); - } - } - - /** - * tests upgrade on partially upgraded index, when we crash while upgrading - */ - public void testPartialUpgradeCustomDataPath() throws IOException { - Path customPath = createTempDir(); - final Settings nodeSettings = Settings.builder() - .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build(); - try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { - final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID()); - Settings settings = Settings.builder() - .put(nodeSettings) - .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) - .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) - 
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build(); - int numIdxFiles = randomIntBetween(1, 5); - int numTranslogFiles = randomIntBetween(1, 5); - IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings); - writeIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles); - IndexFolderUpgrader helper = new IndexFolderUpgrader(settings, nodeEnv) { - @Override - void upgrade(Index index, Path source, Path target) throws IOException { - if(randomBoolean()) { - throw new FileNotFoundException("simulated"); - } - } - }; - // only upgrade some paths - try { - helper.upgrade(index.getName()); - } catch (IOException e) { - assertTrue(e instanceof FileNotFoundException); - } - helper = new IndexFolderUpgrader(settings, nodeEnv); - // try to upgrade again - helper.upgrade(indexSettings.getIndex().getName()); - checkIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles); - } - } - - public void testUpgrade() throws IOException { - final Settings nodeSettings = Settings.EMPTY; - try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { - final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID()); - Settings settings = Settings.builder() - .put(nodeSettings) - .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build(); - int numIdxFiles = randomIntBetween(1, 5); - int numTranslogFiles = randomIntBetween(1, 5); - IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings); - writeIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles); - IndexFolderUpgrader helper = new IndexFolderUpgrader(settings, nodeEnv); - helper.upgrade(indexSettings.getIndex().getName()); - checkIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles); - } - } - - public void testUpgradeIndices() throws IOException { - final Settings nodeSettings = Settings.EMPTY; - try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { - Map> indexSettingsMap = new HashMap<>(); - for (int i = 0; i < randomIntBetween(2, 5); i++) { - final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID()); - Settings settings = Settings.builder() - .put(nodeSettings) - .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build(); - Tuple fileCounts = new Tuple<>(randomIntBetween(1, 5), randomIntBetween(1, 5)); - IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings); - indexSettingsMap.put(indexSettings, fileCounts); - writeIndex(nodeEnv, indexSettings, fileCounts.v1(), fileCounts.v2()); - } - IndexFolderUpgrader.upgradeIndicesIfNeeded(nodeSettings, nodeEnv); - for (Map.Entry> entry : indexSettingsMap.entrySet()) { - checkIndex(nodeEnv, entry.getKey(), entry.getValue().v1(), entry.getValue().v2()); - } - } - } - - public void testNeedsUpgrade() throws IOException { - final Index index = new Index("foo", UUIDs.randomBase64UUID()); - IndexMetaData indexState = 
IndexMetaData.builder(index.getName()) - .settings(Settings.builder() - .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) { - IndexMetaData.FORMAT.write(indexState, nodeEnvironment.indexPaths(index)); - assertFalse(IndexFolderUpgrader.needsUpgrade(index, index.getUUID())); - } - } - - private void checkIndex(NodeEnvironment nodeEnv, IndexSettings indexSettings, - int numIdxFiles, int numTranslogFiles) throws IOException { - final Index index = indexSettings.getIndex(); - // ensure index state can be loaded - IndexMetaData loadLatestState = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, - nodeEnv.indexPaths(index)); - assertNotNull(loadLatestState); - assertEquals(loadLatestState.getIndex(), index); - for (int shardId = 0; shardId < indexSettings.getNumberOfShards(); shardId++) { - // ensure shard path can be loaded - ShardPath targetShardPath = ShardPath.loadShardPath(logger, nodeEnv, new ShardId(index, shardId), indexSettings); - assertNotNull(targetShardPath); - // ensure shard contents are copied over - final Path translog = targetShardPath.resolveTranslog(); - final Path idx = targetShardPath.resolveIndex(); - - // ensure index and translog files are copied over - assertEquals(numTranslogFiles, FileSystemUtils.files(translog).length); - assertEquals(numIdxFiles, FileSystemUtils.files(idx).length); - Path[] files = FileSystemUtils.files(translog); - final HashSet translogFiles = new HashSet<>(Arrays.asList(files)); - for (int i = 0; i < numTranslogFiles; i++) { - final String name = Integer.toString(i); - translogFiles.contains(translog.resolve(name + ".translog")); - byte[] content = Files.readAllBytes(translog.resolve(name + ".translog")); - assertEquals(name , new String(content, StandardCharsets.UTF_8)); - } - Path[] indexFileList = FileSystemUtils.files(idx); - final HashSet idxFiles = new HashSet<>(Arrays.asList(indexFileList)); - for (int i = 0; i < numIdxFiles; i++) { - final String name = Integer.toString(i); - idxFiles.contains(idx.resolve(name + ".tst")); - byte[] content = Files.readAllBytes(idx.resolve(name + ".tst")); - assertEquals(name, new String(content, StandardCharsets.UTF_8)); - } - } - } - - private void writeIndex(NodeEnvironment nodeEnv, IndexSettings indexSettings, - int numIdxFiles, int numTranslogFiles) throws IOException { - NodeEnvironment.NodePath[] nodePaths = nodeEnv.nodePaths(); - Path[] oldIndexPaths = new Path[nodePaths.length]; - for (int i = 0; i < nodePaths.length; i++) { - oldIndexPaths[i] = nodePaths[i].indicesPath.resolve(indexSettings.getIndex().getName()); - } - IndexMetaData.FORMAT.write(indexSettings.getIndexMetaData(), oldIndexPaths); - for (int id = 0; id < indexSettings.getNumberOfShards(); id++) { - Path oldIndexPath = randomFrom(oldIndexPaths); - ShardId shardId = new ShardId(indexSettings.getIndex(), id); - if (indexSettings.hasCustomDataPath()) { - Path customIndexPath = nodeEnv.resolveBaseCustomLocation(indexSettings).resolve(indexSettings.getIndex().getName()); - writeShard(shardId, customIndexPath, numIdxFiles, numTranslogFiles); - } else { - writeShard(shardId, oldIndexPath, numIdxFiles, numTranslogFiles); - } - ShardStateMetaData state = new ShardStateMetaData(true, indexSettings.getUUID(), AllocationId.newInitializing()); - ShardStateMetaData.FORMAT.write(state, oldIndexPath.resolve(String.valueOf(shardId.getId()))); 
- } - } - - private void writeShard(ShardId shardId, Path indexLocation, - final int numIdxFiles, final int numTranslogFiles) throws IOException { - Path oldShardDataPath = indexLocation.resolve(String.valueOf(shardId.getId())); - final Path translogPath = oldShardDataPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME); - final Path idxPath = oldShardDataPath.resolve(ShardPath.INDEX_FOLDER_NAME); - Files.createDirectories(translogPath); - Files.createDirectories(idxPath); - for (int i = 0; i < numIdxFiles; i++) { - String filename = Integer.toString(i); - try (BufferedWriter w = Files.newBufferedWriter(idxPath.resolve(filename + ".tst"), - StandardCharsets.UTF_8)) { - w.write(filename); - } - } - for (int i = 0; i < numTranslogFiles; i++) { - String filename = Integer.toString(i); - try (BufferedWriter w = Files.newBufferedWriter(translogPath.resolve(filename + ".translog"), - StandardCharsets.UTF_8)) { - w.write(filename); - } - } - } -} diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index 170ea6cf931..3fb5f5996be 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -22,7 +22,6 @@ package org.elasticsearch.common.xcontent; import com.fasterxml.jackson.core.JsonGenerationException; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParseException; - import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -51,6 +50,19 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.math.BigInteger; import java.nio.file.Path; +import java.time.DayOfWeek; +import java.time.Duration; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.Month; +import java.time.MonthDay; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.Period; +import java.time.Year; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; @@ -459,6 +471,116 @@ public abstract class BaseXContentTestCase extends ESTestCase { .endObject()); } + public void testJavaTime() throws Exception { + final ZonedDateTime d1 = ZonedDateTime.of(2016, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + + // ZonedDateTime + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (ZonedDateTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((ZonedDateTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date", (ZonedDateTime) null).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().timeField("d1", d1).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1").timeValue(d1).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1", d1).endObject()); + + // Instant + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (java.time.Instant) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((java.time.Instant) null).endObject()); + assertResult("{'date':null}", () -> 
builder().startObject().field("date", (java.time.Instant) null).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().timeField("d1", d1.toInstant()).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1").timeValue(d1.toInstant()).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1", d1.toInstant()).endObject()); + + // LocalDateTime (no time zone) + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (LocalDateTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((LocalDateTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date", (LocalDateTime) null).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000'}", + () -> builder().startObject().timeField("d1", d1.toLocalDateTime()).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000'}", + () -> builder().startObject().field("d1").timeValue(d1.toLocalDateTime()).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000'}", () -> builder().startObject().field("d1", d1.toLocalDateTime()).endObject()); + + // LocalDate (no time, no time zone) + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (LocalDate) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((LocalDate) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date", (LocalDate) null).endObject()); + assertResult("{'d1':'2016-01-01'}", () -> builder().startObject().timeField("d1", d1.toLocalDate()).endObject()); + assertResult("{'d1':'2016-01-01'}", () -> builder().startObject().field("d1").timeValue(d1.toLocalDate()).endObject()); + assertResult("{'d1':'2016-01-01'}", () -> builder().startObject().field("d1", d1.toLocalDate()).endObject()); + + // LocalTime (no date, no time zone) + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (LocalTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((LocalTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date", (LocalTime) null).endObject()); + assertResult("{'d1':'00:00:00.000'}", () -> builder().startObject().timeField("d1", d1.toLocalTime()).endObject()); + assertResult("{'d1':'00:00:00.000'}", () -> builder().startObject().field("d1").timeValue(d1.toLocalTime()).endObject()); + assertResult("{'d1':'00:00:00.000'}", () -> builder().startObject().field("d1", d1.toLocalTime()).endObject()); + final ZonedDateTime d2 = ZonedDateTime.of(2016, 1, 1, 7, 59, 23, 123_000_000, ZoneOffset.UTC); + assertResult("{'d1':'07:59:23.123'}", () -> builder().startObject().timeField("d1", d2.toLocalTime()).endObject()); + assertResult("{'d1':'07:59:23.123'}", () -> builder().startObject().field("d1").timeValue(d2.toLocalTime()).endObject()); + assertResult("{'d1':'07:59:23.123'}", () -> builder().startObject().field("d1", d2.toLocalTime()).endObject()); + + // OffsetDateTime + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (OffsetDateTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((OffsetDateTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date", (OffsetDateTime) null).endObject()); + 
assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1", d1.toOffsetDateTime()).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", + () -> builder().startObject().timeField("d1", d1.toOffsetDateTime()).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", + () -> builder().startObject().field("d1").timeValue(d1.toOffsetDateTime()).endObject()); + // also test with a date that has a real offset + OffsetDateTime offsetDateTime = d1.withZoneSameLocal(ZoneOffset.ofHours(5)).toOffsetDateTime(); + assertResult("{'d1':'2016-01-01T00:00:00.000+05:00'}", () -> builder().startObject().field("d1", offsetDateTime).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000+05:00'}", () -> builder().startObject().timeField("d1", offsetDateTime).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000+05:00'}", + () -> builder().startObject().field("d1").timeValue(offsetDateTime).endObject()); + + // OffsetTime + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (OffsetTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((OffsetTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date", (OffsetTime) null).endObject()); + final OffsetTime offsetTime = d2.toOffsetDateTime().toOffsetTime(); + assertResult("{'o':'07:59:23.123Z'}", () -> builder().startObject().timeField("o", offsetTime).endObject()); + assertResult("{'o':'07:59:23.123Z'}", () -> builder().startObject().field("o").timeValue(offsetTime).endObject()); + assertResult("{'o':'07:59:23.123Z'}", () -> builder().startObject().field("o", offsetTime).endObject()); + // also test with a date that has a real offset + final OffsetTime zonedOffsetTime = offsetTime.withOffsetSameLocal(ZoneOffset.ofHours(5)); + assertResult("{'o':'07:59:23.123+05:00'}", () -> builder().startObject().timeField("o", zonedOffsetTime).endObject()); + assertResult("{'o':'07:59:23.123+05:00'}", () -> builder().startObject().field("o").timeValue(zonedOffsetTime).endObject()); + assertResult("{'o':'07:59:23.123+05:00'}", () -> builder().startObject().field("o", zonedOffsetTime).endObject()); + + // DayOfWeek enum, not a real time value, but might be used in scripts + assertResult("{'dayOfWeek':null}", () -> builder().startObject().field("dayOfWeek", (DayOfWeek) null).endObject()); + DayOfWeek dayOfWeek = randomFrom(DayOfWeek.values()); + assertResult("{'dayOfWeek':'" + dayOfWeek + "'}", () -> builder().startObject().field("dayOfWeek", dayOfWeek).endObject()); + + // Month + Month month = randomFrom(Month.values()); + assertResult("{'m':null}", () -> builder().startObject().field("m", (Month) null).endObject()); + assertResult("{'m':'" + month + "'}", () -> builder().startObject().field("m", month).endObject()); + + // MonthDay + MonthDay monthDay = MonthDay.of(month, randomIntBetween(1, 28)); + assertResult("{'m':null}", () -> builder().startObject().field("m", (MonthDay) null).endObject()); + assertResult("{'m':'" + monthDay + "'}", () -> builder().startObject().field("m", monthDay).endObject()); + + // Year + Year year = Year.of(randomIntBetween(0, 2300)); + assertResult("{'y':null}", () -> builder().startObject().field("y", (Year) null).endObject()); + assertResult("{'y':'" + year + "'}", () -> builder().startObject().field("y", year).endObject()); + + // Duration + Duration duration = Duration.ofSeconds(randomInt(100000)); + assertResult("{'d':null}", () -> 
builder().startObject().field("d", (Duration) null).endObject()); + assertResult("{'d':'" + duration + "'}", () -> builder().startObject().field("d", duration).endObject()); + + // Period + Period period = Period.ofDays(randomInt(1000)); + assertResult("{'p':null}", () -> builder().startObject().field("p", (Period) null).endObject()); + assertResult("{'p':'" + period + "'}", () -> builder().startObject().field("p", period).endObject()); + } + public void testGeoPoint() throws Exception { assertResult("{'geo':null}", () -> builder().startObject().field("geo", (GeoPoint) null).endObject()); assertResult("{'geo':{'lat':52.4267578125,'lon':13.271484375}}", () -> builder() diff --git a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java index 0475c324f06..7d01b3992fc 100644 --- a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java +++ b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java @@ -62,7 +62,7 @@ public class VectorHighlighterTests extends ESTestCase { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - assertThat(topDocs.totalHits, equalTo(1L)); + assertThat(topDocs.totalHits.value, equalTo(1L)); FastVectorHighlighter highlighter = new FastVectorHighlighter(); String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))), @@ -88,7 +88,7 @@ public class VectorHighlighterTests extends ESTestCase { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - assertThat(topDocs.totalHits, equalTo(1L)); + assertThat(topDocs.totalHits.value, equalTo(1L)); FastVectorHighlighter highlighter = new FastVectorHighlighter(); @@ -129,7 +129,7 @@ public class VectorHighlighterTests extends ESTestCase { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - assertThat(topDocs.totalHits, equalTo(1L)); + assertThat(topDocs.totalHits.value, equalTo(1L)); FastVectorHighlighter highlighter = new FastVectorHighlighter(); String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))), @@ -150,7 +150,7 @@ public class VectorHighlighterTests extends ESTestCase { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - assertThat(topDocs.totalHits, equalTo(1L)); + assertThat(topDocs.totalHits.value, equalTo(1L)); FastVectorHighlighter highlighter = new FastVectorHighlighter(); String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))), diff --git a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java index 0f3288b1973..ac2f2b0d4f3 100644 --- a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java @@ -109,6 +109,8 @@ public abstract class AbstractDisruptionTestCase extends ESIntegTestCase { protected void beforeIndexDeletion() throws Exception { if (disableBeforeIndexDeletion == false) { super.beforeIndexDeletion(); + 
internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex(); + assertSeqNos(); } } diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index f2491b2db1f..82ec987420b 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.discovery; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -29,6 +28,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.plugins.DiscoveryPlugin; @@ -99,7 +99,7 @@ public class DiscoveryModuleTests extends ESTestCase { private DiscoveryModule newModule(Settings settings, List plugins) { return new DiscoveryModule(settings, threadPool, transportService, namedWriteableRegistry, null, masterService, - clusterApplier, clusterSettings, plugins, null); + clusterApplier, clusterSettings, plugins, null, createTempDir().toAbsolutePath()); } public void testDefaults() { diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java similarity index 63% rename from plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java rename to server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java index 5837d3bcdfe..8922a38ea1e 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.discovery.file; +package org.elasticsearch.discovery.zen; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -26,9 +26,7 @@ import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.env.Environment; -import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -50,16 +48,15 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import static org.elasticsearch.discovery.file.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE; +import static org.elasticsearch.discovery.zen.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE; -/** - * Tests for {@link FileBasedUnicastHostsProvider}. 
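// Context for the rename above: the file-based hosts provider moved out of the
// discovery-file plugin into server, so these tests now live in
// org.elasticsearch.discovery.zen and cover both the new location
// (config/unicast_hosts.txt) and the deprecated legacy location
// (config/discovery-file/unicast_hosts.txt). A minimal usage sketch, assuming a
// Settings instance and a config-directory Path as set up in setupAndRunHostProvider
// below:
//
//     FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(settings, configPath);
//     List<TransportAddress> seeds = provider.buildDynamicHosts(hostsResolver);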
- */ public class FileBasedUnicastHostsProviderTests extends ESTestCase { + private boolean legacyLocation; private ThreadPool threadPool; private ExecutorService executorService; private MockTransportService transportService; + private Path configPath; @Before public void setUp() throws Exception { @@ -83,23 +80,20 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase { @Before public void createTransportSvc() { - MockTcpTransport transport = - new MockTcpTransport(Settings.EMPTY, - threadPool, - BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), - new NamedWriteableRegistry(Collections.emptyList()), - new NetworkService(Collections.emptyList())) { - @Override - public BoundTransportAddress boundAddress() { - return new BoundTransportAddress( - new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)}, - new TransportAddress(InetAddress.getLoopbackAddress(), 9300) - ); - } - }; + final MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + new NoneCircuitBreakerService(), + new NamedWriteableRegistry(Collections.emptyList()), + new NetworkService(Collections.emptyList())) { + @Override + public BoundTransportAddress boundAddress() { + return new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)}, + new TransportAddress(InetAddress.getLoopbackAddress(), 9300) + ); + } + }; transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - null); + null); } public void testBuildDynamicNodes() throws Exception { @@ -114,18 +108,27 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase { assertEquals(9300, nodes.get(2).getPort()); } + public void testBuildDynamicNodesLegacyLocation() throws Exception { + legacyLocation = true; + testBuildDynamicNodes(); + assertDeprecatedLocationWarning(); + } + public void testEmptyUnicastHostsFile() throws Exception { final List hostEntries = Collections.emptyList(); final List addresses = setupAndRunHostProvider(hostEntries); assertEquals(0, addresses.size()); } - public void testUnicastHostsDoesNotExist() throws Exception { - final Settings settings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - final Environment environment = TestEnvironment.newEnvironment(settings); - final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(environment); + public void testEmptyUnicastHostsFileLegacyLocation() throws Exception { + legacyLocation = true; + testEmptyUnicastHostsFile(); + assertDeprecatedLocationWarning(); + } + + public void testUnicastHostsDoesNotExist() { + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); + final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(settings, createTempDir().toAbsolutePath()); final List addresses = provider.buildDynamicHosts((hosts, limitPortCounts) -> UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, TimeValue.timeValueSeconds(10))); @@ -133,42 +136,60 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase { } public void testInvalidHostEntries() throws Exception { - List hostEntries = Arrays.asList("192.168.0.1:9300:9300"); - List addresses = setupAndRunHostProvider(hostEntries); + final List hostEntries = 
Arrays.asList("192.168.0.1:9300:9300"); + final List addresses = setupAndRunHostProvider(hostEntries); assertEquals(0, addresses.size()); } + public void testInvalidHostEntriesLegacyLocation() throws Exception { + legacyLocation = true; + testInvalidHostEntries(); + assertDeprecatedLocationWarning(); + } + public void testSomeInvalidHostEntries() throws Exception { - List hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301"); - List addresses = setupAndRunHostProvider(hostEntries); + final List hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301"); + final List addresses = setupAndRunHostProvider(hostEntries); assertEquals(1, addresses.size()); // only one of the two is valid and will be used assertEquals("192.168.0.1", addresses.get(0).getAddress()); assertEquals(9301, addresses.get(0).getPort()); } + public void testSomeInvalidHostEntriesLegacyLocation() throws Exception { + legacyLocation = true; + testSomeInvalidHostEntries(); + assertDeprecatedLocationWarning(); + } + // sets up the config dir, writes to the unicast hosts file in the config dir, // and then runs the file-based unicast host provider to get the list of discovery nodes private List setupAndRunHostProvider(final List hostEntries) throws IOException { final Path homeDir = createTempDir(); final Settings settings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), homeDir) - .build(); - final Path configPath; + .put(Environment.PATH_HOME_SETTING.getKey(), homeDir) + .build(); if (randomBoolean()) { configPath = homeDir.resolve("config"); } else { configPath = createTempDir(); } - final Path discoveryFilePath = configPath.resolve("discovery-file"); + final Path discoveryFilePath = legacyLocation ? configPath.resolve("discovery-file") : configPath; Files.createDirectories(discoveryFilePath); final Path unicastHostsPath = discoveryFilePath.resolve(UNICAST_HOSTS_FILE); try (BufferedWriter writer = Files.newBufferedWriter(unicastHostsPath)) { writer.write(String.join("\n", hostEntries)); } - return new FileBasedUnicastHostsProvider( - new Environment(settings, configPath)).buildDynamicHosts((hosts, limitPortCounts) -> - UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, - TimeValue.timeValueSeconds(10))); + return new FileBasedUnicastHostsProvider(settings, configPath).buildDynamicHosts((hosts, limitPortCounts) -> + UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, + TimeValue.timeValueSeconds(10))); + } + + private void assertDeprecatedLocationWarning() { + assertWarnings("Found dynamic hosts list at [" + + configPath.resolve("discovery-file").resolve(UNICAST_HOSTS_FILE) + + "] but this path is deprecated. This list should be at [" + + configPath.resolve(UNICAST_HOSTS_FILE) + + "] instead. 
Support for the deprecated path will be removed in future."); } } diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java index 4e96add0c53..f1392e5f070 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java @@ -29,9 +29,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; -import java.util.stream.Collectors; - -import static org.elasticsearch.test.VersionUtils.allVersions; import static org.elasticsearch.test.VersionUtils.getPreviousVersion; import static org.elasticsearch.test.VersionUtils.incompatibleFutureVersion; import static org.elasticsearch.test.VersionUtils.maxCompatibleVersion; @@ -81,7 +78,7 @@ public class MembershipActionTests extends ESTestCase { final Version maxNodeVersion = nodes.getMaxNodeVersion(); final Version minNodeVersion = nodes.getMinNodeVersion(); - if (maxNodeVersion.onOrAfter(Version.V_6_0_0_alpha1)) { + if (maxNodeVersion.onOrAfter(Version.V_7_0_0_alpha1)) { final Version tooLow = getPreviousVersion(maxNodeVersion.minimumCompatibilityVersion()); expectThrows(IllegalStateException.class, () -> { if (randomBoolean()) { @@ -92,7 +89,7 @@ public class MembershipActionTests extends ESTestCase { }); } - if (minNodeVersion.before(Version.V_5_5_0)) { + if (minNodeVersion.before(Version.V_6_0_0)) { Version tooHigh = incompatibleFutureVersion(minNodeVersion); expectThrows(IllegalStateException.class, () -> { if (randomBoolean()) { @@ -103,8 +100,8 @@ public class MembershipActionTests extends ESTestCase { }); } - if (minNodeVersion.onOrAfter(Version.V_6_0_0_alpha1)) { - Version oldMajor = randomFrom(allVersions().stream().filter(v -> v.major < 6).collect(Collectors.toList())); + if (minNodeVersion.onOrAfter(Version.V_7_0_0_alpha1)) { + Version oldMajor = Version.V_6_4_0.minimumCompatibilityVersion(); expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); } diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 39f03fefe4e..0b44d9c94d5 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -79,13 +79,13 @@ public class NodeEnvironmentTests extends ESTestCase { List dataPaths = Environment.PATH_DATA_SETTING.get(settings); // Reuse the same location and attempt to lock again - IllegalStateException ex = - expectThrows(IllegalStateException.class, () -> new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings))); + IllegalStateException ex = expectThrows(IllegalStateException.class, () -> + new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {})); assertThat(ex.getMessage(), containsString("failed to obtain node lock")); // Close the environment that holds the lock and make sure we can get the lock after release env.close(); - env = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); + env = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {}); assertThat(env.nodeDataPaths(), arrayWithSize(dataPaths.size())); for (int i = 0; i < dataPaths.size(); i++) { @@ -120,7 +120,7 @@ public 
class NodeEnvironmentTests extends ESTestCase { final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 2).build()); final NodeEnvironment first = newNodeEnvironment(settings); List dataPaths = Environment.PATH_DATA_SETTING.get(settings); - NodeEnvironment second = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); + NodeEnvironment second = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {}); assertEquals(first.nodeDataPaths().length, dataPaths.size()); assertEquals(second.nodeDataPaths().length, dataPaths.size()); for (int i = 0; i < dataPaths.size(); i++) { @@ -477,7 +477,7 @@ public class NodeEnvironmentTests extends ESTestCase { @Override public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException { Settings build = buildEnvSettings(settings); - return new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); + return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {}); } public Settings buildEnvSettings(Settings settings) { @@ -492,7 +492,7 @@ public class NodeEnvironmentTests extends ESTestCase { .put(settings) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .putList(Environment.PATH_DATA_SETTING.getKey(), dataPaths).build(); - return new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); + return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {}); } public NodeEnvironment newNodeEnvironment(String[] dataPaths, String sharedDataPath, Settings settings) throws IOException { @@ -501,6 +501,6 @@ public class NodeEnvironmentTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), sharedDataPath) .putList(Environment.PATH_DATA_SETTING.getKey(), dataPaths).build(); - return new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); + return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {}); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayTests.java new file mode 100644 index 00000000000..457b3a14ebf --- /dev/null +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayTests.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.gateway;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.SettingUpgrader;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Collections;
+import java.util.Set;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class GatewayTests extends ESTestCase {
+
+    public void testUpgradePersistentSettings() {
+        runUpgradeSettings(MetaData.Builder::persistentSettings, MetaData::persistentSettings);
+    }
+
+    public void testUpgradeTransientSettings() {
+        runUpgradeSettings(MetaData.Builder::transientSettings, MetaData::transientSettings);
+    }
+
+    private void runUpgradeSettings(
+            final BiConsumer<MetaData.Builder, Settings> applySettingsToBuilder, final Function<MetaData, Settings> metaDataSettings) {
+        final Setting<String> oldSetting = Setting.simpleString("foo.old", Setting.Property.Dynamic, Setting.Property.NodeScope);
+        final Setting<String> newSetting = Setting.simpleString("foo.new", Setting.Property.Dynamic, Setting.Property.NodeScope);
+        final Set<Setting<?>> settingsSet =
+                Stream.concat(
+                        ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(),
+                        Stream.of(oldSetting, newSetting)).collect(Collectors.toSet());
+        final ClusterSettings clusterSettings = new ClusterSettings(
+                Settings.EMPTY,
+                settingsSet,
+                Collections.singleton(new SettingUpgrader<String>() {
+
+                    @Override
+                    public Setting<String> getSetting() {
+                        return oldSetting;
+                    }
+
+                    @Override
+                    public String getKey(final String key) {
+                        return "foo.new";
+                    }
+
+                    @Override
+                    public String getValue(final String value) {
+                        return "new." + value;
+                    }
+
+                }));
+        final ClusterService clusterService = new ClusterService(Settings.EMPTY, clusterSettings, null);
+        final Gateway gateway = new Gateway(Settings.EMPTY, clusterService, null, null);
+        final MetaData.Builder builder = MetaData.builder();
+        final Settings settings = Settings.builder().put("foo.old", randomAlphaOfLength(8)).build();
+        applySettingsToBuilder.accept(builder, settings);
+        final ClusterState state = gateway.upgradeAndArchiveUnknownOrInvalidSettings(builder).build();
+        assertFalse(oldSetting.exists(metaDataSettings.apply(state.metaData())));
+        assertTrue(newSetting.exists(metaDataSettings.apply(state.metaData())));
+        assertThat(newSetting.get(metaDataSettings.apply(state.metaData())), equalTo("new."
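// How the upgrader registered above takes effect (behavior inferred from the assertions
// in this test): during metadata recovery the gateway applies each registered
// SettingUpgrader to the persistent and transient settings, rewriting the key via
// getKey and the value via getValue before validation, roughly:
//
//     Settings upgraded = clusterSettings.upgradeSettings(metaData.persistentSettings());
//     // "foo.old" -> "foo.new", value "v" -> "new.v"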
+ oldSetting.get(settings))); + } + +} diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index d236d01f049..0bf80e52398 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -267,6 +267,7 @@ public class MetaDataStateFormatTests extends ESTestCase { IndexMetaData deserialized = indices.get(original.getIndex().getName()); assertThat(deserialized, notNullValue()); assertThat(deserialized.getVersion(), equalTo(original.getVersion())); + assertThat(deserialized.getMappingVersion(), equalTo(original.getMappingVersion())); assertThat(deserialized.getNumberOfReplicas(), equalTo(original.getNumberOfReplicas())); assertThat(deserialized.getNumberOfShards(), equalTo(original.getNumberOfShards())); } diff --git a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index d098c4918a7..b0b6c35f92a 100644 --- a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -40,6 +40,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -397,7 +398,8 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { .get(); logger.info("--> indexing docs"); - for (int i = 0; i < randomIntBetween(1, 1024); i++) { + int numDocs = randomIntBetween(1, 1024); + for (int i = 0; i < numDocs; i++) { client(primaryNode).prepareIndex("test", "type").setSource("field", "value").execute().actionGet(); } @@ -419,12 +421,15 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { } logger.info("--> restart replica node"); + boolean softDeleteEnabled = internalCluster().getInstance(IndicesService.class, primaryNode) + .indexServiceSafe(resolveIndex("test")).getShard(0).indexSettings().isSoftDeleteEnabled(); + int moreDocs = randomIntBetween(1, 1024); internalCluster().restartNode(replicaNode, new RestartCallback() { @Override public Settings onNodeStopped(String nodeName) throws Exception { // index some more documents; we expect to reuse the files that already exist on the replica - for (int i = 0; i < randomIntBetween(1, 1024); i++) { + for (int i = 0; i < moreDocs; i++) { client(primaryNode).prepareIndex("test", "type").setSource("field", "value").execute().actionGet(); } @@ -432,8 +437,12 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { client(primaryNode).admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0) ).get(); client(primaryNode).admin().indices().prepareFlush("test").setForce(true).get(); + if (softDeleteEnabled) { // We need an extra flush to advance the min_retained_seqno of the SoftDeletesPolicy + client(primaryNode).admin().indices().prepareFlush("test").setForce(true).get(); + 
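// (Why two flushes, as a hedged reading of the comment above: the first flush creates a
// new safe commit under the reduced retention settings, and only a subsequent flush lets
// the SoftDeletesPolicy recompute min_retained_seqno against that commit, so the replica
// can actually stop retaining the soft-deleted operations.)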
} return super.onNodeStopped(nodeName); } }); diff --git a/server/src/test/java/org/elasticsearch/get/GetActionIT.java b/server/src/test/java/org/elasticsearch/get/GetActionIT.java index 5ed6b957c78..edd8660586a 100644 --- a/server/src/test/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.get; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.flush.FlushResponse; @@ -214,7 +213,7 @@ public class GetActionIT extends ESIntegTestCase { assertThat(exception.getMessage(), endsWith("can't execute a single index op")); } - private static String indexOrAlias() { + static String indexOrAlias() { return randomBoolean() ? "test" : "alias"; } @@ -524,41 +523,6 @@ public class GetActionIT extends ESIntegTestCase { assertThat(response.getResponses()[2].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2")); } - public void testGetFieldsMetaDataWithRouting() throws Exception { - assertAcked(prepareCreate("test") - .addMapping("_doc", "field1", "type=keyword,store=true") - .addAlias(new Alias("alias")) - .setSettings(Settings.builder().put("index.refresh_interval", -1).put("index.version.created", Version.V_5_6_0.id))); - // multi types in 5.6 - - client().prepareIndex("test", "_doc", "1") - .setRouting("1") - .setSource(jsonBuilder().startObject().field("field1", "value").endObject()) - .get(); - - GetResponse getResponse = client().prepareGet(indexOrAlias(), "_doc", "1") - .setRouting("1") - .setStoredFields("field1") - .get(); - assertThat(getResponse.isExists(), equalTo(true)); - assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false)); - assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value")); - assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true)); - assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); - - flush(); - - getResponse = client().prepareGet(indexOrAlias(), "_doc", "1") - .setStoredFields("field1") - .setRouting("1") - .get(); - assertThat(getResponse.isExists(), equalTo(true)); - assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false)); - assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value")); - assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true)); - assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); - } - public void testGetFieldsNonLeafField() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias")) .addMapping("my-type1", jsonBuilder().startObject().startObject("my-type1").startObject("properties") diff --git a/server/src/test/java/org/elasticsearch/get/LegacyGetActionIT.java b/server/src/test/java/org/elasticsearch/get/LegacyGetActionIT.java new file mode 100644 index 00000000000..4382f677ad6 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/get/LegacyGetActionIT.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.get; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESIntegTestCase; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.get.GetActionIT.indexOrAlias; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +public class LegacyGetActionIT extends ESIntegTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public void testGetFieldsMetaDataWithRouting() throws Exception { + assertAcked(prepareCreate("test") + .addMapping("_doc", "field1", "type=keyword,store=true") + .addAlias(new Alias("alias")) + .setSettings( + Settings.builder() + .put("index.refresh_interval", -1) + .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_0_0))); // multi-types in 6.0.0 + + try (XContentBuilder source = jsonBuilder().startObject().field("field1", "value").endObject()) { + client() + .prepareIndex("test", "_doc", "1") + .setRouting("1") + .setSource(source) + .get(); + } + + { + final GetResponse getResponse = client() + .prepareGet(indexOrAlias(), "_doc", "1") + .setRouting("1") + .setStoredFields("field1") + .get(); + assertThat(getResponse.isExists(), equalTo(true)); + assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false)); + assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value")); + assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true)); + assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); + } + + flush(); + + { + final GetResponse getResponse = client() + .prepareGet(indexOrAlias(), "_doc", "1") + .setStoredFields("field1") + .setRouting("1") + .get(); + assertThat(getResponse.isExists(), equalTo(true)); + assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false)); + assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value")); + assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true)); + assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/EsTieredMergePolicyTests.java b/server/src/test/java/org/elasticsearch/index/EsTieredMergePolicyTests.java index 30f73b887f7..ab9d24a7bb4 100644 --- a/server/src/test/java/org/elasticsearch/index/EsTieredMergePolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/EsTieredMergePolicyTests.java @@ -69,4 +69,10 @@ public class EsTieredMergePolicyTests extends ESTestCase { policy.setSegmentsPerTier(42); assertEquals(42, policy.regularMergePolicy.getSegmentsPerTier(), 0); } + + public void testSetDeletesPctAllowed() { + EsTieredMergePolicy policy = new 
EsTieredMergePolicy(); + policy.setDeletesPctAllowed(42); + assertEquals(42, policy.regularMergePolicy.getDeletesPctAllowed(), 0); + } } diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index a82b932e2b5..078ec5ec20a 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.index; import org.apache.lucene.index.AssertingDirectoryReader; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FieldInvertState; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.IndexSearcher; @@ -87,6 +86,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; public class IndexModuleTests extends ESTestCase { @@ -132,7 +133,7 @@ public class IndexModuleTests extends ESTestCase { bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService); scriptService = new ScriptService(settings, Collections.emptyMap(), Collections.emptyMap()); clusterService = ClusterServiceUtils.createClusterService(threadPool); - nodeEnvironment = new NodeEnvironment(settings, environment); + nodeEnvironment = new NodeEnvironment(settings, environment, nodeId -> {}); mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); } @@ -376,6 +377,21 @@ public class IndexModuleTests extends ESTestCase { indexService.close("simon says", false); } + public void testMmapfsStoreTypeNotAllowed() { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put("index.store.type", "mmapfs") + .build(); + final Settings nodeSettings = Settings.builder() + .put(IndexModule.NODE_STORE_ALLOW_MMAPFS.getKey(), false) + .build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(new Index("foo", "_na_"), settings, nodeSettings); + final IndexModule module = + new IndexModule(indexSettings, emptyAnalysisRegistry, new InternalEngineFactory(), Collections.emptyMap()); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> newIndexService(module)); + assertThat(e, hasToString(containsString("store type [mmapfs] is not allowed"))); + } + class CustomQueryCache implements QueryCache { @Override @@ -415,13 +431,8 @@ public class IndexModuleTests extends ESTestCase { } @Override - public SimWeight computeWeight(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { - return delegate.computeWeight(boost, collectionStats, termStats); - } - - @Override - public SimScorer simScorer(SimWeight weight, LeafReaderContext context) throws IOException { - return delegate.simScorer(weight, context); + public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... 
termStats) { + return delegate.scorer(boost, collectionStats, termStats); } } diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index 28fa440d96a..52513ce7a8b 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -210,7 +211,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase { // we are running on updateMetaData if the interval changes try (Engine.Searcher searcher = shard.acquireSearcher("test")) { TopDocs search = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, search.totalHits); + assertEquals(1, search.totalHits.value); } }); assertFalse(refreshTask.isClosed()); @@ -223,7 +224,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase { // this one becomes visible due to the force refresh we are running on updateMetaData if the interval changes try (Engine.Searcher searcher = shard.acquireSearcher("test")) { TopDocs search = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(2, search.totalHits); + assertEquals(2, search.totalHits.value); } }); client().prepareIndex("test", "test", "2").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); @@ -231,7 +232,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase { // this one becomes visible due to the scheduled refresh try (Engine.Searcher searcher = shard.acquireSearcher("test")) { TopDocs search = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(3, search.totalHits); + assertEquals(3, search.totalHits.value); } }); } @@ -306,7 +307,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase { .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1)) .get(); IndexShard shard = indexService.getShard(0); - assertBusy(() -> assertThat(shard.estimateTranslogOperationsFromMinSeq(0L), equalTo(0))); + assertBusy(() -> assertThat(IndexShardTestCase.getTranslog(shard).totalOperations(), equalTo(0))); } public void testIllegalFsyncInterval() { @@ -317,7 +318,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase { createIndex("test", settings); fail(); } catch (IllegalArgumentException ex) { - assertEquals("Failed to parse value [0ms] for setting [index.translog.sync_interval] must be >= 100ms", ex.getMessage()); + assertEquals("failed to parse value [0ms] for setting [index.translog.sync_interval], must be >= [100ms]", ex.getMessage()); } } } diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index b7da5add2ac..64a2fa69bcb 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -553,4 +553,12 @@ public class IndexSettingsTests extends ESTestCase { ); assertThat(index.getDefaultFields(), equalTo(Arrays.asList("body", "title"))); } + + public void testUpdateSoftDeletesFails() { + IndexScopedSettings 
settings = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> + settings.updateSettings(Settings.builder().put("index.soft_deletes.enabled", randomBoolean()).build(), + Settings.builder(), Settings.builder(), "index")); + assertThat(error.getMessage(), equalTo("final index setting [index.soft_deletes.enabled], not updateable")); + } } diff --git a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java index 78569d927be..0dcba53df88 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java @@ -146,15 +146,4 @@ public class IndexSortSettingsTests extends ESTestCase { assertThat(exc.getMessage(), containsString("Illegal missing value:[default]," + " must be one of [_last, _first]")); } - - public void testInvalidVersion() throws IOException { - final Settings settings = Settings.builder() - .put("index.sort.field", "field1") - .build(); - IllegalArgumentException exc = - expectThrows(IllegalArgumentException.class, () -> indexSettings(settings, Version.V_5_4_0)); - assertThat(exc.getMessage(), - containsString("unsupported index.version.created:5.4.0, " + - "can't set index.sort on versions prior to 6.0.0-alpha1")); - } } diff --git a/server/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java b/server/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java index a8370095564..68869592485 100644 --- a/server/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java @@ -29,6 +29,7 @@ import java.io.IOException; import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.index.IndexSettingsTests.newIndexMeta; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class MergePolicySettingsTests extends ESTestCase { @@ -100,6 +101,14 @@ public class MergePolicySettingsTests extends ESTestCase { indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1).build())); assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, 0); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0); + indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build())); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), 22, 0); + + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> + indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build()))); + final Throwable cause = exc.getCause(); + assertThat(cause.getMessage(), containsString("must be <= 50.0")); indexSettings.updateIndexMetaData(newIndexMeta("index", EMPTY_SETTINGS)); // see if defaults are restored assertEquals(((EsTieredMergePolicy) 
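// deletes_pct_allowed is the new merge-policy knob under test: it caps the percentage of
// deleted documents tolerated in an index before merges reclaim the space. Per the
// assertions above it defaults to MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, accepts
// a value such as 22, and rejects 53 with "must be <= 50.0" (mirroring the bound in
// Lucene's TieredMergePolicy). A sketch of an update that exercises it, assuming the
// setting key is index.merge.policy.deletes_pct_allowed:
//
//     Settings s = Settings.builder().put("index.merge.policy.deletes_pct_allowed", 22.0d).build();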
indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
         assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00);
@@ -107,6 +116,7 @@ public class MergePolicySettingsTests extends ESTestCase {
         assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
         assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);
         assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
+        assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0);
     }

     public Settings build(String value) {
diff --git a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java
index 26a5b87866c..04dc98deb7b 100644
--- a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java
+++ b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java
@@ -103,7 +103,7 @@ public class AnalysisRegistryTests extends ESTestCase {
     }

     public void testOverrideDefaultIndexAnalyzerIsUnsupported() {
-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_alpha1, Version.CURRENT);
+        Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1, Version.CURRENT);
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         AnalyzerProvider<?> defaultIndex = new PreBuiltAnalyzerProvider("default_index", AnalyzerScope.INDEX, new EnglishAnalyzer());
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
index 9aba48f7de5..a5a4dbd9db8 100644
--- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
+++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
@@ -41,6 +41,11 @@ import static org.hamcrest.Matchers.is;

 public class PreBuiltAnalyzerTests extends ESSingleNodeTestCase {

+    @Override
+    protected boolean forbidPrivateIndexSettings() {
+        return false;
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> getPlugins() {
         return pluginList(InternalSettingsPlugin.class);
@@ -56,21 +61,21 @@ public class PreBuiltAnalyzerTests extends ESSingleNodeTestCase {

     public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() {
         assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT),
-                is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_5_0_0)));
+                is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_6_0_0)));
     }

     public void testThatInstancesAreCachedAndReused() {
         assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT),
                 PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT));
         // same es version should be cached
-        assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_2_1),
-                PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_2_1));
-
assertNotSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_0_0), - PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_0_1)); + assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_2_1), + PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_2_1)); + assertNotSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_0_0), + PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_0_1)); // Same Lucene version should be cached: - assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_5_2_1), - PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_5_2_2)); + assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_1), + PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_2)); } public void testThatAnalyzersAreUsedInMapping() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java index f82f2c39f44..ddb2b857486 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java @@ -22,8 +22,7 @@ package org.elasticsearch.index.codec; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; -import org.apache.lucene.codecs.lucene62.Lucene62Codec; -import org.apache.lucene.codecs.lucene70.Lucene70Codec; +import org.apache.lucene.codecs.lucene80.Lucene80Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -54,8 +53,8 @@ public class CodecTests extends ESTestCase { public void testResolveDefaultCodecs() throws Exception { CodecService codecService = createCodecService(); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene70Codec.class)); - assertThat(codecService.codec("Lucene62"), instanceOf(Lucene62Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene80Codec.class)); + assertThat(codecService.codec("Lucene80"), instanceOf(Lucene80Codec.class)); } public void testDefault() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index ea7de50b7b3..3f9fc9a0429 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -51,20 +51,24 @@ public class CombinedDeletionPolicyTests extends ESTestCase { public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); + final int extraRetainedOps = between(0, 100); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, extraRetainedOps); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); final LongArrayList maxSeqNoList = new LongArrayList(); final LongArrayList translogGenList = new LongArrayList(); final List commitList = new ArrayList<>(); int totalCommits = between(2, 20); long 
lastMaxSeqNo = 0; + long lastCheckpoint = lastMaxSeqNo; long lastTranslogGen = 0; final UUID translogUUID = UUID.randomUUID(); for (int i = 0; i < totalCommits; i++) { lastMaxSeqNo += between(1, 10000); + lastCheckpoint = randomLongBetween(lastCheckpoint, lastMaxSeqNo); lastTranslogGen += between(1, 100); - commitList.add(mockIndexCommit(lastMaxSeqNo, translogUUID, lastTranslogGen)); + commitList.add(mockIndexCommit(lastCheckpoint, lastMaxSeqNo, translogUUID, lastTranslogGen)); maxSeqNoList.add(lastMaxSeqNo); translogGenList.add(lastTranslogGen); } @@ -85,14 +89,19 @@ public class CombinedDeletionPolicyTests extends ESTestCase { } assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(translogGenList.get(keptIndex))); assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), + equalTo(Math.min(getLocalCheckpoint(commitList.get(keptIndex)) + 1, globalCheckpoint.get() + 1 - extraRetainedOps))); } public void testAcquireIndexCommit() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); + final int extraRetainedOps = between(0, 100); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, extraRetainedOps); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); long lastMaxSeqNo = between(1, 1000); + long lastCheckpoint = randomLongBetween(-1, lastMaxSeqNo); long lastTranslogGen = between(1, 20); int safeIndex = 0; List commitList = new ArrayList<>(); @@ -102,8 +111,9 @@ public class CombinedDeletionPolicyTests extends ESTestCase { int newCommits = between(1, 10); for (int n = 0; n < newCommits; n++) { lastMaxSeqNo += between(1, 1000); + lastCheckpoint = randomLongBetween(lastCheckpoint, lastMaxSeqNo); lastTranslogGen += between(1, 20); - commitList.add(mockIndexCommit(lastMaxSeqNo, translogUUID, lastTranslogGen)); + commitList.add(mockIndexCommit(lastCheckpoint, lastMaxSeqNo, translogUUID, lastTranslogGen)); } // Advance the global checkpoint to between [safeIndex, safeIndex + 1) safeIndex = randomIntBetween(safeIndex, commitList.size() - 1); @@ -114,6 +124,9 @@ public class CombinedDeletionPolicyTests extends ESTestCase { globalCheckpoint.set(randomLongBetween(lower, upper)); commitList.forEach(this::resetDeletion); indexPolicy.onCommit(commitList); + IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get()); + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), + equalTo(Math.min(getLocalCheckpoint(safeCommit) + 1, globalCheckpoint.get() + 1 - extraRetainedOps))); // Captures and releases some commits int captures = between(0, 5); for (int n = 0; n < captures; n++) { @@ -132,7 +145,7 @@ public class CombinedDeletionPolicyTests extends ESTestCase { snapshottingCommits.remove(snapshot); final long pendingSnapshots = snapshottingCommits.stream().filter(snapshot::equals).count(); final IndexCommit lastCommit = commitList.get(commitList.size() - 1); - final IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get()); + safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get()); 
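// The retention invariant that the repeated assertions in this test encode, in one place:
//
//     minRetainedSeqNo == Math.min(localCheckpointOf(safeCommit) + 1,
//                                  globalCheckpoint + 1 - extraRetainedOps)
//
// i.e. soft deletes must keep every operation above the safe commit's local checkpoint,
// plus the configured number of extra operations below the global checkpoint.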
assertThat(indexPolicy.releaseCommit(snapshot), equalTo(pendingSnapshots == 0 && snapshot.equals(lastCommit) == false && snapshot.equals(safeCommit) == false)); } @@ -143,6 +156,8 @@ public class CombinedDeletionPolicyTests extends ESTestCase { equalTo(Long.parseLong(commitList.get(safeIndex).getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(Long.parseLong(commitList.get(commitList.size() - 1).getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), + equalTo(Math.min(getLocalCheckpoint(commitList.get(safeIndex)) + 1, globalCheckpoint.get() + 1 - extraRetainedOps))); } snapshottingCommits.forEach(indexPolicy::releaseCommit); globalCheckpoint.set(randomLongBetween(lastMaxSeqNo, Long.MAX_VALUE)); @@ -154,25 +169,27 @@ public class CombinedDeletionPolicyTests extends ESTestCase { assertThat(commitList.get(commitList.size() - 1).isDeleted(), equalTo(false)); assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(lastTranslogGen)); assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); + IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get()); + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), + equalTo(Math.min(getLocalCheckpoint(safeCommit) + 1, globalCheckpoint.get() + 1 - extraRetainedOps))); } public void testLegacyIndex() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); long legacyTranslogGen = randomNonNegativeLong(); IndexCommit legacyCommit = mockLegacyIndexCommit(translogUUID, legacyTranslogGen); - indexPolicy.onCommit(singletonList(legacyCommit)); - verify(legacyCommit, never()).delete(); - assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(legacyTranslogGen)); - assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(legacyTranslogGen)); + assertThat(CombinedDeletionPolicy.findSafeCommitPoint(singletonList(legacyCommit), globalCheckpoint.get()), + equalTo(legacyCommit)); long safeTranslogGen = randomLongBetween(legacyTranslogGen, Long.MAX_VALUE); long maxSeqNo = randomLongBetween(1, Long.MAX_VALUE); - final IndexCommit freshCommit = mockIndexCommit(maxSeqNo, translogUUID, safeTranslogGen); + final IndexCommit freshCommit = mockIndexCommit(randomLongBetween(-1, maxSeqNo), maxSeqNo, translogUUID, safeTranslogGen); globalCheckpoint.set(randomLongBetween(0, maxSeqNo - 1)); indexPolicy.onCommit(Arrays.asList(legacyCommit, freshCommit)); @@ -189,25 +206,32 @@ public class CombinedDeletionPolicyTests extends ESTestCase { verify(freshCommit, times(0)).delete(); assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(safeTranslogGen)); assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(safeTranslogGen)); + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), equalTo(getLocalCheckpoint(freshCommit) + 1)); } public void testDeleteInvalidCommits() throws Exception { final AtomicLong globalCheckpoint = new 
AtomicLong(randomNonNegativeLong()); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); final int invalidCommits = between(1, 10); final List commitList = new ArrayList<>(); for (int i = 0; i < invalidCommits; i++) { - commitList.add(mockIndexCommit(randomNonNegativeLong(), UUID.randomUUID(), randomNonNegativeLong())); + long maxSeqNo = randomNonNegativeLong(); + commitList.add(mockIndexCommit(randomLongBetween(-1, maxSeqNo), maxSeqNo, UUID.randomUUID(), randomNonNegativeLong())); } final UUID expectedTranslogUUID = UUID.randomUUID(); long lastTranslogGen = 0; final int validCommits = between(1, 10); + long lastMaxSeqNo = between(1, 1000); + long lastCheckpoint = randomLongBetween(-1, lastMaxSeqNo); for (int i = 0; i < validCommits; i++) { lastTranslogGen += between(1, 1000); - commitList.add(mockIndexCommit(randomNonNegativeLong(), expectedTranslogUUID, lastTranslogGen)); + lastMaxSeqNo += between(1, 1000); + lastCheckpoint = randomLongBetween(lastCheckpoint, lastMaxSeqNo); + commitList.add(mockIndexCommit(lastCheckpoint, lastMaxSeqNo, expectedTranslogUUID, lastTranslogGen)); } // We should never keep invalid commits regardless of the value of the global checkpoint. @@ -215,21 +239,26 @@ public class CombinedDeletionPolicyTests extends ESTestCase { for (int i = 0; i < invalidCommits - 1; i++) { verify(commitList.get(i), times(1)).delete(); } + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), + equalTo(getLocalCheckpoint(CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get())) + 1)); } public void testCheckUnreferencedCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0); final UUID translogUUID = UUID.randomUUID(); final TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); final List commitList = new ArrayList<>(); int totalCommits = between(2, 20); long lastMaxSeqNo = between(1, 1000); + long lastCheckpoint = randomLongBetween(-1, lastMaxSeqNo); long lastTranslogGen = between(1, 50); for (int i = 0; i < totalCommits; i++) { lastMaxSeqNo += between(1, 10000); lastTranslogGen += between(1, 100); - commitList.add(mockIndexCommit(lastMaxSeqNo, translogUUID, lastTranslogGen)); + lastCheckpoint = randomLongBetween(lastCheckpoint, lastMaxSeqNo); + commitList.add(mockIndexCommit(lastCheckpoint, lastMaxSeqNo, translogUUID, lastTranslogGen)); } IndexCommit safeCommit = randomFrom(commitList); globalCheckpoint.set(Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO))); @@ -256,8 +285,9 @@ public class CombinedDeletionPolicyTests extends ESTestCase { } } - IndexCommit mockIndexCommit(long maxSeqNo, UUID translogUUID, long translogGen) throws IOException { + IndexCommit mockIndexCommit(long localCheckpoint, long maxSeqNo, UUID translogUUID, long translogGen) throws IOException { 
final Map userData = new HashMap<>(); + userData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(localCheckpoint)); userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); userData.put(Translog.TRANSLOG_UUID_KEY, translogUUID.toString()); userData.put(Translog.TRANSLOG_GENERATION_KEY, Long.toString(translogGen)); @@ -278,6 +308,10 @@ public class CombinedDeletionPolicyTests extends ESTestCase { }).when(commit).delete(); } + private long getLocalCheckpoint(IndexCommit commit) throws IOException { + return Long.parseLong(commit.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + } + IndexCommit mockLegacyIndexCommit(UUID translogUUID, long translogGen) throws IOException { final Map userData = new HashMap<>(); userData.put(Translog.TRANSLOG_UUID_KEY, translogUUID.toString()); @@ -287,4 +321,5 @@ public class CombinedDeletionPolicyTests extends ESTestCase { resetDeletion(commit); return commit; } + } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 9151fa24fc9..a26fd72468b 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.engine; +import java.io.Closeable; import java.io.IOException; import java.io.UncheckedIOException; import java.nio.charset.Charset; @@ -77,10 +78,12 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.LogDocMergePolicy; +import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.PointValues; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.SoftDeletesRetentionMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.index.TieredMergePolicy; import org.apache.lucene.search.IndexSearcher; @@ -114,6 +117,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; @@ -133,6 +137,7 @@ import org.elasticsearch.index.fieldvisitor.FieldsVisitor; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.Mapper.BuilderContext; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; @@ -152,6 +157,7 @@ import org.elasticsearch.index.translog.SnapshotMatchers; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.IndexSettingsModule; import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; @@ -171,8 +177,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; 
import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -246,8 +254,13 @@ public class InternalEngineTests extends EngineTestCase { } public void testSegments() throws Exception { + Settings settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( + IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build()); try (Store store = createStore(); - InternalEngine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { + InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null))) { List segments = engine.segments(false); assertThat(segments.isEmpty(), equalTo(true)); assertThat(engine.segmentsStats(false).getCount(), equalTo(0L)); @@ -648,7 +661,7 @@ public class InternalEngineTests extends EngineTestCase { trimUnsafeCommits(engine.config()); engine = new InternalEngine(engine.config()); assertTrue(engine.isRecovering()); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); Engine.Searcher searcher = wrapper.wrap(engine.acquireSearcher("test")); assertThat(counter.get(), equalTo(2)); searcher.close(); @@ -665,7 +678,7 @@ public class InternalEngineTests extends EngineTestCase { engine = new InternalEngine(engine.config()); expectThrows(IllegalStateException.class, () -> engine.flush(true, true)); assertTrue(engine.isRecovering()); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertFalse(engine.isRecovering()); doc = testParsedDocument("2", null, testDocumentWithTextField(), SOURCE, null); engine.index(indexForDoc(doc)); @@ -695,7 +708,7 @@ public class InternalEngineTests extends EngineTestCase { } trimUnsafeCommits(engine.config()); try (Engine recoveringEngine = new InternalEngine(engine.config())){ - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { final TotalHitCountCollector collector = new TotalHitCountCollector(); searcher.searcher().search(new MatchAllDocsQuery(), collector); @@ -731,7 +744,7 @@ public class InternalEngineTests extends EngineTestCase { } }; assertThat(getTranslog(recoveringEngine).stats().getUncommittedOperations(), equalTo(docs)); - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertTrue(committed.get()); } finally { IOUtils.close(recoveringEngine); @@ -765,16 +778,53 @@ public class InternalEngineTests extends EngineTestCase { initialEngine.close(); trimUnsafeCommits(initialEngine.config()); recoveringEngine = new InternalEngine(initialEngine.config()); - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), docs); - assertEquals(docs, 
topDocs.totalHits); + assertEquals(docs, topDocs.totalHits.value); } } finally { IOUtils.close(initialEngine, recoveringEngine, store); } } + public void testRecoveryFromTranslogUpToSeqNo() throws IOException { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + final long maxSeqNo; + try (InternalEngine engine = createEngine(config)) { + final int docs = randomIntBetween(1, 100); + for (int i = 0; i < docs; i++) { + final String id = Integer.toString(i); + final ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null); + engine.index(indexForDoc(doc)); + if (rarely()) { + engine.rollTranslogGeneration(); + } else if (rarely()) { + engine.flush(randomBoolean(), true); + } + } + maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo(); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + engine.syncTranslog(); + } + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); + assertThat(engine.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(maxSeqNo)); + } + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + long upToSeqNo = randomLongBetween(globalCheckpoint.get(), maxSeqNo); + engine.recoverFromTranslog(translogHandler, upToSeqNo); + assertThat(engine.getLocalCheckpoint(), equalTo(upToSeqNo)); + assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(upToSeqNo)); + } + } + } + public void testConcurrentGetAndFlush() throws Exception { ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); engine.index(indexForDoc(doc)); @@ -1152,7 +1202,7 @@ public class InternalEngineTests extends EngineTestCase { } trimUnsafeCommits(config); engine = new InternalEngine(config); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); } @@ -1171,7 +1221,7 @@ public class InternalEngineTests extends EngineTestCase { engine.close(); trimUnsafeCommits(config); engine = new InternalEngine(config); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID)); } @@ -1273,9 +1323,13 @@ public class InternalEngineTests extends EngineTestCase { assertThat(indexResult.getVersion(), equalTo(1L)); } - public void testForceMerge() throws IOException { + public void testForceMergeWithoutSoftDeletes() throws IOException { + Settings settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build(); + IndexMetaData indexMetaData = IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build(); try (Store store = createStore(); - Engine engine = createEngine(config(defaultSettings, store, createTempDir(), + Engine engine = createEngine(config(IndexSettingsModule.newIndexSettings(indexMetaData), store, createTempDir(), new LogByteSizeMergePolicy(), null))) { // use log MP here we test 
some behavior in ESMP int numDocs = randomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { @@ -1316,6 +1370,175 @@ public class InternalEngineTests extends EngineTestCase { } } + public void testForceMergeWithSoftDeletesRetention() throws Exception { + final long retainedExtraOps = randomLongBetween(0, 10); + Settings.Builder settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), retainedExtraOps); + final IndexMetaData indexMetaData = IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetaData); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final MapperService mapperService = createMapperService("test"); + final Set liveDocs = new HashSet<>(); + try (Store store = createStore(); + InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) { + int numDocs = scaledRandomIntBetween(10, 100); + for (int i = 0; i < numDocs; i++) { + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null); + engine.index(indexForDoc(doc)); + liveDocs.add(doc.id()); + } + for (int i = 0; i < numDocs; i++) { + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null); + if (randomBoolean()) { + engine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc.id()), primaryTerm.get())); + liveDocs.remove(doc.id()); + } + if (randomBoolean()) { + engine.index(indexForDoc(doc)); + liveDocs.add(doc.id()); + } + if (randomBoolean()) { + engine.flush(randomBoolean(), true); + } + } + engine.flush(); + + long localCheckpoint = engine.getLocalCheckpoint(); + globalCheckpoint.set(randomLongBetween(0, localCheckpoint)); + engine.syncTranslog(); + final long safeCommitCheckpoint; + try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) { + safeCommitCheckpoint = Long.parseLong(safeCommit.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + } + engine.forceMerge(true, 1, false, false, false); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); + Map ops = readAllOperationsInLucene(engine, mapperService) + .stream().collect(Collectors.toMap(Translog.Operation::seqNo, Function.identity())); + for (long seqno = 0; seqno <= localCheckpoint; seqno++) { + long minSeqNoToRetain = Math.min(globalCheckpoint.get() + 1 - retainedExtraOps, safeCommitCheckpoint + 1); + String msg = "seq# [" + seqno + "], global checkpoint [" + globalCheckpoint + "], retained-ops [" + retainedExtraOps + "]"; + if (seqno < minSeqNoToRetain) { + Translog.Operation op = ops.get(seqno); + if (op != null) { + assertThat(op, instanceOf(Translog.Index.class)); + assertThat(msg, ((Translog.Index) op).id(), isIn(liveDocs)); + assertEquals(msg, ((Translog.Index) op).source(), B_1); + } + } else { + assertThat(msg, ops.get(seqno), notNullValue()); + } + } + settings.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0); + indexSettings.updateIndexMetaData(IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build()); + engine.onSettingsChanged(); + globalCheckpoint.set(localCheckpoint); + engine.syncTranslog(); + + engine.forceMerge(true, 1, false, false, false); + 
assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); + assertThat(readAllOperationsInLucene(engine, mapperService), hasSize(liveDocs.size())); + } + } + + public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exception { + final long retainedExtraOps = randomLongBetween(0, 10); + Settings.Builder settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), retainedExtraOps); + final IndexMetaData indexMetaData = IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetaData); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final MapperService mapperService = createMapperService("test"); + final boolean omitSourceAllTheTime = randomBoolean(); + final Set liveDocs = new HashSet<>(); + final Set liveDocsWithSource = new HashSet<>(); + try (Store store = createStore(); + InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, + globalCheckpoint::get))) { + int numDocs = scaledRandomIntBetween(10, 100); + for (int i = 0; i < numDocs; i++) { + boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime; + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, useRecoverySource); + engine.index(indexForDoc(doc)); + liveDocs.add(doc.id()); + if (useRecoverySource == false) { + liveDocsWithSource.add(Integer.toString(i)); + } + } + for (int i = 0; i < numDocs; i++) { + boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime; + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, useRecoverySource); + if (randomBoolean()) { + engine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc.id()), primaryTerm.get())); + liveDocs.remove(doc.id()); + liveDocsWithSource.remove(doc.id()); + } + if (randomBoolean()) { + engine.index(indexForDoc(doc)); + liveDocs.add(doc.id()); + if (useRecoverySource == false) { + liveDocsWithSource.add(doc.id()); + } else { + liveDocsWithSource.remove(doc.id()); + } + } + if (randomBoolean()) { + engine.flush(randomBoolean(), true); + } + } + engine.flush(); + globalCheckpoint.set(randomLongBetween(0, engine.getLocalCheckpoint())); + engine.syncTranslog(); + final long minSeqNoToRetain; + try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) { + long safeCommitLocalCheckpoint = Long.parseLong( + safeCommit.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + minSeqNoToRetain = Math.min(globalCheckpoint.get() + 1 - retainedExtraOps, safeCommitLocalCheckpoint + 1); + } + engine.forceMerge(true, 1, false, false, false); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); + Map ops = readAllOperationsInLucene(engine, mapperService) + .stream().collect(Collectors.toMap(Translog.Operation::seqNo, Function.identity())); + for (long seqno = 0; seqno <= engine.getLocalCheckpoint(); seqno++) { + String msg = "seq# [" + seqno + "], global checkpoint [" + globalCheckpoint + "], retained-ops [" + retainedExtraOps + "]"; + if (seqno < minSeqNoToRetain) { + Translog.Operation op = ops.get(seqno); + if (op != null) { + assertThat(op, instanceOf(Translog.Index.class)); + assertThat(msg, ((Translog.Index) op).id(), 
isIn(liveDocs)); + } + } else { + Translog.Operation op = ops.get(seqno); + assertThat(msg, op, notNullValue()); + if (op instanceof Translog.Index) { + assertEquals(msg, ((Translog.Index) op).source(), B_1); + } + } + } + settings.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0); + indexSettings.updateIndexMetaData(IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build()); + engine.onSettingsChanged(); + // If the global checkpoint equals the local checkpoint, the next force-merge will be a noop + // because all deleted documents were already expunged by the previous force-merge. We need to flush + // a new segment to trigger a merge so that we can verify that all _recovery_source fields are pruned. + if (globalCheckpoint.get() == engine.getLocalCheckpoint() && liveDocs.isEmpty() == false) { + String deleteId = randomFrom(liveDocs); + engine.delete(new Engine.Delete("test", deleteId, newUid(deleteId), primaryTerm.get())); + liveDocsWithSource.remove(deleteId); + liveDocs.remove(deleteId); + engine.flush(); + } + globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); + engine.forceMerge(true, 1, false, false, false); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); + assertThat(readAllOperationsInLucene(engine, mapperService), hasSize(liveDocsWithSource.size())); + } + } + public void testForceMergeAndClose() throws IOException, InterruptedException { int numIters = randomIntBetween(2, 10); for (int j = 0; j < numIters; j++) { @@ -1384,126 +1607,10 @@ public class InternalEngineTests extends EngineTestCase { assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); } - protected List<Engine.Operation> generateSingleDocHistory(boolean forReplica, VersionType versionType, - long primaryTerm, - int minOpCount, int maxOpCount, String docId) { - final int numOfOps = randomIntBetween(minOpCount, maxOpCount); - final List<Engine.Operation> ops = new ArrayList<>(); - final Term id = newUid(docId); - final int startWithSeqNo = 0; - final String valuePrefix = (forReplica ? "r_" : "p_" ) + docId + "_"; - final boolean incrementTermWhenIntroducingSeqNo = randomBoolean(); - for (int i = 0; i < numOfOps; i++) { - final Engine.Operation op; - final long version; - switch (versionType) { - case INTERNAL: - version = forReplica ? i : Versions.MATCH_ANY; - break; - case EXTERNAL: - version = i; - break; - case EXTERNAL_GTE: - version = randomBoolean() ? Math.max(i - 1, 0) : i; - break; - case FORCE: - version = randomNonNegativeLong(); - break; - default: - throw new UnsupportedOperationException("unknown version type: " + versionType); - } - if (randomBoolean()) { - op = new Engine.Index(id, testParsedDocument(docId, null, testDocumentWithTextField(valuePrefix + i), B_1, null), - forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbers.UNASSIGNED_SEQ_NO, - forReplica && i >= startWithSeqNo && incrementTermWhenIntroducingSeqNo ? primaryTerm + 1 : primaryTerm, - version, - forReplica ? null : versionType, - forReplica ?
REPLICA : PRIMARY, - System.currentTimeMillis()); - } - ops.add(op); - } - return ops; - } - public void testOutOfOrderDocsOnReplica() throws IOException { final List ops = generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE, VersionType.FORCE), 2, 2, 20, "1"); - assertOpsOnReplica(ops, replicaEngine, true); - } - - private void assertOpsOnReplica(List ops, InternalEngine replicaEngine, boolean shuffleOps) throws IOException { - final Engine.Operation lastOp = ops.get(ops.size() - 1); - final String lastFieldValue; - if (lastOp instanceof Engine.Index) { - Engine.Index index = (Engine.Index) lastOp; - lastFieldValue = index.docs().get(0).get("value"); - } else { - // delete - lastFieldValue = null; - } - if (shuffleOps) { - int firstOpWithSeqNo = 0; - while (firstOpWithSeqNo < ops.size() && ops.get(firstOpWithSeqNo).seqNo() < 0) { - firstOpWithSeqNo++; - } - // shuffle ops but make sure legacy ops are first - shuffle(ops.subList(0, firstOpWithSeqNo), random()); - shuffle(ops.subList(firstOpWithSeqNo, ops.size()), random()); - } - boolean firstOp = true; - for (Engine.Operation op : ops) { - logger.info("performing [{}], v [{}], seq# [{}], term [{}]", - op.operationType().name().charAt(0), op.version(), op.seqNo(), op.primaryTerm()); - if (op instanceof Engine.Index) { - Engine.IndexResult result = replicaEngine.index((Engine.Index) op); - // replicas don't really care to about creation status of documents - // this allows to ignore the case where a document was found in the live version maps in - // a delete state and return false for the created flag in favor of code simplicity - // as deleted or not. This check is just signal regression so a decision can be made if it's - // intentional - assertThat(result.isCreated(), equalTo(firstOp)); - assertThat(result.getVersion(), equalTo(op.version())); - assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); - - } else { - Engine.DeleteResult result = replicaEngine.delete((Engine.Delete) op); - // Replicas don't really care to about found status of documents - // this allows to ignore the case where a document was found in the live version maps in - // a delete state and return true for the found flag in favor of code simplicity - // his check is just signal regression so a decision can be made if it's - // intentional - assertThat(result.isFound(), equalTo(firstOp == false)); - assertThat(result.getVersion(), equalTo(op.version())); - assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); - } - if (randomBoolean()) { - engine.refresh("test"); - } - if (randomBoolean()) { - engine.flush(); - engine.refresh("test"); - } - firstOp = false; - } - - assertVisibleCount(replicaEngine, lastFieldValue == null ? 
0 : 1); - if (lastFieldValue != null) { - try (Searcher searcher = replicaEngine.acquireSearcher("test")) { - final TotalHitCountCollector collector = new TotalHitCountCollector(); - searcher.searcher().search(new TermQuery(new Term("value", lastFieldValue)), collector); - assertThat(collector.getTotalHits(), equalTo(1)); - } - } + assertOpsOnReplica(ops, replicaEngine, true, logger); } public void testConcurrentOutOfOrderDocsOnReplica() throws IOException, InterruptedException { @@ -1531,11 +1638,12 @@ public class InternalEngineTests extends EngineTestCase { } // randomly interleave final AtomicLong seqNoGenerator = new AtomicLong(); - Function seqNoUpdater = operation -> { - final long newSeqNo = seqNoGenerator.getAndIncrement(); + BiFunction seqNoUpdater = (operation, newSeqNo) -> { if (operation instanceof Engine.Index) { Engine.Index index = (Engine.Index) operation; - return new Engine.Index(index.uid(), index.parsedDoc(), newSeqNo, index.primaryTerm(), index.version(), + Document doc = testDocumentWithTextField(index.docs().get(0).get("value")); + ParsedDocument parsedDocument = testParsedDocument(index.id(), index.routing(), doc, index.source(), null); + return new Engine.Index(index.uid(), parsedDocument, newSeqNo, index.primaryTerm(), index.version(), index.versionType(), index.origin(), index.startTime(), index.getAutoGeneratedIdTimestamp(), index.isRetry()); } else { Engine.Delete delete = (Engine.Delete) operation; @@ -1548,12 +1656,12 @@ public class InternalEngineTests extends EngineTestCase { Iterator iter2 = opsDoc2.iterator(); while (iter1.hasNext() && iter2.hasNext()) { final Engine.Operation next = randomBoolean() ? iter1.next() : iter2.next(); - allOps.add(seqNoUpdater.apply(next)); + allOps.add(seqNoUpdater.apply(next, seqNoGenerator.getAndIncrement())); } - iter1.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o))); - iter2.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o))); + iter1.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o, seqNoGenerator.getAndIncrement()))); + iter2.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o, seqNoGenerator.getAndIncrement()))); // insert some duplicates - allOps.addAll(randomSubsetOf(allOps)); + randomSubsetOf(allOps).forEach(op -> allOps.add(seqNoUpdater.apply(op, op.seqNo()))); shuffle(allOps, random()); concurrentlyApplyOps(allOps, engine); @@ -1585,42 +1693,6 @@ public class InternalEngineTests extends EngineTestCase { assertVisibleCount(engine, totalExpectedOps); } - private void concurrentlyApplyOps(List ops, InternalEngine engine) throws InterruptedException { - Thread[] thread = new Thread[randomIntBetween(3, 5)]; - CountDownLatch startGun = new CountDownLatch(thread.length); - AtomicInteger offset = new AtomicInteger(-1); - for (int i = 0; i < thread.length; i++) { - thread[i] = new Thread(() -> { - startGun.countDown(); - try { - startGun.await(); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - int docOffset; - while ((docOffset = offset.incrementAndGet()) < ops.size()) { - try { - final Engine.Operation op = ops.get(docOffset); - if (op instanceof Engine.Index) { - engine.index((Engine.Index) op); - } else { - engine.delete((Engine.Delete) op); - } - if ((docOffset + 1) % 4 == 0) { - engine.refresh("test"); - } - } catch (IOException e) { - throw new AssertionError(e); - } - } - }); - thread[i].start(); - } - for (int i = 0; i < thread.length; i++) { - thread[i].join(); - } - } - public void testInternalVersioningOnPrimary() throws IOException { final List ops = 
generateSingleDocHistory(false, VersionType.INTERNAL, 2, 2, 20, "1"); assertOpsOnPrimary(ops, Versions.NOT_FOUND, true, engine); @@ -1831,7 +1903,7 @@ public class InternalEngineTests extends EngineTestCase { final boolean deletedOnReplica = lastReplicaOp instanceof Engine.Delete; final long finalReplicaVersion = lastReplicaOp.version(); final long finalReplicaSeqNo = lastReplicaOp.seqNo(); - assertOpsOnReplica(replicaOps, replicaEngine, true); + assertOpsOnReplica(replicaOps, replicaEngine, true, logger); final int opsOnPrimary = assertOpsOnPrimary(primaryOps, finalReplicaVersion, deletedOnReplica, replicaEngine); final long currentSeqNo = getSequenceID(replicaEngine, new Engine.Get(false, false, "type", lastReplicaOp.uid().text(), lastReplicaOp.uid())).v1(); @@ -2125,7 +2197,7 @@ public class InternalEngineTests extends EngineTestCase { trimUnsafeCommits(initialEngine.engineConfig); try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())){ - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(primarySeqNo, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); assertThat( @@ -2446,7 +2518,7 @@ public class InternalEngineTests extends EngineTestCase { try (InternalEngine engine = createEngine(config)) { engine.index(firstIndexRequest); globalCheckpoint.set(engine.getLocalCheckpoint()); - expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); + expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE)); Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); @@ -2468,7 +2540,7 @@ public class InternalEngineTests extends EngineTestCase { assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); } assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); @@ -2485,7 +2557,7 @@ public class InternalEngineTests extends EngineTestCase { Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(2, engine.getTranslog().currentFileGeneration()); assertEquals(0L, engine.getTranslog().stats().getUncommittedOperations()); } @@ -2499,7 +2571,7 @@ public class InternalEngineTests extends EngineTestCase { Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("no changes - nothing to commit", "1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); 
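Most of the mechanical churn in the hunks around here is one API change: recoverFromTranslog no longer pulls its recovery runner out of the EngineConfig but takes the handler and an upper sequence-number bound explicitly. A short sketch of the two call shapes used by the updated tests, assuming an already-constructed engine and translogHandler:

    // Replay the whole translog (no upper bound), as most tests now do.
    engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);

    // Or stop at a recovery target such as the global checkpoint; operations
    // above the bound remain in the translog unapplied, as exercised by
    // testRecoveryFromTranslogUpToSeqNo.
    engine.recoverFromTranslog(translogHandler, globalCheckpoint.get());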
assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); @@ -2605,7 +2677,7 @@ public class InternalEngineTests extends EngineTestCase { } } }) { - engine.recoverFromTranslog(); + engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); engine.index(indexForDoc(doc1)); globalCheckpoint.set(engine.getLocalCheckpoint()); @@ -2616,7 +2688,7 @@ public class InternalEngineTests extends EngineTestCase { try (InternalEngine engine = new InternalEngine(config(indexSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpointSupplier))) { - engine.recoverFromTranslog(); + engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertVisibleCount(engine, 1); final long committedGen = Long.valueOf( engine.getLastCommittedSegmentInfos().getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); @@ -2636,14 +2708,16 @@ public class InternalEngineTests extends EngineTestCase { Engine.IndexResult indexResult = engine.index(firstIndexRequest); assertThat(indexResult.getVersion(), equalTo(1L)); } + EngineConfig config = engine.config(); assertVisibleCount(engine, numDocs); engine.close(); - trimUnsafeCommits(engine.config()); - engine = new InternalEngine(engine.config()); - engine.skipTranslogRecovery(); - try (Engine.Searcher searcher = engine.acquireSearcher("test")) { - TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10)); - assertThat(topDocs.totalHits, equalTo(0L)); + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + engine.skipTranslogRecovery(); + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10)); + assertThat(topDocs.totalHits.value, equalTo(0L)); + } } } @@ -2675,30 +2749,28 @@ public class InternalEngineTests extends EngineTestCase { assertThat(indexResult.getVersion(), equalTo(1L)); } assertVisibleCount(engine, numDocs); - - TranslogHandler parser = (TranslogHandler) engine.config().getTranslogRecoveryRunner(); - parser.mappingUpdate = dynamicUpdate(); + translogHandler = createTranslogHandler(engine.engineConfig.getIndexSettings()); + translogHandler.mappingUpdate = dynamicUpdate(); engine.close(); trimUnsafeCommits(copy(engine.config(), inSyncGlobalCheckpointSupplier)); engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier)); // we need to reuse the engine config unless the parser.mappingModified won't work - engine.recoverFromTranslog(); + engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertVisibleCount(engine, numDocs, false); - parser = (TranslogHandler) engine.config().getTranslogRecoveryRunner(); - assertEquals(numDocs, parser.appliedOperations()); - if (parser.mappingUpdate != null) { - assertEquals(1, parser.getRecoveredTypes().size()); - assertTrue(parser.getRecoveredTypes().containsKey("test")); + assertEquals(numDocs, translogHandler.appliedOperations()); + if (translogHandler.mappingUpdate != null) { + assertEquals(1, translogHandler.getRecoveredTypes().size()); + assertTrue(translogHandler.getRecoveredTypes().containsKey("test")); } else { - assertEquals(0, parser.getRecoveredTypes().size()); + assertEquals(0, translogHandler.getRecoveredTypes().size()); } engine.close(); + translogHandler = createTranslogHandler(engine.engineConfig.getIndexSettings()); 
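Another recurring mechanical change in these hunks comes from Lucene 8, where TopDocs.totalHits became a TotalHits object instead of a bare long. A minimal sketch of the new shape, assuming any IndexSearcher:

    TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10);
    long hitCount = topDocs.totalHits.value; // was: topDocs.totalHits
    // The relation reports whether the count is exact or only a lower bound.
    boolean exact = topDocs.totalHits.relation == TotalHits.Relation.EQUAL_TO;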
engine = createEngine(store, primaryTranslogDir, inSyncGlobalCheckpointSupplier); assertVisibleCount(engine, numDocs, false); - parser = (TranslogHandler) engine.config().getTranslogRecoveryRunner(); - assertEquals(0, parser.appliedOperations()); + assertEquals(0, translogHandler.appliedOperations()); final boolean flush = randomBoolean(); int randomId = randomIntBetween(numDocs + 1, numDocs + 10); @@ -2718,17 +2790,17 @@ public class InternalEngineTests extends EngineTestCase { assertThat(result.getVersion(), equalTo(2L)); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1); - assertThat(topDocs.totalHits, equalTo(numDocs + 1L)); + assertThat(topDocs.totalHits.value, equalTo(numDocs + 1L)); } engine.close(); + translogHandler = createTranslogHandler(engine.engineConfig.getIndexSettings()); engine = createEngine(store, primaryTranslogDir, inSyncGlobalCheckpointSupplier); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1); - assertThat(topDocs.totalHits, equalTo(numDocs + 1L)); + assertThat(topDocs.totalHits.value, equalTo(numDocs + 1L)); } - parser = (TranslogHandler) engine.config().getTranslogRecoveryRunner(); - assertEquals(flush ? 1 : 2, parser.appliedOperations()); + assertEquals(flush ? 1 : 2, translogHandler.appliedOperations()); engine.delete(new Engine.Delete("test", Integer.toString(randomId), newUid(doc), primaryTerm.get())); if (randomBoolean()) { engine.refresh("test"); @@ -2738,7 +2810,7 @@ public class InternalEngineTests extends EngineTestCase { } try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs); - assertThat(topDocs.totalHits, equalTo((long) numDocs)); + assertThat(topDocs.totalHits.value, equalTo((long) numDocs)); } } @@ -2772,8 +2844,8 @@ public class InternalEngineTests extends EngineTestCase { threadPool, config.getIndexSettings(), null, store, newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), - config.getExternalRefreshListener(), config.getInternalRefreshListener(), null, config.getTranslogRecoveryRunner(), - new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm::get); + config.getExternalRefreshListener(), config.getInternalRefreshListener(), null, + new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm::get, tombstoneDocSupplier()); try { InternalEngine internalEngine = new InternalEngine(brokenConfig); fail("translog belongs to a different engine"); @@ -2902,6 +2974,12 @@ public class InternalEngineTests extends EngineTestCase { } } + @Override + public long softUpdateDocument(Term term, Iterable doc, Field... softDeletes) throws IOException { + maybeThrowFailure(); + return super.softUpdateDocument(term, doc, softDeletes); + } + @Override public long deleteDocuments(Term... 
terms) throws IOException { maybeThrowFailure(); @@ -3032,7 +3110,7 @@ public class InternalEngineTests extends EngineTestCase { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } operation = appendOnlyPrimary(doc, false, 1); retry = appendOnlyPrimary(doc, true, 1); @@ -3053,7 +3131,7 @@ public class InternalEngineTests extends EngineTestCase { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } } @@ -3097,15 +3175,15 @@ public class InternalEngineTests extends EngineTestCase { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(0, topDocs.totalHits); + assertEquals(0, topDocs.totalHits.value); } } public void testDoubleDeliveryReplicaAppendingOnly() throws IOException { - final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), + final Supplier doc = () -> testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); - Engine.Index operation = appendOnlyReplica(doc, false, 1, randomIntBetween(0, 5)); - Engine.Index retry = appendOnlyReplica(doc, true, 1, randomIntBetween(0, 5)); + Engine.Index operation = appendOnlyReplica(doc.get(), false, 1, randomIntBetween(0, 5)); + Engine.Index retry = appendOnlyReplica(doc.get(), true, 1, randomIntBetween(0, 5)); // operations with a seq# equal or lower to the local checkpoint are not indexed to lucene // and the version lookup is skipped final boolean belowLckp = operation.seqNo() == 0 && retry.seqNo() == 0; @@ -3142,10 +3220,10 @@ public class InternalEngineTests extends EngineTestCase { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } - operation = randomAppendOnly(doc, false, 1); - retry = randomAppendOnly(doc, true, 1); + operation = randomAppendOnly(doc.get(), false, 1); + retry = randomAppendOnly(doc.get(), true, 1); if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); assertNotNull(indexResult.getTranslogLocation()); @@ -3163,7 +3241,7 @@ public class InternalEngineTests extends EngineTestCase { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } } @@ -3203,13 +3281,15 @@ public class InternalEngineTests extends EngineTestCase { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } + List 
ops = readAllOperationsInLucene(engine, createMapperService("test")); + assertThat(ops.stream().map(o -> o.seqNo()).collect(Collectors.toList()), hasItem(20L)); } public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOException { @@ -3233,7 +3313,7 @@ public class InternalEngineTests extends EngineTestCase { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), null, REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); @@ -3242,7 +3322,7 @@ public class InternalEngineTests extends EngineTestCase { replicaEngine.refresh("test"); try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } } @@ -3267,7 +3347,7 @@ public class InternalEngineTests extends EngineTestCase { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } Engine.Index secondIndexRequestReplica = new Engine.Index(newUid(doc), doc, result.getSeqNo(), secondIndexRequest.primaryTerm(), result.getVersion(), null, REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); @@ -3275,7 +3355,7 @@ public class InternalEngineTests extends EngineTestCase { replicaEngine.refresh("test"); try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } } @@ -3352,7 +3432,7 @@ public class InternalEngineTests extends EngineTestCase { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(numDocs, topDocs.totalHits); + assertEquals(numDocs, topDocs.totalHits.value); } if (primary) { // primaries rely on lucene dedup and may index the same document twice @@ -3383,7 +3463,7 @@ public class InternalEngineTests extends EngineTestCase { } try (Store store = createStore(newFSDirectory(storeDir)); Engine engine = new InternalEngine(configSupplier.apply(store))) { assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(timestamp1, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); @@ -3452,7 +3532,7 @@ public class InternalEngineTests extends EngineTestCase { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(docs.size(), topDocs.totalHits); + assertEquals(docs.size(), topDocs.totalHits.value); } assertEquals(0, engine.getNumVersionLookups()); assertEquals(0, engine.getNumIndexVersionsLookups()); @@ -3666,7 +3746,7 
@@ public class InternalEngineTests extends EngineTestCase { } trimUnsafeCommits(initialEngine.config()); try (Engine recoveringEngine = new InternalEngine(initialEngine.config())) { - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); recoveringEngine.fillSeqNoGaps(2); assertThat(recoveringEngine.getLocalCheckpoint(), greaterThanOrEqualTo((long) (docs - 1))); } @@ -3678,20 +3758,22 @@ public class InternalEngineTests extends EngineTestCase { final List<Engine.Operation> operations = new ArrayList<>(); final int numberOfOperations = randomIntBetween(16, 32); - final Document document = testDocumentWithTextField(); final AtomicLong sequenceNumber = new AtomicLong(); final Engine.Operation.Origin origin = randomFrom(LOCAL_TRANSLOG_RECOVERY, PEER_RECOVERY, PRIMARY, REPLICA); final LongSupplier sequenceNumberSupplier = origin == PRIMARY ? () -> SequenceNumbers.UNASSIGNED_SEQ_NO : sequenceNumber::getAndIncrement; - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); - final ParsedDocument doc = testParsedDocument("1", null, document, B_1, null); - final Term uid = newUid(doc); + final Supplier<ParsedDocument> doc = () -> { + final Document document = testDocumentWithTextField(); + document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); + return testParsedDocument("1", null, document, B_1, null); + }; + final Term uid = newUid("1"); final BiFunction<String, Engine.SearcherScope, Engine.Searcher> searcherFactory = engine::acquireSearcher; for (int i = 0; i < numberOfOperations; i++) { if (randomBoolean()) { final Engine.Index index = new Engine.Index( uid, - doc, + doc.get(), sequenceNumberSupplier.getAsLong(), 1, i, @@ -3767,15 +3849,17 @@ public class InternalEngineTests extends EngineTestCase { maxSeqNo, localCheckpoint); trimUnsafeCommits(engine.config()); - noOpEngine = new InternalEngine(engine.config(), supplier) { + EngineConfig noopEngineConfig = copy(engine.config(), new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, + () -> new MatchAllDocsQuery(), engine.config().getMergePolicy())); + noOpEngine = new InternalEngine(noopEngineConfig, supplier) { @Override protected long doGenerateSeqNoForOperation(Operation operation) { throw new UnsupportedOperationException(); } }; - noOpEngine.recoverFromTranslog(); + noOpEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get()); - final String reason = randomAlphaOfLength(16); + final String reason = "filling gaps"; noOpEngine.noOp(new Engine.NoOp(maxSeqNo + 1, primaryTerm.get(), LOCAL_TRANSLOG_RECOVERY, System.nanoTime(), reason)); assertThat(noOpEngine.getLocalCheckpoint(), equalTo((long) (maxSeqNo + 1))); assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled)); @@ -3797,11 +3881,77 @@ public class InternalEngineTests extends EngineTestCase { assertThat(noOp.seqNo(), equalTo((long) (maxSeqNo + 2))); assertThat(noOp.primaryTerm(), equalTo(primaryTerm.get())); assertThat(noOp.reason(), equalTo(reason)); + if (engine.engineConfig.getIndexSettings().isSoftDeleteEnabled()) { + MapperService mapperService = createMapperService("test"); + List<Translog.Operation> operationsFromLucene = readAllOperationsInLucene(noOpEngine, mapperService); + assertThat(operationsFromLucene, hasSize(maxSeqNo + 2 - localCheckpoint)); // one no-op per filled gap plus the 2 manual no-ops.
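The size assertion above, together with the verification loop that follows, fixes the gap-filling arithmetic: with soft deletes enabled, every gap-filling no-op and both manual no-ops must be readable back from the Lucene history. Assuming localCheckpoint < maxSeqNo when the engine is reopened, the expected count works out as:

    // fillSeqNoGaps writes one no-op for each unused seq# in
    // (localCheckpoint, maxSeqNo], i.e. maxSeqNo - localCheckpoint of them,
    // and the test adds two more no-ops at maxSeqNo + 1 and maxSeqNo + 2:
    int expectedOps = (maxSeqNo - localCheckpoint) + 2; // == maxSeqNo + 2 - localCheckpoint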
+ for (int i = 0; i < operationsFromLucene.size(); i++) { + assertThat(operationsFromLucene.get(i), equalTo(new Translog.NoOp(localCheckpoint + 1 + i, primaryTerm.get(), "filling gaps"))); + } + assertConsistentHistoryBetweenTranslogAndLuceneIndex(noOpEngine, mapperService); + } } finally { IOUtils.close(noOpEngine); } } + /** + * Verifies that a segment containing only no-ops can be used to look up _version and _seqno. + */ + public void testSegmentContainsOnlyNoOps() throws Exception { + Engine.NoOpResult noOpResult = engine.noOp(new Engine.NoOp(1, primaryTerm.get(), + randomFrom(Engine.Operation.Origin.values()), randomNonNegativeLong(), "test")); + assertThat(noOpResult.getFailure(), nullValue()); + engine.refresh("test"); + Engine.DeleteResult deleteResult = engine.delete(replicaDeleteForDoc("id", 1, 2, randomNonNegativeLong())); + assertThat(deleteResult.getFailure(), nullValue()); + engine.refresh("test"); + } + + /** + * A simple test to check that a random combination of operations can coexist in segments and be looked up. + * This is needed because some fields in Lucene may not exist if a segment is missing certain operation types, and this test checks for that. + * For example, a segment containing only no-ops has neither _uid nor _version. + */ + public void testRandomOperations() throws Exception { + int numOps = between(10, 100); + for (int i = 0; i < numOps; i++) { + String id = Integer.toString(randomIntBetween(1, 10)); + ParsedDocument doc = createParsedDoc(id, null); + Engine.Operation.TYPE type = randomFrom(Engine.Operation.TYPE.values()); + switch (type) { + case INDEX: + Engine.IndexResult index = engine.index(replicaIndexForDoc(doc, between(1, 100), i, randomBoolean())); + assertThat(index.getFailure(), nullValue()); + break; + case DELETE: + Engine.DeleteResult delete = engine.delete(replicaDeleteForDoc(doc.id(), between(1, 100), i, randomNonNegativeLong())); + assertThat(delete.getFailure(), nullValue()); + break; + case NO_OP: + Engine.NoOpResult noOp = engine.noOp(new Engine.NoOp(i, primaryTerm.get(), + randomFrom(Engine.Operation.Origin.values()), randomNonNegativeLong(), "")); + assertThat(noOp.getFailure(), nullValue()); + break; + default: + throw new IllegalStateException("Invalid op [" + type + "]"); + } + if (randomBoolean()) { + engine.refresh("test"); + } + if (randomBoolean()) { + engine.flush(); + } + if (randomBoolean()) { + engine.forceMerge(randomBoolean(), between(1, 10), randomBoolean(), false, false); + } + } + if (engine.engineConfig.getIndexSettings().isSoftDeleteEnabled()) { + List<Translog.Operation> operations = readAllOperationsInLucene(engine, createMapperService("test")); + assertThat(operations, hasSize(numOps)); + } + } + public void testMinGenerationForSeqNo() throws IOException, BrokenBarrierException, InterruptedException { engine.close(); final int numberOfTriplets = randomIntBetween(1, 32); @@ -3985,7 +4135,7 @@ public class InternalEngineTests extends EngineTestCase { trimUnsafeCommits(copy(replicaEngine.config(), globalCheckpoint::get)); recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); assertEquals(numDocsOnReplica, getTranslog(recoveringEngine).stats().getUncommittedOperations()); - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); assertEquals(checkpointOnReplica, recoveringEngine.getLocalCheckpoint()); assertEquals((maxSeqIDOnReplica + 1) - numDocsOnReplica,
                 recoveringEngine.fillSeqNoGaps(2));
@@ -4021,7 +4171,7 @@ public class InternalEngineTests extends EngineTestCase {
             if (flushed) {
                 assertThat(recoveringEngine.getTranslogStats().getUncommittedOperations(), equalTo(0));
             }
-            recoveringEngine.recoverFromTranslog();
+            recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
             assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo());
             assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint());
             assertEquals(0, recoveringEngine.fillSeqNoGaps(3));
@@ -4214,7 +4364,7 @@ public class InternalEngineTests extends EngineTestCase {
                 super.commitIndexWriter(writer, translog, syncId);
             }
         }) {
-            engine.recoverFromTranslog();
+            engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
             int numDocs = scaledRandomIntBetween(10, 100);
             for (int docId = 0; docId < numDocs; docId++) {
                 ParseContext.Document document = testDocumentWithTextField();
@@ -4342,13 +4492,18 @@ public class InternalEngineTests extends EngineTestCase {

     public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception {
         IOUtils.close(engine, store);
-        store = createStore();
+        final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test",
+            Settings.builder().put(defaultSettings.getSettings())
+                .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), -1)
+                .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1).build());
         final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
-        try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) {
+        try (Store store = createStore();
+             InternalEngine engine =
+                 createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) {
             final int numDocs = scaledRandomIntBetween(10, 100);
             for (int docId = 0; docId < numDocs; docId++) {
                 index(engine, docId);
-                if (frequently()) {
+                if (rarely()) {
                     engine.flush(randomBoolean(), randomBoolean());
                 }
             }
@@ -4362,6 +4517,7 @@ public class InternalEngineTests extends EngineTestCase {
             globalCheckpoint.set(randomLongBetween(engine.getLocalCheckpoint(), Long.MAX_VALUE));
             engine.syncTranslog();
             assertThat(DirectoryReader.listCommits(store.directory()), contains(commits.get(commits.size() - 1)));
+            assertThat(engine.getTranslog().totalOperations(), equalTo(0));
         }
     }

@@ -4724,6 +4880,159 @@ public class InternalEngineTests extends EngineTestCase {
         }
     }

+    public void testLuceneHistoryOnPrimary() throws Exception {
+        final List<Engine.Operation> operations = generateSingleDocHistory(false,
+            randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), 2, 10, 300, "1");
+        assertOperationHistoryInLucene(operations);
+    }
+
+    public void testLuceneHistoryOnReplica() throws Exception {
+        final List<Engine.Operation> operations = generateSingleDocHistory(true,
+            randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), 2, 10, 300, "2");
+        Randomness.shuffle(operations);
+        assertOperationHistoryInLucene(operations);
+    }
+
+    private void assertOperationHistoryInLucene(List<Engine.Operation> operations) throws IOException {
+        final MergePolicy keepSoftDeleteDocsMP = new SoftDeletesRetentionMergePolicy(
+            Lucene.SOFT_DELETES_FIELD, () -> new MatchAllDocsQuery(), engine.config().getMergePolicy());
+        Settings.Builder settings = Settings.builder()
+            .put(defaultSettings.getSettings())
+            .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
+            .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10));
+        final IndexMetaData indexMetaData =
+            IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build();
+        final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetaData);
+        Set<Long> expectedSeqNos = new HashSet<>();
+        try (Store store = createStore();
+             Engine engine = createEngine(config(indexSettings, store, createTempDir(), keepSoftDeleteDocsMP, null))) {
+            for (Engine.Operation op : operations) {
+                if (op instanceof Engine.Index) {
+                    Engine.IndexResult indexResult = engine.index((Engine.Index) op);
+                    assertThat(indexResult.getFailure(), nullValue());
+                    expectedSeqNos.add(indexResult.getSeqNo());
+                } else {
+                    Engine.DeleteResult deleteResult = engine.delete((Engine.Delete) op);
+                    assertThat(deleteResult.getFailure(), nullValue());
+                    expectedSeqNos.add(deleteResult.getSeqNo());
+                }
+                if (rarely()) {
+                    engine.refresh("test");
+                }
+                if (rarely()) {
+                    engine.flush();
+                }
+                if (rarely()) {
+                    engine.forceMerge(true);
+                }
+            }
+            MapperService mapperService = createMapperService("test");
+            List<Translog.Operation> actualOps = readAllOperationsInLucene(engine, mapperService);
+            assertThat(actualOps.stream().map(o -> o.seqNo()).collect(Collectors.toList()), containsInAnyOrder(expectedSeqNos.toArray()));
+            assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
+        }
+    }
+
+    public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException {
+        IOUtils.close(engine, store);
+        Settings.Builder settings = Settings.builder()
+            .put(defaultSettings.getSettings())
+            .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
+            .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10));
+        final IndexMetaData indexMetaData = IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build();
+        final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetaData);
+        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+        final List<Engine.Operation> operations = generateSingleDocHistory(true,
+            randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), 2, 10, 300, "2");
+        Randomness.shuffle(operations);
+        Set<Long> existingSeqNos = new HashSet<>();
+        store = createStore();
+        engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get));
+        assertThat(engine.getMinRetainedSeqNo(), equalTo(0L));
+        long lastMinRetainedSeqNo = engine.getMinRetainedSeqNo();
+        for (Engine.Operation op : operations) {
+            final Engine.Result result;
+            if (op instanceof Engine.Index) {
+                result = engine.index((Engine.Index) op);
+            } else {
+                result = engine.delete((Engine.Delete) op);
+            }
+            existingSeqNos.add(result.getSeqNo());
+            if (randomBoolean()) {
+                globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getCheckpoint()));
+            }
+            if (rarely()) {
+                settings.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10));
+                indexSettings.updateIndexMetaData(IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build());
+                engine.onSettingsChanged();
+            }
+            if (rarely()) {
+                engine.refresh("test");
+            }
+            if (rarely()) {
+                engine.flush(true, true);
+                assertThat(Long.parseLong(engine.getLastCommittedSegmentInfos().userData.get(Engine.MIN_RETAINED_SEQNO)),
+                    equalTo(engine.getMinRetainedSeqNo()));
+            }
+            if (rarely()) {
+                engine.forceMerge(randomBoolean());
+            }
+            try (Closeable ignored = engine.acquireRetentionLockForPeerRecovery()) {
+                long minRetainSeqNos =
+                    engine.getMinRetainedSeqNo();
+                assertThat(minRetainSeqNos, lessThanOrEqualTo(globalCheckpoint.get() + 1));
+                Long[] expectedOps = existingSeqNos.stream().filter(seqno -> seqno >= minRetainSeqNos).toArray(Long[]::new);
+                Set<Long> actualOps = readAllOperationsInLucene(engine, createMapperService("test")).stream()
+                    .map(Translog.Operation::seqNo).collect(Collectors.toSet());
+                assertThat(actualOps, containsInAnyOrder(expectedOps));
+            }
+            try (Engine.IndexCommitRef commitRef = engine.acquireSafeIndexCommit()) {
+                IndexCommit safeCommit = commitRef.getIndexCommit();
+                if (safeCommit.getUserData().containsKey(Engine.MIN_RETAINED_SEQNO)) {
+                    lastMinRetainedSeqNo = Long.parseLong(safeCommit.getUserData().get(Engine.MIN_RETAINED_SEQNO));
+                }
+            }
+        }
+        if (randomBoolean()) {
+            engine.close();
+        } else {
+            engine.flushAndClose();
+        }
+        trimUnsafeCommits(engine.config());
+        try (InternalEngine recoveringEngine = new InternalEngine(engine.config())) {
+            assertThat(recoveringEngine.getMinRetainedSeqNo(), equalTo(lastMinRetainedSeqNo));
+        }
+    }
+
+    public void testLastRefreshCheckpoint() throws Exception {
+        AtomicBoolean done = new AtomicBoolean();
+        Thread[] refreshThreads = new Thread[between(1, 8)];
+        CountDownLatch latch = new CountDownLatch(refreshThreads.length);
+        for (int i = 0; i < refreshThreads.length; i++) {
+            latch.countDown();
+            refreshThreads[i] = new Thread(() -> {
+                while (done.get() == false) {
+                    long checkPointBeforeRefresh = engine.getLocalCheckpoint();
+                    engine.refresh("test", randomFrom(Engine.SearcherScope.values()));
+                    assertThat(engine.lastRefreshedCheckpoint(), greaterThanOrEqualTo(checkPointBeforeRefresh));
+                }
+            });
+            refreshThreads[i].start();
+        }
+        latch.await();
+        List<Engine.Operation> ops = generateSingleDocHistory(true, VersionType.EXTERNAL, 1, 10, 1000, "1");
+        concurrentlyApplyOps(ops, engine);
+        done.set(true);
+        for (Thread thread : refreshThreads) {
+            thread.join();
+        }
+        engine.refresh("test");
+        assertThat(engine.lastRefreshedCheckpoint(), equalTo(engine.getLocalCheckpoint()));
+    }
+
+    public void testAcquireSearcherOnClosingEngine() throws Exception {
+        engine.close();
+        expectThrows(AlreadyClosedException.class, () -> engine.acquireSearcher("test"));
+    }
+
     private static void trimUnsafeCommits(EngineConfig config) throws IOException {
         final Store store = config.getStore();
         final TranslogConfig translogConfig = config.getTranslogConfig();
diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java
new file mode 100644
index 00000000000..412b91aaef2
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java
@@ -0,0 +1,296 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.internal.io.IOUtils;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.translog.SnapshotMatchers;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.test.IndexSettingsModule;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+public class LuceneChangesSnapshotTests extends EngineTestCase {
+    private MapperService mapperService;
+
+    @Before
+    public void createMapper() throws Exception {
+        mapperService = createMapperService("test");
+    }
+
+    @Override
+    protected Settings indexSettings() {
+        return Settings.builder().put(super.indexSettings())
+            .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) // always enable soft-deletes
+            .build();
+    }
+
+    public void testBasics() throws Exception {
+        long fromSeqNo = randomNonNegativeLong();
+        long toSeqNo = randomLongBetween(fromSeqNo, Long.MAX_VALUE);
+        // Empty engine
+        try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) {
+            IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot));
+            assertThat(error.getMessage(),
+                containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found"));
+        }
+        try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, false)) {
+            assertThat(snapshot, SnapshotMatchers.size(0));
+        }
+        int numOps = between(1, 100);
+        int refreshedSeqNo = -1;
+        for (int i = 0; i < numOps; i++) {
+            String id = Integer.toString(randomIntBetween(i, i + 5));
+            ParsedDocument doc = createParsedDoc(id, null, randomBoolean());
+            if (randomBoolean()) {
+                engine.index(indexForDoc(doc));
+            } else {
+                engine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc.id()), primaryTerm.get()));
+            }
+            if (rarely()) {
+                if (randomBoolean()) {
+                    engine.flush();
+                } else {
+                    engine.refresh("test");
+                }
+                refreshedSeqNo = i;
+            }
+        }
+        if (refreshedSeqNo == -1) {
+            fromSeqNo = between(0, numOps);
+            toSeqNo = randomLongBetween(fromSeqNo, numOps * 2);
+
+            Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
+            try (Translog.Snapshot snapshot = new LuceneChangesSnapshot(
+                searcher, mapperService, between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, toSeqNo, false)) {
+                searcher = null;
+                assertThat(snapshot, SnapshotMatchers.size(0));
+            } finally {
+                IOUtils.close(searcher);
+            }
+
+            searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
+            try (Translog.Snapshot snapshot = new LuceneChangesSnapshot(
+                searcher, mapperService, between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, toSeqNo, true)) {
+                searcher = null;
+                IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot));
+                assertThat(error.getMessage(),
+                    containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found"));
+            } finally {
+                IOUtils.close(searcher);
+            }
+        } else {
+            fromSeqNo = randomLongBetween(0, refreshedSeqNo);
+            toSeqNo = randomLongBetween(refreshedSeqNo + 1, numOps * 2);
+            Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
+            try (Translog.Snapshot snapshot = new LuceneChangesSnapshot(
+                searcher, mapperService, between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, toSeqNo, false)) {
+                searcher = null;
+                assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, refreshedSeqNo));
+            } finally {
+                IOUtils.close(searcher);
+            }
+            searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
+            try (Translog.Snapshot snapshot = new LuceneChangesSnapshot(
+                searcher, mapperService, between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, toSeqNo, true)) {
+                searcher = null;
+                IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot));
+                assertThat(error.getMessage(),
+                    containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found"));
+            } finally {
+                IOUtils.close(searcher);
+            }
+            toSeqNo = randomLongBetween(fromSeqNo, refreshedSeqNo);
+            searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
+            try (Translog.Snapshot snapshot = new LuceneChangesSnapshot(
+                searcher, mapperService, between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, toSeqNo, true)) {
+                searcher = null;
+                assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo));
+            } finally {
+                IOUtils.close(searcher);
+            }
+        }
+        // Getting a snapshot via the engine will auto-refresh
+        fromSeqNo = randomLongBetween(0, numOps - 1);
+        toSeqNo = randomLongBetween(fromSeqNo, numOps - 1);
+        try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, randomBoolean())) {
+            assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo));
+        }
+    }
+
+    public void testDedupByPrimaryTerm() throws Exception {
+        Map<Long, Long> latestOperations = new HashMap<>();
+        List<Integer> terms = Arrays.asList(between(1, 1000), between(1000, 2000));
+        int totalOps = 0;
+        for (long term : terms) {
+            final List<Engine.Operation> ops = generateSingleDocHistory(true,
+                randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE), term, 2, 20, "1");
+            primaryTerm.set(Math.max(primaryTerm.get(), term));
+            engine.rollTranslogGeneration();
+            for (Engine.Operation op : ops) {
+                // We need to simulate a rollback here as only ops after the local checkpoint get into the engine
+                if (op.seqNo() <= engine.getLocalCheckpointTracker().getCheckpoint()) {
+                    engine.getLocalCheckpointTracker().resetCheckpoint(randomLongBetween(-1, op.seqNo() - 1));
+                    engine.rollTranslogGeneration();
+                }
+                if (op instanceof Engine.Index) {
+                    engine.index((Engine.Index) op);
+                } else if (op instanceof Engine.Delete) {
+                    engine.delete((Engine.Delete) op);
+                }
+                latestOperations.put(op.seqNo(), op.primaryTerm());
+                if (rarely()) {
+                    engine.refresh("test");
+                }
+                if (rarely()) {
+                    engine.flush();
+                }
+                totalOps++;
+            }
+        }
+        long maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo();
+        engine.refresh("test");
+        Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
+        try (Translog.Snapshot snapshot = new LuceneChangesSnapshot(searcher, mapperService, between(1, 100), 0, maxSeqNo, false)) {
+            searcher = null;
+            Translog.Operation op;
+            while ((op = snapshot.next()) != null) {
+                assertThat(op.toString(),
+                    op.primaryTerm(), equalTo(latestOperations.get(op.seqNo())));
+            }
+            assertThat(snapshot.skippedOperations(), equalTo(totalOps - latestOperations.size()));
+        } finally {
+            IOUtils.close(searcher);
+        }
+    }
+
+    public void testUpdateAndReadChangesConcurrently() throws Exception {
+        Follower[] followers = new Follower[between(1, 3)];
+        CountDownLatch readyLatch = new CountDownLatch(followers.length + 1);
+        AtomicBoolean isDone = new AtomicBoolean();
+        for (int i = 0; i < followers.length; i++) {
+            followers[i] = new Follower(engine, isDone, readyLatch);
+            followers[i].start();
+        }
+        boolean onPrimary = randomBoolean();
+        List<Engine.Operation> operations = new ArrayList<>();
+        int numOps = scaledRandomIntBetween(1, 1000);
+        for (int i = 0; i < numOps; i++) {
+            String id = Integer.toString(randomIntBetween(1, 10));
+            ParsedDocument doc = createParsedDoc(id, randomAlphaOfLengthBetween(1, 5), randomBoolean());
+            final Engine.Operation op;
+            if (onPrimary) {
+                if (randomBoolean()) {
+                    op = new Engine.Index(newUid(doc), primaryTerm.get(), doc);
+                } else {
+                    op = new Engine.Delete(doc.type(), doc.id(), newUid(doc.id()), primaryTerm.get());
+                }
+            } else {
+                if (randomBoolean()) {
+                    op = replicaIndexForDoc(doc, randomNonNegativeLong(), i, randomBoolean());
+                } else {
+                    op = replicaDeleteForDoc(doc.id(), randomNonNegativeLong(), i, randomNonNegativeLong());
+                }
+            }
+            operations.add(op);
+        }
+        readyLatch.countDown();
+        readyLatch.await();
+        concurrentlyApplyOps(operations, engine);
+        assertThat(engine.getLocalCheckpointTracker().getCheckpoint(), equalTo(operations.size() - 1L));
+        isDone.set(true);
+        for (Follower follower : followers) {
+            follower.join();
+            IOUtils.close(follower.engine, follower.engine.store);
+        }
+    }
+
+    class Follower extends Thread {
+        private final Engine leader;
+        private final InternalEngine engine;
+        private final TranslogHandler translogHandler;
+        private final AtomicBoolean isDone;
+        private final CountDownLatch readLatch;
+
+        Follower(Engine leader, AtomicBoolean isDone, CountDownLatch readLatch) throws IOException {
+            this.leader = leader;
+            this.isDone = isDone;
+            this.readLatch = readLatch;
+            this.translogHandler = new TranslogHandler(xContentRegistry(), IndexSettingsModule.newIndexSettings(shardId.getIndexName(),
+                leader.engineConfig.getIndexSettings().getSettings()));
+            this.engine = createEngine(createStore(), createTempDir());
+        }
+
+        void pullOperations(Engine follower) throws IOException {
+            long leaderCheckpoint = leader.getLocalCheckpoint();
+            long followerCheckpoint = follower.getLocalCheckpoint();
+            if (followerCheckpoint < leaderCheckpoint) {
+                long fromSeqNo = followerCheckpoint + 1;
+                long batchSize = randomLongBetween(0, 100);
+                long toSeqNo = Math.min(fromSeqNo + batchSize, leaderCheckpoint);
+                try (Translog.Snapshot snapshot = leader.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) {
+                    translogHandler.run(follower, snapshot);
+                }
+            }
+        }
+
+        @Override
+        public void run() {
+            try {
+                readLatch.countDown();
+                readLatch.await();
+                while (isDone.get() == false ||
+                    engine.getLocalCheckpointTracker().getCheckpoint() < leader.getLocalCheckpoint()) {
+                    pullOperations(engine);
+                }
+                assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
+                assertThat(getDocIds(engine, true), equalTo(getDocIds(leader, true)));
+            } catch (Exception ex) {
+                throw new AssertionError(ex);
+            }
+        }
+    }
+
+    private List<Translog.Operation> drainAll(Translog.Snapshot snapshot) throws IOException {
+        List<Translog.Operation> operations = new ArrayList<>();
+        Translog.Operation op;
+        while ((op =
+            snapshot.next()) != null) {
+            final Translog.Operation newOp = op;
+            logger.error("Reading [{}]", op);
+            assert operations.stream().allMatch(o -> o.seqNo() < newOp.seqNo()) : "Operations [" + operations + "], op [" + op + "]";
+            operations.add(newOp);
+        }
+        return operations;
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java
new file mode 100644
index 00000000000..c46b47b87d0
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.CodecReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SegmentCommitInfo;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.StandardDirectoryReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.InfoStream;
+import org.apache.lucene.util.NullInfoStream;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+public class RecoverySourcePruneMergePolicyTests extends ESTestCase {
+
+    public void testPruneAll() throws IOException {
+        try (Directory dir = newDirectory()) {
+            IndexWriterConfig iwc = newIndexWriterConfig();
+            RecoverySourcePruneMergePolicy mp = new RecoverySourcePruneMergePolicy("extra_source", MatchNoDocsQuery::new,
+                newLogMergePolicy());
+            iwc.setMergePolicy(mp);
+            try (IndexWriter writer = new IndexWriter(dir, iwc)) {
+                for (int i = 0; i < 20; i++) {
+                    if (i > 0 && randomBoolean()) {
+                        writer.flush();
+                    }
+                    Document doc = new Document();
+                    doc.add(new StoredField("source", "hello world"));
+                    doc.add(new StoredField("extra_source", "hello world"));
+                    doc.add(new NumericDocValuesField("extra_source", 1));
+                    writer.addDocument(doc);
+                }
+                writer.forceMerge(1);
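+                // with a MatchNoDocsQuery retention query nothing is retained, so merging down to a
+                // single segment gives the prune policy the chance to strip extra_source everywhere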
+                writer.commit();
+                try (DirectoryReader reader = DirectoryReader.open(writer)) {
+                    for (int i = 0; i < reader.maxDoc(); i++) {
+                        Document document = reader.document(i);
+                        assertEquals(1, document.getFields().size());
+                        assertEquals("source", document.getFields().get(0).name());
+                    }
+                    assertEquals(1, reader.leaves().size());
+                    LeafReader leafReader = reader.leaves().get(0).reader();
+                    NumericDocValues extra_source = leafReader.getNumericDocValues("extra_source");
+                    if (extra_source != null) {
+                        assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc());
+                    }
+                    if (leafReader instanceof CodecReader && reader instanceof StandardDirectoryReader) {
+                        CodecReader codecReader = (CodecReader) leafReader;
+                        StandardDirectoryReader sdr = (StandardDirectoryReader) reader;
+                        SegmentInfos segmentInfos = sdr.getSegmentInfos();
+                        MergePolicy.MergeSpecification forcedMerges = mp.findForcedDeletesMerges(segmentInfos,
+                            new MergePolicy.MergeContext() {
+                                @Override
+                                public int numDeletesToMerge(SegmentCommitInfo info) {
+                                    return info.info.maxDoc() - 1;
+                                }
+
+                                @Override
+                                public int numDeletedDocs(SegmentCommitInfo info) {
+                                    return info.info.maxDoc() - 1;
+                                }
+
+                                @Override
+                                public InfoStream getInfoStream() {
+                                    return new NullInfoStream();
+                                }
+
+                                @Override
+                                public Set<SegmentCommitInfo> getMergingSegments() {
+                                    return Collections.emptySet();
+                                }
+                            });
+                        // don't wrap if there is nothing to do
+                        assertSame(codecReader, forcedMerges.merges.get(0).wrapForMerge(codecReader));
+                    }
+                }
+            }
+        }
+    }
+
+
+    public void testPruneSome() throws IOException {
+        try (Directory dir = newDirectory()) {
+            IndexWriterConfig iwc = newIndexWriterConfig();
+            iwc.setMergePolicy(new RecoverySourcePruneMergePolicy("extra_source",
+                () -> new TermQuery(new Term("even", "true")), iwc.getMergePolicy()));
+            try (IndexWriter writer = new IndexWriter(dir, iwc)) {
+                for (int i = 0; i < 20; i++) {
+                    if (i > 0 && randomBoolean()) {
+                        writer.flush();
+                    }
+                    Document doc = new Document();
+                    doc.add(new StringField("even", Boolean.toString(i % 2 == 0), Field.Store.YES));
+                    doc.add(new StoredField("source", "hello world"));
+                    doc.add(new StoredField("extra_source", "hello world"));
+                    doc.add(new NumericDocValuesField("extra_source", 1));
+                    writer.addDocument(doc);
+                }
+                writer.forceMerge(1);
+                writer.commit();
+                try (DirectoryReader reader = DirectoryReader.open(writer)) {
+                    assertEquals(1, reader.leaves().size());
+                    NumericDocValues extra_source = reader.leaves().get(0).reader().getNumericDocValues("extra_source");
+                    assertNotNull(extra_source);
+                    for (int i = 0; i < reader.maxDoc(); i++) {
+                        Document document = reader.document(i);
+                        Set<String> collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet());
+                        assertTrue(collect.contains("source"));
+                        assertTrue(collect.contains("even"));
+                        if (collect.size() == 3) {
+                            assertTrue(collect.contains("extra_source"));
+                            assertEquals("true", document.getField("even").stringValue());
+                            assertEquals(i, extra_source.nextDoc());
+                        } else {
+                            assertEquals(2, document.getFields().size());
+                        }
+                    }
+                    assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc());
+                }
+            }
+        }
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/index/engine/SegmentTests.java b/server/src/test/java/org/elasticsearch/index/engine/SegmentTests.java
index f9641ba24d7..47946a6850c 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/SegmentTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/SegmentTests.java
@@ -77,7 +77,7 @@ public class SegmentTests extends ESTestCase {
         segment.sizeInBytes = randomNonNegativeLong();
         segment.docCount = randomIntBetween(1, Integer.MAX_VALUE);
         segment.delDocCount = randomIntBetween(0, segment.docCount);
-        segment.version = Version.LUCENE_6_5_0;
+        segment.version = Version.LUCENE_7_0_0;
         segment.compound = randomBoolean();
         segment.mergeId = randomAlphaOfLengthBetween(1, 10);
         segment.memoryInBytes = randomNonNegativeLong();
diff --git a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java
new file mode 100644
index 00000000000..f3590100382
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.index.seqno.SequenceNumbers;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class SoftDeletesPolicyTests extends ESTestCase {
+    /**
+     * Makes sure we won't advance the retained seq# if the retention lock is held
+     */
+    public void testSoftDeletesRetentionLock() {
+        long retainedOps = between(0, 10000);
+        AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+        long safeCommitCheckpoint = globalCheckpoint.get();
+        SoftDeletesPolicy policy = new SoftDeletesPolicy(globalCheckpoint::get, between(1, 10000), retainedOps);
+        long minRetainedSeqNo = policy.getMinRetainedSeqNo();
+        List<Releasable> locks = new ArrayList<>();
+        int iters = scaledRandomIntBetween(10, 1000);
+        for (int i = 0; i < iters; i++) {
+            if (randomBoolean()) {
+                locks.add(policy.acquireRetentionLock());
+            }
+            // Advances the global checkpoint and the local checkpoint of a safe commit
+            globalCheckpoint.addAndGet(between(0, 1000));
+            safeCommitCheckpoint = randomLongBetween(safeCommitCheckpoint, globalCheckpoint.get());
+            policy.setLocalCheckpointOfSafeCommit(safeCommitCheckpoint);
+            if (rarely()) {
+                retainedOps = between(0, 10000);
+                policy.setRetentionOperations(retainedOps);
+            }
+            // Release some locks
+            List<Releasable> releasingLocks = randomSubsetOf(locks);
+            locks.removeAll(releasingLocks);
+            releasingLocks.forEach(Releasable::close);
+
+            // We only expose the seqno to the merge policy if the retention lock is not held.
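+            // getRetentionQuery() is (as this test assumes) the point where the policy may advance
+            // its retained seq#, so it is invoked before asserting on getMinRetainedSeqNo() below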
+            policy.getRetentionQuery();
+            if (locks.isEmpty()) {
+                long retainedSeqNo = Math.min(safeCommitCheckpoint, globalCheckpoint.get() - retainedOps) + 1;
+                minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo);
+            }
+            assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo));
+        }
+
+        locks.forEach(Releasable::close);
+        long retainedSeqNo = Math.min(safeCommitCheckpoint, globalCheckpoint.get() - retainedOps) + 1;
+        minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo);
+        assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo));
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java
index cd1dc01d9ef..048455ccb41 100644
--- a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java
+++ b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java
@@ -115,7 +115,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes

         SortField sortField = indexFieldData.sortField(null, MultiValueMode.MIN, null, false);
         topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField));
-        assertThat(topDocs.totalHits, equalTo(3L));
+        assertThat(topDocs.totalHits.value, equalTo(3L));
         assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
         assertThat(toString(((FieldDoc) topDocs.scoreDocs[0]).fields[0]), equalTo(one()));
         assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
@@ -126,7 +126,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes

         sortField = indexFieldData.sortField(null, MultiValueMode.MAX, null, true);
         topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField));
-        assertThat(topDocs.totalHits, equalTo(3L));
+        assertThat(topDocs.totalHits.value, equalTo(3L));
         assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
         assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
         assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
@@ -192,7 +192,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes
         IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer));
         SortField sortField = indexFieldData.sortField(null, MultiValueMode.MIN, null, false);
         TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField));
-        assertThat(topDocs.totalHits, equalTo(3L));
+        assertThat(topDocs.totalHits.value, equalTo(3L));
         assertThat(topDocs.scoreDocs.length, equalTo(3));
         assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
         assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
@@ -200,7 +200,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes
         ;
         sortField = indexFieldData.sortField(null, MultiValueMode.MAX, null, true);
         topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField));
-        assertThat(topDocs.totalHits, equalTo(3L));
+        assertThat(topDocs.totalHits.value, equalTo(3L));
         assertThat(topDocs.scoreDocs.length, equalTo(3));
         assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
         assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
@@ -259,7 +259,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes
             indexFieldData.sortField(null, MultiValueMode.MIN, null, false);
         TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField));
-        assertThat(topDocs.totalHits, equalTo(8L));
+        assertThat(topDocs.totalHits.value, equalTo(8L));
         assertThat(topDocs.scoreDocs.length, equalTo(8));
         assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
         assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("!08"));
@@ -281,7 +281,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes

         sortField = indexFieldData.sortField(null, MultiValueMode.MAX, null, true);
         topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField));
-        assertThat(topDocs.totalHits, equalTo(8L));
+        assertThat(topDocs.totalHits.value, equalTo(8L));
         assertThat(topDocs.scoreDocs.length, equalTo(8));
         assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
         assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("10"));
diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java
index 04cd1376617..ef2a9b38735 100644
--- a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java
+++ b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java
@@ -265,7 +265,7 @@ public abstract class AbstractStringFieldDataTestCase extends AbstractFieldDataI
         IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer));
         SortField sortField = indexFieldData.sortField(missingValue, MultiValueMode.MIN, null, reverse);
         TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(sortField));
-        assertEquals(numDocs, topDocs.totalHits);
+        assertEquals(numDocs, topDocs.totalHits.value);
         BytesRef previousValue = reverse ? UnicodeUtil.BIG_TERM : new BytesRef();
         for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
             final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value");
@@ -319,7 +319,7 @@ public abstract class AbstractStringFieldDataTestCase extends AbstractFieldDataI
         IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer));
         SortField sortField = indexFieldData.sortField(first ? "_first" : "_last", MultiValueMode.MIN, null, reverse);
         TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(sortField));
-        assertEquals(numDocs, topDocs.totalHits);
+        assertEquals(numDocs, topDocs.totalHits.value);
         BytesRef previousValue = first ? null : reverse ?
             UnicodeUtil.BIG_TERM : new BytesRef();
         for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
             final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value");
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java
index a01ddccc939..e3739eed336 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java
@@ -448,6 +448,17 @@ public class CompletionFieldMapperTests extends ESSingleNodeTestCase {
         assertNotNull(doc.docs().get(0).getField("_ignored"));
         IndexableField ignoredFields = doc.docs().get(0).getField("_ignored");
         assertThat(ignoredFields.stringValue(), equalTo("completion"));
+
+        // null inputs are ignored
+        ParsedDocument nullDoc = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
+            .bytes(XContentFactory.jsonBuilder()
+                .startObject()
+                .nullField("completion")
+                .endObject()),
+            XContentType.JSON));
+        assertThat(nullDoc.docs().size(), equalTo(1));
+        assertNull(nullDoc.docs().get(0).get("completion"));
+        assertNull(nullDoc.docs().get(0).getField("_ignored"));
     }

     public void testPrefixQueryType() throws Exception {
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java
index 51b27094099..d16bdc444e6 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java
@@ -414,4 +414,22 @@ public class DateFieldMapperTests extends ESSingleNodeTestCase {
                 () -> mapper.merge(update.mapping()));
         assertEquals("mapper [date] of different type, current_type [date], merged_type [text]", e.getMessage());
     }
+
+    public void testIllegalFormatField() throws Exception {
+        String mapping = Strings.toString(XContentFactory.jsonBuilder()
+            .startObject()
+                .startObject("type")
+                    .startObject("properties")
+                        .startObject("field")
+                            .field("type", "date")
+                            .array("format", "test_format")
+                        .endObject()
+                    .endObject()
+                .endObject()
+            .endObject());
+
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+            () -> parser.parse("type", new CompressedXContent(mapping)));
+        assertEquals("Invalid format: [[test_format]]: expected string value", e.getMessage());
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java
index 76ca6aa7ea8..5a46b9a889f 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java
@@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.ParseContext.Document;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -311,15 +312,18 @@ public class DocumentParserTests extends ESSingleNodeTestCase {

     // creates an object mapper, which is about 100x harder than it should be....
     ObjectMapper createObjectMapper(MapperService mapperService, String name) throws Exception {
-        ParseContext context = new ParseContext.InternalParseContext(
-            Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(),
+        IndexMetaData build = IndexMetaData.builder("")
+            .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
+            .numberOfShards(1).numberOfReplicas(0).build();
+        IndexSettings settings = new IndexSettings(build, Settings.EMPTY);
+        ParseContext context = new ParseContext.InternalParseContext(settings,
             mapperService.documentMapperParser(), mapperService.documentMapper("type"), null, null);
         String[] nameParts = name.split("\\.");
         for (int i = 0; i < nameParts.length - 1; ++i) {
             context.path().add(nameParts[i]);
         }
         Mapper.Builder builder = new ObjectMapper.Builder(nameParts[nameParts.length - 1]).enabled(true);
-        Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
+        Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path());
         return (ObjectMapper)builder.build(builderContext);
     }

diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java
index b7ee74fb773..23e205b8f58 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java
@@ -71,25 +71,25 @@ public class DoubleIndexingDocTests extends ESSingleNodeTestCase {
         IndexSearcher searcher = new IndexSearcher(reader);

         TopDocs topDocs = searcher.search(mapperService.fullName("field1").termQuery("value1", context), 10);
-        assertThat(topDocs.totalHits, equalTo(2L));
+        assertThat(topDocs.totalHits.value, equalTo(2L));

         topDocs = searcher.search(mapperService.fullName("field2").termQuery("1", context), 10);
-        assertThat(topDocs.totalHits, equalTo(2L));
+        assertThat(topDocs.totalHits.value, equalTo(2L));

         topDocs = searcher.search(mapperService.fullName("field3").termQuery("1.1", context), 10);
-        assertThat(topDocs.totalHits, equalTo(2L));
+        assertThat(topDocs.totalHits.value, equalTo(2L));

         topDocs = searcher.search(mapperService.fullName("field4").termQuery("2010-01-01", context), 10);
-        assertThat(topDocs.totalHits, equalTo(2L));
+        assertThat(topDocs.totalHits.value, equalTo(2L));

         topDocs = searcher.search(mapperService.fullName("field5").termQuery("1", context), 10);
-        assertThat(topDocs.totalHits, equalTo(2L));
+        assertThat(topDocs.totalHits.value, equalTo(2L));

         topDocs = searcher.search(mapperService.fullName("field5").termQuery("2", context), 10);
-        assertThat(topDocs.totalHits, equalTo(2L));
+        assertThat(topDocs.totalHits.value, equalTo(2L));

         topDocs = searcher.search(mapperService.fullName("field5").termQuery("3", context), 10);
-        assertThat(topDocs.totalHits, equalTo(2L));
+        assertThat(topDocs.totalHits.value, equalTo(2L));
         writer.close();
         reader.close();
         dir.close();
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java
index 7d022b55454..95175af5421 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java
@@ -20,8 +20,8 @@ package org.elasticsearch.index.mapper;
 import org.apache.lucene.index.IndexOptions;
 import org.elasticsearch.Version;
-import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressedXContent;
@@ -33,6 +33,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.BooleanFieldMapper.BooleanFieldType;
 import org.elasticsearch.index.mapper.DateFieldMapper.DateFieldType;
 import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType;
@@ -188,25 +189,6 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
         assertNotNull(fieldType);
     }

-    public void testTypeNotCreatedOnIndexFailure() throws IOException, InterruptedException {
-        XContentBuilder mapping = jsonBuilder().startObject().startObject("_default_")
-            .field("dynamic", "strict")
-            .endObject().endObject();
-        Settings settings = Settings.builder()
-            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0)
-            .build();
-        createIndex("test", settings, "_default_", mapping);
-        try {
-            client().prepareIndex().setIndex("test").setType("type").setSource(jsonBuilder().startObject().field("test", "test").endObject()).get();
-            fail();
-        } catch (StrictDynamicMappingException e) {
-
-        }
-
-        GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get();
-        assertNull(getMappingsResponse.getMappings().get("test").get("type"));
-    }
-
     private String serialize(ToXContent mapper) throws Exception {
         XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
         mapper.toXContent(builder, new ToXContent.MapParams(emptyMap()));
@@ -214,7 +196,10 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
     }

     private Mapper parse(DocumentMapper mapper, DocumentMapperParser parser, XContentBuilder builder) throws Exception {
-        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+        IndexMetaData build = IndexMetaData.builder("")
+            .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
+            .numberOfShards(1).numberOfReplicas(0).build();
+        IndexSettings settings = new IndexSettings(build, Settings.EMPTY);
         SourceToParse source = SourceToParse.source("test", mapper.type(), "some_id", BytesReference.bytes(builder), builder.contentType());
         try (XContentParser xContentParser = createParser(JsonXContent.jsonXContent, source.source())) {
             ParseContext.InternalParseContext ctx = new ParseContext.InternalParseContext(settings, parser, mapper, source, xContentParser);
@@ -741,4 +726,13 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
         client().prepareIndex("test", "type", "1").setSource("foo", "abc").get();
         assertThat(index.mapperService().fullName("foo"), instanceOf(KeywordFieldMapper.KeywordFieldType.class));
     }
+
+    public void testMappingVersionAfterDynamicMappingUpdate() {
+        createIndex("test", client().admin().indices().prepareCreate("test").addMapping("type"));
+        final ClusterService clusterService = getInstanceFromNode(ClusterService.class);
+        final long previousVersion =
+            clusterService.state().metaData().index("test").getMappingVersion();
+        client().prepareIndex("test", "type", "1").setSource("field", "text").get();
+        assertThat(clusterService.state().metaData().index("test").getMappingVersion(), equalTo(1 + previousVersion));
+    }
+
 }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java
index f48603d3051..a910c2c86ba 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java
@@ -40,18 +40,11 @@ public class DynamicTemplateTests extends ESTestCase {
         templateDef.put("random_param", "random_value");

         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
-            () -> DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1));
+            () -> DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1));
         assertEquals("Illegal dynamic template parameter: [random_param]", e.getMessage());
     }

     public void testParseUnknownMatchType() {
-        Map<String, Object> templateDef = new HashMap<>();
-        templateDef.put("match_mapping_type", "short");
-        templateDef.put("mapping", Collections.singletonMap("store", true));
-        // if a wrong match type is specified, we ignore the template
-        assertNull(DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha5));
-        assertWarnings("match_mapping_type [short] is invalid and will be ignored: No field type matched on [short], " +
-            "possible values are [object, string, long, double, boolean, date, binary]");
         Map<String, Object> templateDef2 = new HashMap<>();
         templateDef2.put("match_mapping_type", "text");
         templateDef2.put("mapping", Collections.singletonMap("store", true));
@@ -79,7 +72,7 @@ public class DynamicTemplateTests extends ESTestCase {
         Map<String, Object> templateDef = new HashMap<>();
         templateDef.put("match_mapping_type", "*");
         templateDef.put("mapping", Collections.singletonMap("store", true));
-        DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha5);
+        DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1);
         assertTrue(template.match("a.b", "b", randomFrom(XContentFieldType.values())));
     }

@@ -87,7 +80,7 @@ public class DynamicTemplateTests extends ESTestCase {
         Map<String, Object> templateDef = new HashMap<>();
         templateDef.put("match_mapping_type", "string");
         templateDef.put("mapping", Collections.singletonMap("store", true));
-        DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha5);
+        DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1);
         assertTrue(template.match("a.b", "b", XContentFieldType.STRING));
         assertFalse(template.match("a.b", "b", XContentFieldType.BOOLEAN));
     }

@@ -97,7 +90,7 @@ public class DynamicTemplateTests extends ESTestCase {
         Map<String, Object> templateDef = new HashMap<>();
         templateDef.put("match_mapping_type", "string");
         templateDef.put("mapping", Collections.singletonMap("store", true));
-        DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1);
+        DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1);
         XContentBuilder builder = JsonXContent.contentBuilder();
         template.toXContent(builder, ToXContent.EMPTY_PARAMS);
         assertEquals("{\"match_mapping_type\":\"string\",\"mapping\":{\"store\":true}}", Strings.toString(builder));
@@ -107,7 +100,7 @@ public class DynamicTemplateTests extends ESTestCase {
         templateDef.put("match", "*name");
         templateDef.put("unmatch", "first_name");
         templateDef.put("mapping", Collections.singletonMap("store", true));
-        template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1);
+        template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1);
         builder = JsonXContent.contentBuilder();
         template.toXContent(builder, ToXContent.EMPTY_PARAMS);
         assertEquals("{\"match\":\"*name\",\"unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", Strings.toString(builder));
@@ -117,7 +110,7 @@ public class DynamicTemplateTests extends ESTestCase {
         templateDef.put("path_match", "*name");
         templateDef.put("path_unmatch", "first_name");
         templateDef.put("mapping", Collections.singletonMap("store", true));
-        template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1);
+        template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1);
         builder = JsonXContent.contentBuilder();
         template.toXContent(builder, ToXContent.EMPTY_PARAMS);
         assertEquals("{\"path_match\":\"*name\",\"path_unmatch\":\"first_name\",\"mapping\":{\"store\":true}}",
@@ -128,7 +121,7 @@ public class DynamicTemplateTests extends ESTestCase {
         templateDef.put("match", "^a$");
         templateDef.put("match_pattern", "regex");
         templateDef.put("mapping", Collections.singletonMap("store", true));
-        template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1);
+        template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1);
         builder = JsonXContent.contentBuilder();
         template.toXContent(builder, ToXContent.EMPTY_PARAMS);
         assertEquals("{\"match\":\"^a$\",\"match_pattern\":\"regex\",\"mapping\":{\"store\":true}}", Strings.toString(builder));
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java
index 8f2a51bbfc2..1d65fb27c55 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java
@@ -45,6 +45,7 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.function.Supplier;

+import static java.util.Collections.singletonMap;
 import static org.hamcrest.Matchers.closeTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.notNullValue;
@@ -56,15 +57,19 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
         return pluginList(InternalSettingsPlugin.class);
     }

+    @Override
+    protected boolean forbidPrivateIndexSettings() {
+        return false;
+    }
+
     public void testExternalValues() throws Exception {
-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
+        Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0,
             Version.CURRENT);
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         IndexService indexService = createIndex("test", settings);
         MapperRegistry mapperRegistry = new MapperRegistry(
-            Collections.singletonMap(ExternalMapperPlugin.EXTERNAL, new ExternalMapper.TypeParser(ExternalMapperPlugin.EXTERNAL, "foo")),
-            Collections.singletonMap(ExternalMetadataMapper.CONTENT_TYPE, new ExternalMetadataMapper.TypeParser()),
-            MapperPlugin.NOOP_FIELD_FILTER);
+            singletonMap(ExternalMapperPlugin.EXTERNAL, new ExternalMapper.TypeParser(ExternalMapperPlugin.EXTERNAL, "foo")),
+            singletonMap(ExternalMetadataMapper.CONTENT_TYPE, new ExternalMetadataMapper.TypeParser()), MapperPlugin.NOOP_FIELD_FILTER);

         Supplier<QueryShardContext> queryShardContext = () -> {
             return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }, null);
@@ -107,10 +112,7 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
     }

     public void testExternalValuesWithMultifield() throws Exception {
-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
-            Version.CURRENT);
-        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
-        IndexService indexService = createIndex("test", settings);
+        IndexService indexService = createIndex("test");
         Map<String, Mapper.TypeParser> mapperParsers = new HashMap<>();
         mapperParsers.put(ExternalMapperPlugin.EXTERNAL, new ExternalMapper.TypeParser(ExternalMapperPlugin.EXTERNAL, "foo"));
         mapperParsers.put(TextFieldMapper.CONTENT_TYPE, new TextFieldMapper.TypeParser());
@@ -173,10 +175,7 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
     }

     public void testExternalValuesWithMultifieldTwoLevels() throws Exception {
-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
-            Version.CURRENT);
-        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
-        IndexService indexService = createIndex("test", settings);
+        IndexService indexService = createIndex("test");
         Map<String, Mapper.TypeParser> mapperParsers = new HashMap<>();
         mapperParsers.put(ExternalMapperPlugin.EXTERNAL, new ExternalMapper.TypeParser(ExternalMapperPlugin.EXTERNAL, "foo"));
         mapperParsers.put(ExternalMapperPlugin.EXTERNAL_BIS, new ExternalMapper.TypeParser(ExternalMapperPlugin.EXTERNAL, "bar"));
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java
index 8c2e6d47541..79f01288fa8 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java
@@ -168,7 +168,7 @@ public class ExternalMapper extends FieldMapper {
     }

     @Override
-    public Mapper parse(ParseContext context) throws IOException {
+    public void parse(ParseContext context) throws IOException {
         byte[] bytes = "Hello world".getBytes(Charset.defaultCharset());
         binMapper.parse(context.createExternalValueContext(bytes));

@@ -190,7 +190,6 @@ public class ExternalMapper extends FieldMapper {
         stringMapper.parse(context);

         multiFields.parse(this, context);
-        return null;
     }

     @Override
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java
index 56e587dc995..8e5c81e58f1 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java
@@ -321,11 +321,16 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase {

     public void testEnableNorms() throws IOException {
         String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
-            .startObject("properties").startObject("field").field("type", "keyword").field("norms", true).endObject().endObject()
-            .endObject().endObject());
+            .startObject("properties")
+                .startObject("field")
+                    .field("type", "keyword")
+                    .field("doc_values", false)
+                    .field("norms", true)
+                .endObject()
+            .endObject()
.endObject().endObject()); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); - assertEquals(mapping, mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference @@ -336,8 +341,11 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase { XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); - assertEquals(2, fields.length); + assertEquals(1, fields.length); assertFalse(fields[0].fieldType().omitNorms()); + + IndexableField[] fieldNamesFields = doc.rootDoc().getFields(FieldNamesFieldMapper.NAME); + assertEquals(0, fieldNamesFields.length); } public void testNormalizer() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java index a291062c7a5..eae5b4ac7d2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.randomizedtesting.generators.RandomStrings; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.TokenFilter; @@ -28,9 +27,11 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; -import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.Lucene; @@ -132,6 +133,23 @@ public class KeywordFieldTypeTests extends FieldTypeTestCase { assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } + public void testExistsQuery() { + MappedFieldType ft = createDefaultFieldType(); + ft.setName("field"); + + ft.setHasDocValues(true); + ft.setOmitNorms(true); + assertEquals(new DocValuesFieldExistsQuery("field"), ft.existsQuery(null)); + + ft.setHasDocValues(false); + ft.setOmitNorms(false); + assertEquals(new NormsFieldExistsQuery("field"), ft.existsQuery(null)); + + ft.setHasDocValues(false); + ft.setOmitNorms(true); + assertEquals(new TermQuery(new Term(FieldNamesFieldMapper.NAME, "field")), ft.existsQuery(null)); + } + public void testRegexpQuery() { MappedFieldType ft = createDefaultFieldType(); ft.setName("field"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LegacyDynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LegacyDynamicMappingTests.java new file mode 100644 index 00000000000..42d6aa8951c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/LegacyDynamicMappingTests.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
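An editorial aside on the new testExistsQuery above: its three assertions pin down how a keyword field picks its exists query. A condensed sketch of that selection logic, written as a hypothetical helper rather than the actual MappedFieldType method:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.NormsFieldExistsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.index.mapper.FieldNamesFieldMapper;

// Hypothetical helper mirroring the behaviour the three assertions verify.
final class KeywordExistsQuerySketch {
    static Query existsQuery(String field, boolean hasDocValues, boolean omitNorms) {
        if (hasDocValues) {
            return new DocValuesFieldExistsQuery(field); // doc values are the cheapest evidence a value exists
        } else if (omitNorms == false) {
            return new NormsFieldExistsQuery(field); // norms also record which docs have a value
        } else {
            return new TermQuery(new Term(FieldNamesFieldMapper.NAME, field)); // last resort: _field_names
        }
    }
}

The ordering matters: doc values and norms answer the question from per-field index structures that are already present, while the _field_names fallback relies on an extra indexed metadata field.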
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import java.io.IOException; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +public class LegacyDynamicMappingTests extends ESSingleNodeTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public void testTypeNotCreatedOnIndexFailure() throws IOException { + final Settings settings = Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_3_0).build(); + try (XContentBuilder mapping = jsonBuilder()) { + mapping.startObject(); + { + mapping.startObject("_default_"); + { + mapping.field("dynamic", "strict"); + } + mapping.endObject(); + } + mapping.endObject(); + createIndex("test", settings, "_default_", mapping); + } + try (XContentBuilder sourceBuilder = jsonBuilder().startObject().field("test", "test").endObject()) { + expectThrows(StrictDynamicMappingException.class, () -> client() + .prepareIndex() + .setIndex("test") + .setType("type") + .setSource(sourceBuilder) + .get()); + + GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get(); + assertNull(getMappingsResponse.getMappings().get("test").get("type")); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LegacyMapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LegacyMapperServiceTests.java new file mode 100644 index 00000000000..33f9bd51f33 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/LegacyMapperServiceTests.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
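A note on the forbidPrivateIndexSettings() override that recurs in these new Legacy* test classes: index.version.created is a private index setting, so a test that creates an index with an older created-version has to opt out of the test framework's private-settings validation. A minimal sketch of the pattern, with a hypothetical class name and the same calls as in the diff:

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESSingleNodeTestCase;

public class LegacySomethingTests extends ESSingleNodeTestCase {

    @Override
    protected boolean forbidPrivateIndexSettings() {
        return false; // allow index.version.created to be supplied explicitly
    }

    public void testAgainstOldCreatedVersion() {
        final Settings settings = Settings.builder()
            .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_3_0)
            .build();
        createIndex("test", settings); // would be rejected without the override above
    }
}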
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import java.io.IOException; + +public class LegacyMapperServiceTests extends ESSingleNodeTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public void testIndexMetaDataUpdateDoesNotLoseDefaultMapper() throws IOException { + final IndexService indexService = + createIndex("test", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0).build()); + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.startObject(MapperService.DEFAULT_MAPPING); + { + builder.field("date_detection", false); + } + builder.endObject(); + } + builder.endObject(); + final PutMappingRequest putMappingRequest = new PutMappingRequest(); + putMappingRequest.indices("test"); + putMappingRequest.type(MapperService.DEFAULT_MAPPING); + putMappingRequest.source(builder); + client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(builder).get(); + } + assertNotNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING)); + final Settings zeroReplicasSettings = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build(); + client().admin().indices().prepareUpdateSettings("test").setSettings(zeroReplicasSettings).get(); + /* + * This assertion is a guard against a previous bug that would lose the default mapper when applying a metadata update that did not + * update the default mapping. + */ + assertNotNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING)); + } + + public void testDefaultMappingIsDeprecatedOn6() throws IOException { + final Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0).build(); + final String mapping; + try (XContentBuilder defaultMapping = XContentFactory.jsonBuilder()) { + defaultMapping.startObject(); + { + defaultMapping.startObject("_default_"); + { + + } + defaultMapping.endObject(); + } + defaultMapping.endObject(); + mapping = Strings.toString(defaultMapping); + } + final MapperService mapperService = createIndex("test", settings).mapperService(); + mapperService.merge("_default_", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + assertWarnings("[_default_] mapping is deprecated since it is not useful anymore now that indexes cannot have more than one type"); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapperTests.java new file mode 100644 index 00000000000..9566e1afa6d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapperTests.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
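For reference, the two _default_ mappings built with XContentBuilder above serialize to the JSON below; the second, empty one is what testDefaultMappingIsDeprecatedOn6 merges to trigger the deprecation warning:

// JSON equivalents of the builders in LegacyMapperServiceTests (sketch):
String defaultMappingWithDateDetection = "{\"_default_\":{\"date_detection\":false}}";
String emptyDefaultMapping = "{\"_default_\":{}}";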
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESSingleNodeTestCase; + +public class LegacyTypeFieldMapperTests extends ESSingleNodeTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public void testDocValuesMultipleTypes() throws Exception { + TypeFieldMapperTests.testDocValues(index -> { + final Settings settings = Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_0_0).build(); + return this.createIndex(index, settings); + }); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 51b6e9d7168..cfd92db37f0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -20,12 +20,11 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; @@ -39,13 +38,11 @@ import org.elasticsearch.test.InternalSettingsPlugin; import org.hamcrest.Matchers; import java.io.IOException; -import java.io.UncheckedIOException; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutionException; -import java.util.function.Function; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.instanceOf; @@ -119,35 +116,41 @@ public class MapperServiceTests extends ESSingleNodeTestCase { assertNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING)); } - public void testTotalFieldsExceedsLimit() throws Throwable { - Function mapping = type -> { - try { - return Strings.toString(XContentFactory.jsonBuilder().startObject().startObject(type).startObject("properties") - .startObject("field1").field("type", "keyword") - .endObject().endObject().endObject().endObject()); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }; - createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), 
MergeReason.MAPPING_UPDATE); - //set total number of fields to 1 to trigger an exception + /** + * Test that we can have at least the number of fields in new mappings that are defined by "index.mapping.total_fields.limit". + * Any additional field should trigger an IllegalArgumentException. + */ + public void testTotalFieldsLimit() throws Throwable { + int totalFieldsLimit = randomIntBetween(1, 10); + Settings settings = Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), totalFieldsLimit).build(); + createIndex("test1", settings).mapperService().merge("type", createMappingSpecifyingNumberOfFields(totalFieldsLimit), + MergeReason.MAPPING_UPDATE); + + // adding one more field should trigger exception IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - createIndex("test2", Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 1).build()) - .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE); + createIndex("test2", settings).mapperService().merge("type", createMappingSpecifyingNumberOfFields(totalFieldsLimit + 1), + MergeReason.MAPPING_UPDATE); }); - assertTrue(e.getMessage(), e.getMessage().contains("Limit of total fields [1] in index [test2] has been exceeded")); + assertTrue(e.getMessage(), + e.getMessage().contains("Limit of total fields [" + totalFieldsLimit + "] in index [test2] has been exceeded")); + } + + private CompressedXContent createMappingSpecifyingNumberOfFields(int numberOfFields) throws IOException { + XContentBuilder mappingBuilder = XContentFactory.jsonBuilder().startObject() + .startObject("properties"); + for (int i = 0; i < numberOfFields; i++) { + mappingBuilder.startObject("field" + i); + mappingBuilder.field("type", randomFrom("long", "integer", "date", "keyword", "text")); + mappingBuilder.endObject(); + } + mappingBuilder.endObject().endObject(); + return new CompressedXContent(BytesReference.bytes(mappingBuilder)); } public void testMappingDepthExceedsLimit() throws Throwable { - CompressedXContent simpleMapping = new CompressedXContent(BytesReference.bytes(XContentFactory.jsonBuilder().startObject() - .startObject("properties") - .startObject("field") - .field("type", "text") - .endObject() - .endObject().endObject())); IndexService indexService1 = createIndex("test1", Settings.builder().put(MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getKey(), 1).build()); // no exception - indexService1.mapperService().merge("type", simpleMapping, MergeReason.MAPPING_UPDATE); + indexService1.mapperService().merge("type", createMappingSpecifyingNumberOfFields(1), MergeReason.MAPPING_UPDATE); CompressedXContent objectMapping = new CompressedXContent(BytesReference.bytes(XContentFactory.jsonBuilder().startObject() .startObject("properties") @@ -283,22 +286,20 @@ public class MapperServiceTests extends ESSingleNodeTestCase { .endObject() .endObject().endObject()); - DocumentMapper documentMapper = createIndex("test1").mapperService() - .merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + int numberOfFieldsIncludingAlias = 2; + createIndex("test1", Settings.builder() + .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), numberOfFieldsIncludingAlias).build()).mapperService() + .merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); // Set the total fields limit to the number of non-alias fields, to verify that adding // a field alias pushes the mapping over the 
limit. - int numFields = documentMapper.mapping().metadataMappers.length + 2; - int numNonAliasFields = numFields - 1; - + int numberOfNonAliasFields = 1; IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - Settings settings = Settings.builder() - .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), numNonAliasFields) - .build(); - createIndex("test2", settings).mapperService() - .merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + createIndex("test2", + Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), numberOfNonAliasFields).build()) + .mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); }); - assertEquals("Limit of total fields [" + numNonAliasFields + "] in index [test2] has been exceeded", e.getMessage()); + assertEquals("Limit of total fields [" + numberOfNonAliasFields + "] in index [test2] has been exceeded", e.getMessage()); } public void testForbidMultipleTypes() throws IOException { @@ -338,12 +339,4 @@ public class MapperServiceTests extends ESSingleNodeTestCase { "can have at most one type.", e.getMessage()); } - public void testDefaultMappingIsDeprecatedOn6() throws IOException { - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0).build(); - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_default_").endObject().endObject()); - MapperService mapperService = createIndex("test", settings).mapperService(); - mapperService.merge("_default_", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); - assertWarnings("[_default_] mapping is deprecated since it is not useful anymore now that indexes " + - "cannot have more than one type"); - } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java index 54418850e5d..00068f76e75 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java @@ -443,4 +443,22 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase { } } + public void testIllegalFormatField() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "date_range") + .array("format", "test_format") + .endObject() + .endObject() + .endObject() + .endObject()); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> parser.parse("type", new CompressedXContent(mapping))); + assertEquals("Invalid format: [[test_format]]: expected string value", e.getMessage()); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java index ec21a1f7286..574d4eee70a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java @@ -159,4 +159,30 @@ public class RootObjectMapperTests extends ESSingleNodeTestCase { mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); assertEquals(mapping3, mapper.mappingSource().toString()); } + + public void 
testIllegalFormatField() throws Exception { + String dynamicMapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startArray("dynamic_date_formats") + .startArray().value("test_format").endArray() + .endArray() + .endObject() + .endObject()); + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startArray("date_formats") + .startArray().value("test_format").endArray() + .endArray() + .endObject() + .endObject()); + + DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); + for (String m : Arrays.asList(mapping, dynamicMapping)) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> parser.parse("type", new CompressedXContent(m))); + assertEquals("Invalid format: [[test_format]]: expected string value", e.getMessage()); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java index 0af66321990..26f2b7fc923 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java @@ -25,11 +25,11 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; @@ -43,6 +43,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.function.Function; public class TypeFieldMapperTests extends ESSingleNodeTestCase { @@ -51,19 +52,12 @@ public class TypeFieldMapperTests extends ESSingleNodeTestCase { return pluginList(InternalSettingsPlugin.class); } - public void testDocValuesMultipleTypes() throws Exception { - testDocValues(false); - } - public void testDocValuesSingleType() throws Exception { - testDocValues(true); + testDocValues(this::createIndex); } - public void testDocValues(boolean singleType) throws IOException { - Settings indexSettings = singleType ? 
Settings.EMPTY : Settings.builder() - .put("index.version.created", Version.V_5_6_0) - .build(); - MapperService mapperService = createIndex("test", indexSettings).mapperService(); + public static void testDocValues(Function<String, IndexService> createIndex) throws IOException { + MapperService mapperService = createIndex.apply("test").mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingOnClusterIT.java b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingOnClusterIT.java deleted file mode 100644 index 7de97d88a5e..00000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingOnClusterIT.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalSettingsPlugin; - -import java.util.Arrays; -import java.util.Collection; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; - -public class UpdateMappingOnClusterIT extends ESIntegTestCase { - private static final String INDEX = "index"; - private static final String TYPE = "type"; - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created - } - - protected void testConflict(String mapping, String mappingUpdate, Version idxVersion, String...
errorMessages) throws InterruptedException { - assertAcked(prepareCreate(INDEX).setSource(mapping, XContentType.JSON) - .setSettings(Settings.builder().put("index.version.created", idxVersion.id))); - ensureGreen(INDEX); - GetMappingsResponse mappingsBeforeUpdateResponse = client().admin().indices().prepareGetMappings(INDEX).addTypes(TYPE).get(); - try { - client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(mappingUpdate, XContentType.JSON).get(); - fail(); - } catch (IllegalArgumentException e) { - for (String errorMessage : errorMessages) { - assertThat(e.getMessage(), containsString(errorMessage)); - } - } - compareMappingOnNodes(mappingsBeforeUpdateResponse); - - } - - private void compareMappingOnNodes(GetMappingsResponse previousMapping) { - // make sure all nodes have same cluster state - for (Client client : cluster().getClients()) { - GetMappingsResponse currentMapping = client.admin().indices().prepareGetMappings(INDEX).addTypes(TYPE).setLocal(true).get(); - assertThat(previousMapping.getMappings().get(INDEX).get(TYPE).source(), equalTo(currentMapping.getMappings().get(INDEX).get(TYPE).source())); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java index 3f8e8e9efec..d8650331d23 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.mapper; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -30,6 +32,7 @@ import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.Collection; @@ -188,4 +191,30 @@ public class UpdateMappingTests extends ESSingleNodeTestCase { () -> mapperService2.merge("type", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), equalTo("mapper [foo] of different type, current_type [long], merged_type [ObjectMapper]")); } + + public void testMappingVersion() { + createIndex("test", client().admin().indices().prepareCreate("test").addMapping("type")); + final ClusterService clusterService = getInstanceFromNode(ClusterService.class); + { + final long previousVersion = clusterService.state().metaData().index("test").getMappingVersion(); + final PutMappingRequest request = new PutMappingRequest(); + request.indices("test"); + request.type("type"); + request.source("field", "type=text"); + client().admin().indices().putMapping(request).actionGet(); + assertThat(clusterService.state().metaData().index("test").getMappingVersion(), Matchers.equalTo(1 + previousVersion)); + } + + { + final long previousVersion = clusterService.state().metaData().index("test").getMappingVersion(); + final PutMappingRequest request = new PutMappingRequest(); + request.indices("test"); + request.type("type"); + request.source("field", "type=text"); + client().admin().indices().putMapping(request).actionGet(); + // the version should be unchanged after putting the same mapping again 
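+ // Note: getMappingVersion() reads the mapping version from IndexMetaData; the master bumps it
+ // only when a put-mapping request actually changes the mapping source, so re-putting an
+ // identical mapping leaves the cluster state, and hence the version, untouched.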
+ assertThat(clusterService.state().metaData().index("test").getMappingVersion(), Matchers.equalTo(previousVersion)); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java index 49cb4442beb..cdc65cce927 100644 --- a/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.apache.lucene.queries.BoostingQuery; +import org.apache.lucene.queries.function.FunctionScoreQuery; import org.apache.lucene.search.Query; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; @@ -45,7 +45,7 @@ public class BoostingQueryBuilderTests extends AbstractQueryTestCase Double.compare(d, 0) < 0, ESTestCase::randomDouble); + values[i] = rand * randomInt(100) + 1.e-5d; } return values; } @@ -478,8 +479,8 @@ public class FunctionScoreTests extends ESTestCase { public void testSimpleWeightedFunction() throws IOException, ExecutionException, InterruptedException { int numFunctions = randomIntBetween(1, 3); - float[] weights = randomFloats(numFunctions); - double[] scores = randomDoubles(numFunctions); + float[] weights = randomPositiveFloats(numFunctions); + double[] scores = randomPositiveDoubles(numFunctions); ScoreFunctionStub[] scoreFunctionStubs = new ScoreFunctionStub[numFunctions]; for (int i = 0; i < numFunctions; i++) { scoreFunctionStubs[i] = new ScoreFunctionStub(scores[i]); @@ -502,7 +503,7 @@ public class FunctionScoreTests extends ESTestCase { score *= weights[i] * scores[i]; } assertThat(scoreWithWeight / (float) score, is(1f)); - float explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue(); + float explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue().floatValue(); assertThat(explainedScore / scoreWithWeight, is(1f)); functionScoreQueryWithWeights = getFiltersFunctionScoreQuery( @@ -518,7 +519,7 @@ public class FunctionScoreTests extends ESTestCase { sum += weights[i] * scores[i]; } assertThat(scoreWithWeight / (float) sum, is(1f)); - explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue(); + explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue().floatValue(); assertThat(explainedScore / scoreWithWeight, is(1f)); functionScoreQueryWithWeights = getFiltersFunctionScoreQuery( @@ -536,7 +537,7 @@ public class FunctionScoreTests extends ESTestCase { sum += weights[i] * scores[i]; } assertThat(scoreWithWeight / (float) (sum / norm), is(1f)); - explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue(); + explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue().floatValue(); assertThat(explainedScore / scoreWithWeight, is(1f)); functionScoreQueryWithWeights = getFiltersFunctionScoreQuery( @@ -552,7 +553,7 @@ public class FunctionScoreTests extends ESTestCase { min = Math.min(min, weights[i] * scores[i]); } assertThat(scoreWithWeight / (float) min, is(1f)); - explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue(); + explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue().floatValue(); assertThat(explainedScore / scoreWithWeight, is(1f)); functionScoreQueryWithWeights = getFiltersFunctionScoreQuery( @@ -568,7 +569,7 @@ 
public class FunctionScoreTests extends ESTestCase { max = Math.max(max, weights[i] * scores[i]); } assertThat(scoreWithWeight / (float) max, is(1f)); - explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue(); + explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue().floatValue(); assertThat(explainedScore / scoreWithWeight, is(1f)); } @@ -587,7 +588,7 @@ public class FunctionScoreTests extends ESTestCase { FunctionScoreQuery fsq = new FunctionScoreQuery(query,0f, Float.POSITIVE_INFINITY); Explanation fsqExpl = searcher.explain(fsq, 0); assertTrue(fsqExpl.isMatch()); - assertEquals(queryExpl.getValue(), fsqExpl.getValue(), 0f); + assertEquals(queryExpl.getValue(), fsqExpl.getValue()); assertEquals(queryExpl.getDescription(), fsqExpl.getDescription()); fsq = new FunctionScoreQuery(query, 10f, Float.POSITIVE_INFINITY); @@ -598,7 +599,7 @@ public class FunctionScoreTests extends ESTestCase { FunctionScoreQuery ffsq = new FunctionScoreQuery(query, 0f, Float.POSITIVE_INFINITY); Explanation ffsqExpl = searcher.explain(ffsq, 0); assertTrue(ffsqExpl.isMatch()); - assertEquals(queryExpl.getValue(), ffsqExpl.getValue(), 0f); + assertEquals(queryExpl.getValue(), ffsqExpl.getValue()); assertEquals(queryExpl.getDescription(), ffsqExpl.getDescription()); ffsq = new FunctionScoreQuery(query, 10f, Float.POSITIVE_INFINITY); @@ -613,8 +614,8 @@ public class FunctionScoreTests extends ESTestCase { searcher.setQueryCache(null); // otherwise we could get a cached entry that does not have approximations FunctionScoreQuery fsq = new FunctionScoreQuery(query, null, Float.POSITIVE_INFINITY); - for (boolean needsScores : new boolean[] {true, false}) { - Weight weight = searcher.createWeight(fsq, needsScores, 1f); + for (org.apache.lucene.search.ScoreMode scoreMode : org.apache.lucene.search.ScoreMode.values()) { + Weight weight = searcher.createWeight(fsq, scoreMode, 1f); Scorer scorer = weight.scorer(reader.leaves().get(0)); assertNotNull(scorer.twoPhaseIterator()); } diff --git a/server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryParserPlugin.java b/server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryParserPlugin.java index 3d0eee79595..02653dcfd0e 100644 --- a/server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryParserPlugin.java +++ b/server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryParserPlugin.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.query.plugin; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; @@ -52,8 +53,8 @@ public class DummyQueryParserPlugin extends Plugin implements SearchPlugin { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { - return matchAllDocsQuery.createWeight(searcher, needsScores, boost); + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + return matchAllDocsQuery.createWeight(searcher, scoreMode, boost); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java index a288328391a..71aab8ca9f9 100644 --- 
a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java @@ -24,17 +24,26 @@ import org.elasticsearch.Version; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.index.reindex.BulkByScrollTask.Status; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.apache.lucene.util.TestUtil.randomSimpleString; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +import static org.hamcrest.Matchers.containsString; -public class BulkByScrollResponseTests extends ESTestCase { +public class BulkByScrollResponseTests extends AbstractXContentTestCase { + + private boolean includeUpdated; + private boolean includeCreated; public void testRountTrip() throws IOException { BulkByScrollResponse response = new BulkByScrollResponse(timeValueMillis(randomNonNegativeLong()), @@ -94,4 +103,73 @@ public class BulkByScrollResponseTests extends ESTestCase { assertEquals(expectedFailure.getReason().getMessage(), actualFailure.getReason().getMessage()); } } + + public static void assertEqualBulkResponse(BulkByScrollResponse expected, BulkByScrollResponse actual, + boolean includeUpdated, boolean includeCreated) { + assertEquals(expected.getTook(), actual.getTook()); + BulkByScrollTaskStatusTests + .assertEqualStatus(expected.getStatus(), actual.getStatus(), includeUpdated, includeCreated); + assertEquals(expected.getBulkFailures().size(), actual.getBulkFailures().size()); + for (int i = 0; i < expected.getBulkFailures().size(); i++) { + Failure expectedFailure = expected.getBulkFailures().get(i); + Failure actualFailure = actual.getBulkFailures().get(i); + assertEquals(expectedFailure.getIndex(), actualFailure.getIndex()); + assertEquals(expectedFailure.getType(), actualFailure.getType()); + assertEquals(expectedFailure.getId(), actualFailure.getId()); + assertThat(expectedFailure.getMessage(), containsString(actualFailure.getMessage())); + assertEquals(expectedFailure.getStatus(), actualFailure.getStatus()); + } + assertEquals(expected.getSearchFailures().size(), actual.getSearchFailures().size()); + for (int i = 0; i < expected.getSearchFailures().size(); i++) { + ScrollableHitSource.SearchFailure expectedFailure = expected.getSearchFailures().get(i); + ScrollableHitSource.SearchFailure actualFailure = actual.getSearchFailures().get(i); + assertEquals(expectedFailure.getIndex(), actualFailure.getIndex()); + assertEquals(expectedFailure.getShardId(), actualFailure.getShardId()); + assertEquals(expectedFailure.getNodeId(), actualFailure.getNodeId()); + assertThat(expectedFailure.getReason().getMessage(), containsString(actualFailure.getReason().getMessage())); + } + } + + @Override + protected void assertEqualInstances(BulkByScrollResponse expected, BulkByScrollResponse actual) { + assertEqualBulkResponse(expected, actual, includeUpdated, includeCreated); + } + + @Override + protected BulkByScrollResponse createTestInstance() { + // failures are tested separately, 
so we can test XContent equivalence at least when we have no failures + return + new BulkByScrollResponse( + timeValueMillis(randomNonNegativeLong()), BulkByScrollTaskStatusTests.randomStatusWithoutException(), + emptyList(), emptyList(), randomBoolean() + ); + } + + @Override + protected BulkByScrollResponse doParseInstance(XContentParser parser) throws IOException { + return BulkByScrollResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected ToXContent.Params getToXContentParams() { + Map params = new HashMap<>(); + if (randomBoolean()) { + includeUpdated = false; + params.put(Status.INCLUDE_UPDATED, "false"); + } else { + includeUpdated = true; + } + if (randomBoolean()) { + includeCreated = false; + params.put(Status.INCLUDE_CREATED, "false"); + } else { + includeCreated = true; + } + return new ToXContent.MapParams(params); + } } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java new file mode 100644 index 00000000000..0d84b0e1412 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.index.reindex.BulkByScrollTask.StatusOrException; + +import java.io.IOException; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.containsString; + +public class BulkByScrollTaskStatusOrExceptionTests extends AbstractXContentTestCase { + @Override + protected StatusOrException createTestInstance() { + // failures are tested separately, so we can test XContent equivalence at least when we have no failures + return createTestInstanceWithoutExceptions(); + } + + static StatusOrException createTestInstanceWithoutExceptions() { + return new StatusOrException(BulkByScrollTaskStatusTests.randomStatusWithoutException()); + } + + static StatusOrException createTestInstanceWithExceptions() { + if (randomBoolean()) { + return new StatusOrException(new ElasticsearchException("test_exception")); + } else { + return new StatusOrException(BulkByScrollTaskStatusTests.randomStatus()); + } + } + + @Override + protected StatusOrException doParseInstance(XContentParser parser) throws IOException { + return StatusOrException.fromXContent(parser); + } + + public static void assertEqualStatusOrException(StatusOrException expected, StatusOrException actual, + boolean includeUpdated, boolean includeCreated) { + if (expected != null && actual != null) { + assertNotSame(expected, actual); + if (expected.getException() == null) { + BulkByScrollTaskStatusTests + // we test includeCreated params in the Status tests + .assertEqualStatus(expected.getStatus(), actual.getStatus(), includeUpdated, includeCreated); + } else { + assertThat( + actual.getException().getMessage(), + containsString(expected.getException().getMessage()) + ); + } + } else { + // If one of them is null both of them should be null + assertSame(expected, actual); + } + } + + @Override + protected void assertEqualInstances(StatusOrException expected, StatusOrException actual) { + assertEqualStatusOrException(expected, actual, true, true); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + /** + * Test parsing {@link StatusOrException} with inner failures as they don't support asserting on xcontent equivalence, given that + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier instanceSupplier = BulkByScrollTaskStatusOrExceptionTests::createTestInstanceWithExceptions; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. 
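+ //round-tripped failures are parsed back as generic ElasticsearchException wrappers rather than
+ //their original classes, which is why only the messages are compared and xcontent equivalence
+ //is switched off below.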
+ boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, Strings.EMPTY_ARRAY, + getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java index 9e5383a259a..13db9f4766e 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java @@ -23,37 +23,39 @@ import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; import org.hamcrest.Matchers; +import org.elasticsearch.index.reindex.BulkByScrollTask.Status; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; import java.util.stream.IntStream; import static java.lang.Math.abs; -import static java.util.Collections.emptyList; import static java.util.stream.Collectors.toList; import static org.apache.lucene.util.TestUtil.randomSimpleString; -import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; +import static org.hamcrest.Matchers.equalTo; + +public class BulkByScrollTaskStatusTests extends AbstractXContentTestCase { + + private boolean includeUpdated; + private boolean includeCreated; -public class BulkByScrollTaskStatusTests extends ESTestCase { public void testBulkByTaskStatus() throws IOException { BulkByScrollTask.Status status = randomStatus(); BytesStreamOutput out = new BytesStreamOutput(); status.writeTo(out); BulkByScrollTask.Status tripped = new BulkByScrollTask.Status(out.bytes().streamInput()); assertTaskStatusEquals(out.getVersion(), status, tripped); - - // Also check round tripping pre-5.1 which is the first version to support parallelized scroll - out = new BytesStreamOutput(); - out.setVersion(Version.V_5_0_0_rc1); // This can be V_5_0_0 - status.writeTo(out); - StreamInput in = out.bytes().streamInput(); - in.setVersion(Version.V_5_0_0_rc1); - tripped = new BulkByScrollTask.Status(in); - assertTaskStatusEquals(Version.V_5_0_0_rc1, status, tripped); } /** @@ -74,23 +76,19 @@ public class BulkByScrollTaskStatusTests extends ESTestCase { assertEquals(expected.getRequestsPerSecond(), actual.getRequestsPerSecond(), 0f); assertEquals(expected.getReasonCancelled(), actual.getReasonCancelled()); assertEquals(expected.getThrottledUntil(), actual.getThrottledUntil()); - if (version.onOrAfter(Version.V_5_1_1)) { - assertThat(actual.getSliceStatuses(), Matchers.hasSize(expected.getSliceStatuses().size())); - for (int i = 0; i < expected.getSliceStatuses().size(); i++) { 
- BulkByScrollTask.StatusOrException sliceStatus = expected.getSliceStatuses().get(i); - if (sliceStatus == null) { - assertNull(actual.getSliceStatuses().get(i)); - } else if (sliceStatus.getException() == null) { - assertNull(actual.getSliceStatuses().get(i).getException()); - assertTaskStatusEquals(version, sliceStatus.getStatus(), actual.getSliceStatuses().get(i).getStatus()); - } else { - assertNull(actual.getSliceStatuses().get(i).getStatus()); - // Just check the message because we're not testing exception serialization in general here. - assertEquals(sliceStatus.getException().getMessage(), actual.getSliceStatuses().get(i).getException().getMessage()); - } + assertThat(actual.getSliceStatuses(), Matchers.hasSize(expected.getSliceStatuses().size())); + for (int i = 0; i < expected.getSliceStatuses().size(); i++) { + BulkByScrollTask.StatusOrException sliceStatus = expected.getSliceStatuses().get(i); + if (sliceStatus == null) { + assertNull(actual.getSliceStatuses().get(i)); + } else if (sliceStatus.getException() == null) { + assertNull(actual.getSliceStatuses().get(i).getException()); + assertTaskStatusEquals(version, sliceStatus.getStatus(), actual.getSliceStatuses().get(i).getStatus()); + } else { + assertNull(actual.getSliceStatuses().get(i).getStatus()); + // Just check the message because we're not testing exception serialization in general here. + assertEquals(sliceStatus.getException().getMessage(), actual.getSliceStatuses().get(i).getException().getMessage()); } - } else { - assertEquals(emptyList(), actual.getSliceStatuses()); } } @@ -113,6 +111,22 @@ public class BulkByScrollTaskStatusTests extends ESTestCase { return new BulkByScrollTask.Status(statuses, randomBoolean() ? "test" : null); } + public static BulkByScrollTask.Status randomStatusWithoutException() { + if (randomBoolean()) { + return randomWorkingStatus(null); + } + boolean canHaveNullStatuses = randomBoolean(); + List<BulkByScrollTask.StatusOrException> statuses = IntStream.range(0, between(0, 10)) + .mapToObj(i -> { + if (canHaveNullStatuses && LuceneTestCase.rarely()) { + return null; + } + return new BulkByScrollTask.StatusOrException(randomWorkingStatus(i)); + }) + .collect(toList()); + return new BulkByScrollTask.Status(statuses, randomBoolean() ? "test" : null); + } + private static BulkByScrollTask.Status randomWorkingStatus(Integer sliceId) { // These all should be believably small because we sum them if we have multiple workers int total = between(0, 10000000); @@ -124,8 +138,83 @@ public class BulkByScrollTaskStatusTests extends ESTestCase { long versionConflicts = between(0, total); long bulkRetries = between(0, 10000000); long searchRetries = between(0, 100000); - return new BulkByScrollTask.Status(sliceId, total, updated, created, deleted, batches, versionConflicts, noops, bulkRetries, - searchRetries, parseTimeValue(randomPositiveTimeValue(), "test"), abs(Randomness.get().nextFloat()), - randomBoolean() ?
null : randomSimpleString(Randomness.get()), parseTimeValue(randomPositiveTimeValue(), "test")); + // smallest unit of time during toXContent is Milliseconds + TimeUnit[] timeUnits = {TimeUnit.MILLISECONDS, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS}; + TimeValue throttled = new TimeValue(randomIntBetween(0, 1000), randomFrom(timeUnits)); + TimeValue throttledUntil = new TimeValue(randomIntBetween(0, 1000), randomFrom(timeUnits)); + return + new BulkByScrollTask.Status( + sliceId, total, updated, created, deleted, batches, versionConflicts, noops, + bulkRetries, searchRetries, throttled, abs(Randomness.get().nextFloat()), + randomBoolean() ? null : randomSimpleString(Randomness.get()), throttledUntil + ); + } + + public static void assertEqualStatus(BulkByScrollTask.Status expected, BulkByScrollTask.Status actual, + boolean includeUpdated, boolean includeCreated) { + assertNotSame(expected, actual); + assertTrue(expected.equalsWithoutSliceStatus(actual, includeUpdated, includeCreated)); + assertThat(expected.getSliceStatuses().size(), equalTo(actual.getSliceStatuses().size())); + for (int i = 0; i< expected.getSliceStatuses().size(); i++) { + BulkByScrollTaskStatusOrExceptionTests.assertEqualStatusOrException( + expected.getSliceStatuses().get(i), actual.getSliceStatuses().get(i), includeUpdated, includeCreated + ); + } + } + + @Override + protected void assertEqualInstances(BulkByScrollTask.Status first, BulkByScrollTask.Status second) { + assertEqualStatus(first, second, includeUpdated, includeCreated); + } + + @Override + protected BulkByScrollTask.Status createTestInstance() { + // failures are tested separately, so we can test xcontent equivalence at least when we have no failures + return randomStatusWithoutException(); + } + + @Override + protected BulkByScrollTask.Status doParseInstance(XContentParser parser) throws IOException { + return BulkByScrollTask.Status.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + /** + * Test parsing {@link Status} with inner failures as they don't support asserting on xcontent equivalence, given that + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier instanceSupplier = BulkByScrollTaskStatusTests::randomStatus; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. 
+ boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, Strings.EMPTY_ARRAY, + getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS); + } + + @Override + protected ToXContent.Params getToXContentParams() { + Map<String, String> params = new HashMap<>(); + if (randomBoolean()) { + includeUpdated = false; + params.put(Status.INCLUDE_UPDATED, "false"); + } else { + includeUpdated = true; + } + if (randomBoolean()) { + includeCreated = false; + params.put(Status.INCLUDE_CREATED, "false"); + } else { + includeCreated = true; + } + return new ToXContent.MapParams(params); } } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java b/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java index 6c1988a1440..1c3d539263e 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java @@ -20,8 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.search.slice.SliceBuilder; @@ -89,9 +87,9 @@ public class ReindexRequestTests extends AbstractBulkByScrollRequestTestCase<ReindexRequest> InternalEngineTests.createInternalEngine((dir, iwc) -> + new IndexWriter(dir, iwc) { + final AtomicBoolean throwAfterIndexedOneDoc = new AtomicBoolean(); // need one document to trigger delete in IW. + @Override + public long addDocument(Iterable<? extends IndexableField> doc) throws IOException { + boolean isTombstone = false; + for (IndexableField field : doc) { + if (SeqNoFieldMapper.TOMBSTONE_NAME.equals(field.name())) { + isTombstone = true; + } + } + if (isTombstone == false && throwAfterIndexedOneDoc.getAndSet(true)) { + throw indexException; + } else { + return super.addDocument(doc); + } + } + @Override + public long deleteDocuments(Term... terms) throws IOException { + throw deleteException; + } + @Override + public long softUpdateDocument(Term term, Iterable<? extends IndexableField> doc, Field... fields) throws IOException { + throw deleteException; // a delete uses softUpdateDocument API if soft-deletes enabled + } + }, null, null, config); try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(0)) { @Override - protected EngineFactory getEngineFactory(ShardRouting routing) { - return throwingDocumentFailureEngineFactory; - }}) { + protected EngineFactory getEngineFactory(ShardRouting routing) { return engineFactory; }}) { - // test only primary + // start with the primary only so the first two failures are replicated to replicas via recovery from the translog of the primary.
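+ // each failed index or delete is recorded in the primary's translog as a Translog.NoOp that
+ // carries a sequence number and the failure reason, so replicas added later can recover the
+ // same operation history even though no document was actually written.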
shards.startPrimary(); - BulkItemResponse response = shards.index( - new IndexRequest(index.getName(), "type", "1") - .source("{}", XContentType.JSON) - ); - assertTrue(response.isFailed()); - assertNoOpTranslogOperationForDocumentFailure(shards, 1, shards.getPrimary().getPendingPrimaryTerm(), failureMessage); - shards.assertAllEqual(0); + long primaryTerm = shards.getPrimary().getPendingPrimaryTerm(); + List<Translog.Operation> expectedTranslogOps = new ArrayList<>(); + BulkItemResponse indexResp = shards.index(new IndexRequest(index.getName(), "type", "1").source("{}", XContentType.JSON)); + assertThat(indexResp.isFailed(), equalTo(false)); + expectedTranslogOps.add(new Translog.Index("type", "1", 0, primaryTerm, 1, "{}".getBytes(StandardCharsets.UTF_8), null, -1)); + try (Translog.Snapshot snapshot = getTranslog(shards.getPrimary()).newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } + + indexResp = shards.index(new IndexRequest(index.getName(), "type", "any").source("{}", XContentType.JSON)); + assertThat(indexResp.getFailure().getCause(), equalTo(indexException)); + expectedTranslogOps.add(new Translog.NoOp(1, primaryTerm, indexException.toString())); + + BulkItemResponse deleteResp = shards.delete(new DeleteRequest(index.getName(), "type", "1")); + assertThat(deleteResp.getFailure().getCause(), equalTo(deleteException)); + expectedTranslogOps.add(new Translog.NoOp(2, primaryTerm, deleteException.toString())); + shards.assertAllEqual(1); - // add some replicas int nReplica = randomIntBetween(1, 3); for (int i = 0; i < nReplica; i++) { shards.addReplica(); } shards.startReplicas(nReplica); - response = shards.index( - new IndexRequest(index.getName(), "type", "1") - .source("{}", XContentType.JSON) - ); - assertTrue(response.isFailed()); - assertNoOpTranslogOperationForDocumentFailure(shards, 2, shards.getPrimary().getPendingPrimaryTerm(), failureMessage); - shards.assertAllEqual(0); + for (IndexShard shard : shards) { + try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } + try (Translog.Snapshot snapshot = shard.getHistoryOperations("test", 0)) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } + } + // unlike the previous failures, the next two failures are replicated to the replicas directly via the replication channel.
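+ // each copy should nevertheless end up with exactly the same no-op history.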
+ indexResp = shards.index(new IndexRequest(index.getName(), "type", "any").source("{}", XContentType.JSON)); + assertThat(indexResp.getFailure().getCause(), equalTo(indexException)); + expectedTranslogOps.add(new Translog.NoOp(3, primaryTerm, indexException.toString())); + + deleteResp = shards.delete(new DeleteRequest(index.getName(), "type", "1")); + assertThat(deleteResp.getFailure().getCause(), equalTo(deleteException)); + expectedTranslogOps.add(new Translog.NoOp(4, primaryTerm, deleteException.toString())); + + for (IndexShard shard : shards) { + try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } + try (Translog.Snapshot snapshot = shard.getHistoryOperations("test", 0)) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } + } + shards.assertAllEqual(1); } } @@ -465,8 +521,9 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase recoverReplica(replica3, replica2, true); try (Translog.Snapshot snapshot = getTranslog(replica3).newSnapshot()) { assertThat(snapshot.totalOperations(), equalTo(initDocs + 1)); - assertThat(snapshot.next(), equalTo(op2)); - assertThat("Remaining of snapshot should contain init operations", snapshot, containsOperationsInAnyOrder(initOperations)); + final List<Translog.Operation> expectedOps = new ArrayList<>(initOperations); + expectedOps.add(op2); + assertThat(snapshot, containsOperationsInAnyOrder(expectedOps)); assertThat("Peer-recovery should not send overridden operations", snapshot.skippedOperations(), equalTo(0)); } // TODO: We should assert the content of shards in the ReplicationGroup. @@ -541,47 +598,4 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase shards.assertAllEqual(0); } } - - /** Throws documentFailure on every indexing operation */ - static class ThrowingDocumentFailureEngineFactory implements EngineFactory { - final String documentFailureMessage; - - ThrowingDocumentFailureEngineFactory(String documentFailureMessage) { - this.documentFailureMessage = documentFailureMessage; - } - - @Override - public Engine newReadWriteEngine(EngineConfig config) { - return InternalEngineTests.createInternalEngine((directory, writerConfig) -> - new IndexWriter(directory, writerConfig) { - @Override - public long addDocument(Iterable<? extends IndexableField> doc) throws IOException { - assert documentFailureMessage != null; - throw new IOException(documentFailureMessage); - } - }, null, null, config); - } - } - - private static void assertNoOpTranslogOperationForDocumentFailure( - Iterable<IndexShard> replicationGroup, - int expectedOperation, - long expectedPrimaryTerm, - String failureMessage) throws IOException { - for (IndexShard indexShard : replicationGroup) { - try (Translog.Snapshot snapshot = getTranslog(indexShard).newSnapshot()) { - assertThat(snapshot.totalOperations(), equalTo(expectedOperation)); - long expectedSeqNo = 0L; - Translog.Operation op = snapshot.next(); - do { - assertThat(op.opType(), equalTo(Translog.Operation.Type.NO_OP)); - assertThat(op.seqNo(), equalTo(expectedSeqNo)); - assertThat(op.primaryTerm(), equalTo(expectedPrimaryTerm)); - assertThat(((Translog.NoOp) op).reason(), containsString(failureMessage)); - op = snapshot.next(); - expectedSeqNo++; - } while (op != null); - } - } - } } diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 2d198c32ba7..28122665e9b 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -98,7 +98,8 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC } public void testRecoveryOfDisconnectedReplica() throws Exception { - try (ReplicationGroup shards = createGroup(1)) { + Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build(); + try (ReplicationGroup shards = createGroup(1, settings)) { shards.startAll(); int docs = shards.indexDocs(randomInt(50)); shards.flush(); @@ -266,6 +267,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC builder.settings(Settings.builder().put(newPrimary.indexSettings().getSettings()) .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0) ); newPrimary.indexSettings().updateIndexMetaData(builder.build()); newPrimary.onSettingsChanged(); @@ -275,7 +277,12 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC shards.syncGlobalCheckpoint(); assertThat(newPrimary.getLastSyncedGlobalCheckpoint(), equalTo(newPrimary.seqNoStats().getMaxSeqNo())); }); - newPrimary.flush(new FlushRequest()); + newPrimary.flush(new FlushRequest().force(true)); + if (replica.indexSettings().isSoftDeleteEnabled()) { + // We need an extra flush to advance the min_retained_seqno on the new primary so ops-based recovery won't happen. + // The min_retained_seqno only advances when a merge asks for the retention query.
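+ // Without those retained operations, the disconnected replica can only be recovered file-based.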
+ newPrimary.flush(new FlushRequest().force(true)); + } uncommittedOpsOnPrimary = shards.indexDocs(randomIntBetween(0, 10)); totalDocs += uncommittedOpsOnPrimary; } diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java b/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java index d4dc71388ac..f64a9e38b87 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java @@ -226,7 +226,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopFieldDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(11)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(7)); @@ -241,7 +241,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(28)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(13)); @@ -263,7 +263,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD ); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(6L)); + assertThat(topDocs.totalHits.value, equalTo(6L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(23)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(12)); @@ -278,7 +278,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD sort = new Sort(new SortField("field2", nestedComparatorSource)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(6L)); + assertThat(topDocs.totalHits.value, equalTo(6L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(15)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(3)); @@ -294,7 +294,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD nestedComparatorSource = createFieldComparator("field2", sortMode, 127, createNested(searcher, parentFilter, childFilter)); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort); - assertThat(topDocs.totalHits, equalTo(8L)); + assertThat(topDocs.totalHits.value, equalTo(8L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(19)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(127)); @@ -310,7 +310,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, 
childFilter)); sort = new Sort(new SortField("field2", nestedComparatorSource)); topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort); - assertThat(topDocs.totalHits, equalTo(8L)); + assertThat(topDocs.totalHits.value, equalTo(8L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(19)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-127)); @@ -336,7 +336,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(11)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java index c643ea6cee0..93945231e2b 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java @@ -69,7 +69,7 @@ public class DoubleNestedSortingTests extends AbstractNumberNestedSortingTestCas Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(11)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java index 13d0e83e37e..2d1ffb1e1a3 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java @@ -68,7 +68,7 @@ public class FloatNestedSortingTests extends DoubleNestedSortingTests { Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(11)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index 1300debd5eb..0bee6eeb6ed 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -304,7 +304,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopFieldDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(3)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("a")); @@ -321,7 +321,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter)); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(28)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("o")); @@ -347,7 +347,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { ); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(6L)); + assertThat(topDocs.totalHits.value, equalTo(6L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(23)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("m")); @@ -614,7 +614,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { sortBuilder.setNestedSort(new NestedSortBuilder("chapters").setNestedSort(new NestedSortBuilder("chapters.paragraphs"))); QueryBuilder queryBuilder = new MatchAllQueryBuilder(); TopFieldDocs topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(5L)); + assertThat(topFields.totalHits.value, equalTo(5L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L)); assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("4")); @@ -630,25 +630,25 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { { queryBuilder = new TermQueryBuilder("genre", "romance"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L)); queryBuilder = new TermQueryBuilder("genre", "science fiction"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("1")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(234L)); queryBuilder = new TermQueryBuilder("genre", "horror"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); 
assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("3")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(976L)); queryBuilder = new TermQueryBuilder("genre", "cooking"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L)); } @@ -658,7 +658,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { sortBuilder.order(SortOrder.DESC); queryBuilder = new MatchAllQueryBuilder(); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(5L)); + assertThat(topFields.totalHits.value, equalTo(5L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("3")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(976L)); assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("1")); @@ -675,25 +675,25 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { { queryBuilder = new TermQueryBuilder("genre", "romance"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L)); queryBuilder = new TermQueryBuilder("genre", "science fiction"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("1")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(849L)); queryBuilder = new TermQueryBuilder("genre", "horror"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("3")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(976L)); queryBuilder = new TermQueryBuilder("genre", "cooking"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(180L)); } @@ -708,7 +708,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { .setNestedSort(new NestedSortBuilder("chapters.paragraphs")) ); topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(2L)); + assertThat(topFields.totalHits.value, equalTo(2L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L)); assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("4")); @@ -716,7 +716,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { sortBuilder.order(SortOrder.DESC); topFields = search(new 
NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(2L)); + assertThat(topFields.totalHits.value, equalTo(2L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L)); assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("2")); @@ -736,7 +736,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { ) ); topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(2L)); + assertThat(topFields.totalHits.value, equalTo(2L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L)); assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("2")); @@ -744,7 +744,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { sortBuilder.order(SortOrder.DESC); topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(2L)); + assertThat(topFields.totalHits.value, equalTo(2L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L)); assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("2")); @@ -762,25 +762,25 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { queryBuilder = new TermQueryBuilder("genre", "romance"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L)); queryBuilder = new TermQueryBuilder("genre", "science fiction"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("1")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(Long.MAX_VALUE)); queryBuilder = new TermQueryBuilder("genre", "horror"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("3")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(Long.MAX_VALUE)); queryBuilder = new TermQueryBuilder("genre", "cooking"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L)); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java index 4479c7b3909..e9f52d7c319 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java @@ -56,7 +56,7 @@ public class IndexSearcherWrapperTests extends ESTestCase { writer.addDocument(doc); DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); IndexSearcher searcher = new IndexSearcher(open); - assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits); + assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits.value); final AtomicInteger closeCalls = new AtomicInteger(0); IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { @Override @@ -82,7 +82,7 @@ public class IndexSearcherWrapperTests extends ESTestCase { } outerCount.incrementAndGet(); }); - assertEquals(0, wrap.searcher().search(new TermQuery(new Term("field", "doc")), 1).totalHits); + assertEquals(0, wrap.searcher().search(new TermQuery(new Term("field", "doc")), 1).totalHits.value); wrap.close(); assertFalse("wrapped reader is closed", wrap.reader().tryIncRef()); assertEquals(sourceRefCount, open.getRefCount()); @@ -106,7 +106,7 @@ public class IndexSearcherWrapperTests extends ESTestCase { writer.addDocument(doc); DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); IndexSearcher searcher = new IndexSearcher(open); - assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits); + assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits.value); searcher.setSimilarity(iwc.getSimilarity()); final AtomicInteger closeCalls = new AtomicInteger(0); IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { @@ -148,7 +148,7 @@ public class IndexSearcherWrapperTests extends ESTestCase { writer.addDocument(doc); DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); IndexSearcher searcher = new IndexSearcher(open); - assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits); + assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits.value); searcher.setSimilarity(iwc.getSimilarity()); IndexSearcherWrapper wrapper = new IndexSearcherWrapper(); try (Engine.Searcher engineSearcher = new Engine.Searcher("foo", searcher)) { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 182747e7dda..87edfcfccb1 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -637,7 +637,7 @@ public class IndexShardIT extends ESSingleNodeTestCase { existingShardRouting.currentNodeId(), null, existingShardRouting.primary(), ShardRoutingState.INITIALIZING, existingShardRouting.allocationId()); shardRouting = shardRouting.updateUnassigned(new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "fake recovery"), - RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE); + RecoverySource.ExistingStoreRecoverySource.INSTANCE); return shardRouting; } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 2228e1b017f..7f37846d3f0 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java 
@@ -22,6 +22,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; @@ -30,6 +32,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; import org.elasticsearch.Assertions; import org.elasticsearch.Version; @@ -89,8 +92,13 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -111,6 +119,7 @@ import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotShardFailure; +import org.elasticsearch.test.CorruptionUtils; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.FieldMaskingReader; import org.elasticsearch.test.VersionUtils; @@ -119,7 +128,11 @@ import org.elasticsearch.ElasticsearchException; import java.io.IOException; import java.nio.charset.Charset; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -160,10 +173,12 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -237,7 +252,8 @@ public class IndexShardTests extends IndexShardTestCase { assertNotNull(shardPath); // fail shard shard.failShard("test shard fail", new CorruptIndexException("", "")); - closeShards(shard); + shard.close("do not assert history", false); + shard.store().close(); // check state file still exists ShardStateMetaData shardStateMetaData = load(logger, shardPath.getShardStatePath()); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); @@ -828,7 +844,7 @@ public class IndexShardTests extends IndexShardTestCase { 
randomAlphaOfLength(8), true, ShardRoutingState.INITIALIZING, - RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE); + RecoverySource.EmptyStoreRecoverySource.INSTANCE); final Settings settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) @@ -1184,7 +1200,8 @@ public class IndexShardTests extends IndexShardTestCase { public void testShardStatsWithFailures() throws IOException { allowShardFailures(); final ShardId shardId = new ShardId("index", "_na_", 0); - final ShardRouting shardRouting = newShardRouting(shardId, "node", true, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE, ShardRoutingState.INITIALIZING); + final ShardRouting shardRouting = + newShardRouting(shardId, "node", true, ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); @@ -1230,7 +1247,7 @@ public class IndexShardTests extends IndexShardTestCase { }; try (Store store = createStore(shardId, new IndexSettings(metaData, Settings.EMPTY), directory)) { - IndexShard shard = newShard(shardRouting, shardPath, metaData, store, + IndexShard shard = newShard(shardRouting, shardPath, metaData, i -> store, null, new InternalEngineFactory(), () -> { }, EMPTY_EVENT_LISTENER); AtomicBoolean failureCallbackTriggered = new AtomicBoolean(false); @@ -1644,7 +1661,7 @@ public class IndexShardTests extends IndexShardTestCase { final ShardRouting replicaRouting = shard.routingEntry(); IndexShard newShard = reinitShard(shard, newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), true, ShardRoutingState.INITIALIZING, - RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE)); + RecoverySource.ExistingStoreRecoverySource.INSTANCE)); DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue(newShard.recoverFromStore()); @@ -1669,6 +1686,7 @@ public class IndexShardTests extends IndexShardTestCase { flushShard(shard); translogOps = 0; } + String historyUUID = shard.getHistoryUUID(); IndexShard newShard = reinitShard(shard); DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); @@ -1683,6 +1701,29 @@ public class IndexShardTests extends IndexShardTestCase { assertThat(newShard.getReplicationTracker().getTrackedLocalCheckpointForShard(newShard.routingEntry().allocationId().getId()) .getLocalCheckpoint(), equalTo(totalOps - 1L)); assertDocCount(newShard, totalOps); + assertThat(newShard.getHistoryUUID(), equalTo(historyUUID)); + closeShards(newShard); + } + + public void testRecoverFromStalePrimaryForceNewHistoryUUID() throws IOException { + final IndexShard shard = newStartedShard(true); + int totalOps = randomInt(10); + for (int i = 0; i < totalOps; i++) { + indexDoc(shard, "_doc", Integer.toString(i)); + } + if (randomBoolean()) { + shard.updateLocalCheckpointForShard(shard.shardRouting.allocationId().getId(), totalOps - 1); + flushShard(shard); + } + String historyUUID = shard.getHistoryUUID(); + IndexShard newShard = reinitShard(shard, newShardRouting(shard.shardId(), shard.shardRouting.currentNodeId(), true, + ShardRoutingState.INITIALIZING, 
RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE)); + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); + assertTrue(newShard.recoverFromStore()); + IndexShardTestCase.updateRoutingEntry(newShard, newShard.routingEntry().moveToStarted()); + assertDocCount(newShard, totalOps); + assertThat(newShard.getHistoryUUID(), not(equalTo(historyUUID))); closeShards(newShard); } @@ -1719,7 +1760,7 @@ public class IndexShardTests extends IndexShardTestCase { final ShardRouting primaryShardRouting = shard.routingEntry(); IndexShard newShard = reinitShard(otherShard, ShardRoutingHelper.initWithSameId(primaryShardRouting, - RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE)); + RecoverySource.ExistingStoreRecoverySource.INSTANCE)); DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue(newShard.recoverFromStore()); @@ -1745,7 +1786,7 @@ public class IndexShardTests extends IndexShardTestCase { for (int i = 0; i < 2; i++) { newShard = reinitShard(newShard, ShardRoutingHelper.initWithSameId(primaryShardRouting, - RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE)); + RecoverySource.ExistingStoreRecoverySource.INSTANCE)); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue(newShard.recoverFromStore()); try (Translog.Snapshot snapshot = getTranslog(newShard).newSnapshot()) { @@ -1763,7 +1804,7 @@ public class IndexShardTests extends IndexShardTestCase { } final ShardRouting shardRouting = shard.routingEntry(); IndexShard newShard = reinitShard(shard, - ShardRoutingHelper.initWithSameId(shardRouting, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE) + ShardRoutingHelper.initWithSameId(shardRouting, RecoverySource.EmptyStoreRecoverySource.INSTANCE) ); DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); @@ -1812,7 +1853,7 @@ public class IndexShardTests extends IndexShardTestCase { } newShard = reinitShard(newShard, - ShardRoutingHelper.initWithSameId(routing, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE)); + ShardRoutingHelper.initWithSameId(routing, RecoverySource.EmptyStoreRecoverySource.INSTANCE)); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue("recover even if there is nothing to recover", newShard.recoverFromStore()); @@ -1850,7 +1891,7 @@ public class IndexShardTests extends IndexShardTestCase { final ShardRouting replicaRouting = shard.routingEntry(); IndexShard newShard = reinitShard(shard, newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), true, ShardRoutingState.INITIALIZING, - RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE)); + RecoverySource.ExistingStoreRecoverySource.INSTANCE)); newShard.pendingPrimaryTerm++; newShard.operationPrimaryTerm++; DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); @@ -1890,7 +1931,7 @@ public class IndexShardTests extends IndexShardTestCase { assertDocs(target, "1"); flushShard(source); // only flush source ShardRouting routing = 
ShardRoutingHelper.initWithSameId(target.routingEntry(), - RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE); + RecoverySource.ExistingStoreRecoverySource.INSTANCE); final Snapshot snapshot = new Snapshot("foo", new SnapshotId("bar", UUIDs.randomBase64UUID())); routing = ShardRoutingHelper.newWithRestoreSource(routing, new RecoverySource.SnapshotRecoverySource(snapshot, Version.CURRENT, "test")); @@ -1942,9 +1983,9 @@ public class IndexShardTests extends IndexShardTestCase { } try (Engine.Searcher searcher = shard.acquireSearcher("test")) { TopDocs search = searcher.searcher().search(new TermQuery(new Term("foo", "bar")), 10); - assertEquals(search.totalHits, 1); + assertEquals(search.totalHits.value, 1); search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10); - assertEquals(search.totalHits, 1); + assertEquals(search.totalHits.value, 1); } IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { @Override @@ -1959,7 +2000,7 @@ public class IndexShardTests extends IndexShardTestCase { }; closeShards(shard); IndexShard newShard = newShard( - ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE), + ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.INSTANCE), shard.shardPath(), shard.indexSettings().getIndexMetaData(), null, @@ -1972,9 +2013,9 @@ public class IndexShardTests extends IndexShardTestCase { try (Engine.Searcher searcher = newShard.acquireSearcher("test")) { TopDocs search = searcher.searcher().search(new TermQuery(new Term("foo", "bar")), 10); - assertEquals(search.totalHits, 0); + assertEquals(search.totalHits.value, 0); search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10); - assertEquals(search.totalHits, 1); + assertEquals(search.totalHits.value, 1); } try (Engine.GetResult getResult = newShard .get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) { @@ -2112,7 +2153,7 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(shard); IndexShard newShard = newShard( - ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE), + ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.INSTANCE), shard.shardPath(), shard.indexSettings().getIndexMetaData(), null, @@ -2394,7 +2435,8 @@ public class IndexShardTests extends IndexShardTestCase { public void testDocStats() throws IOException, InterruptedException { IndexShard indexShard = null; try { - indexShard = newStartedShard(); + indexShard = newStartedShard( + Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0).build()); final long numDocs = randomIntBetween(2, 32); // at least two documents so we have docs to delete final long numDocsToDelete = randomLongBetween(1, numDocs); for (int i = 0; i < numDocs; i++) { @@ -2424,7 +2466,16 @@ public class IndexShardTests extends IndexShardTestCase { deleteDoc(indexShard, "_doc", id); indexDoc(indexShard, "_doc", id); } - + // Need to update and sync the global checkpoint as the soft-deletes retention MergePolicy depends on it. 
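+ // A primary advances the checkpoint through its local tracker; a replica has to be told the new value explicitly.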
+ if (indexShard.indexSettings.isSoftDeleteEnabled()) { + if (indexShard.routingEntry().primary()) { + indexShard.updateGlobalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), + indexShard.getLocalCheckpoint()); + } else { + indexShard.updateGlobalCheckpointOnReplica(indexShard.getLocalCheckpoint(), "test"); + } + indexShard.sync(); + } // flush the buffered deletes final FlushRequest flushRequest = new FlushRequest(); flushRequest.force(false); @@ -2571,6 +2622,143 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(newShard); } + public void testIndexCheckOnStartup() throws Exception { + final IndexShard indexShard = newStartedShard(true); + + final long numDocs = between(10, 100); + for (long i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Long.toString(i), "{}"); + } + indexShard.flush(new FlushRequest()); + closeShards(indexShard); + + final ShardPath shardPath = indexShard.shardPath(); + + final Path indexPath = corruptIndexFile(shardPath); + + final AtomicInteger corruptedMarkerCount = new AtomicInteger(); + final SimpleFileVisitor<Path> corruptedVisitor = new SimpleFileVisitor<Path>() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().startsWith(Store.CORRUPTED)) { + corruptedMarkerCount.incrementAndGet(); + } + return FileVisitResult.CONTINUE; + } + }; + Files.walkFileTree(indexPath, corruptedVisitor); + + assertThat("corruption marker should not be there", corruptedMarkerCount.get(), equalTo(0)); + + final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), + RecoverySource.ExistingStoreRecoverySource.INSTANCE + ); + // start the shard and perform the index check on startup.
This forces the shard to fail due to the corrupted index files + final IndexMetaData indexMetaData = IndexMetaData.builder(indexShard.indexSettings().getIndexMetaData()) + .settings(Settings.builder() + .put(indexShard.indexSettings.getSettings()) + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("true", "checksum"))) + .build(); + + IndexShard corruptedShard = newShard(shardRouting, shardPath, indexMetaData, + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + + final IndexShardRecoveryException indexShardRecoveryException = + expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); + assertThat(indexShardRecoveryException.getMessage(), equalTo("failed recovery")); + + // check that the corruption marker is there + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store has to be marked as corrupted", corruptedMarkerCount.get(), equalTo(1)); + + try { + closeShards(corruptedShard); + } catch (RuntimeException e) { + // Ignored because a corrupted shard can throw various exceptions on close + } + } + + public void testShardDoesNotStartIfCorruptedMarkerIsPresent() throws Exception { + final IndexShard indexShard = newStartedShard(true); + + final long numDocs = between(10, 100); + for (long i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Long.toString(i), "{}"); + } + indexShard.flush(new FlushRequest()); + closeShards(indexShard); + + final ShardPath shardPath = indexShard.shardPath(); + + final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), + RecoverySource.ExistingStoreRecoverySource.INSTANCE + ); + final IndexMetaData indexMetaData = indexShard.indexSettings().getIndexMetaData(); + + final Path indexPath = shardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + + // create a corruption marker + final String corruptionMessage = "fake ioexception"; + try (Store store = createStore(indexShard.indexSettings(), shardPath)) { + store.markStoreCorrupted(new IOException(corruptionMessage)); + } + + // try to start the shard on the corrupted files + final IndexShard corruptedShard = newShard(shardRouting, shardPath, indexMetaData, + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + + final IndexShardRecoveryException exception1 = expectThrows(IndexShardRecoveryException.class, + () -> newStartedShard(p -> corruptedShard, true)); + assertThat(exception1.getCause().getMessage(), equalTo(corruptionMessage + " (resource=preexisting_corruption)")); + closeShards(corruptedShard); + + final AtomicInteger corruptedMarkerCount = new AtomicInteger(); + final SimpleFileVisitor<Path> corruptedVisitor = new SimpleFileVisitor<Path>() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().startsWith(Store.CORRUPTED)) { + corruptedMarkerCount.incrementAndGet(); + } + return FileVisitResult.CONTINUE; + } + }; + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store has to be marked as corrupted", corruptedMarkerCount.get(), equalTo(1)); + + // try one more time to start the shard on the corrupted files + final IndexShard corruptedShard2 = newShard(shardRouting, shardPath, indexMetaData, + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + + final IndexShardRecoveryException exception2 = expectThrows(IndexShardRecoveryException.class, + () ->
newStartedShard(p -> corruptedShard2, true)); + assertThat(exception2.getCause().getMessage(), equalTo(corruptionMessage + " (resource=preexisting_corruption)")); + closeShards(corruptedShard2); + + // check that corrupt marker is there + corruptedMarkerCount.set(0); + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store still has a single corrupt marker", corruptedMarkerCount.get(), equalTo(1)); + } + + private Path corruptIndexFile(ShardPath shardPath) throws IOException { + final Path indexPath = shardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + final Path[] filesToCorrupt = + Files.walk(indexPath) + .filter(p -> { + final String name = p.getFileName().toString(); + return Files.isRegularFile(p) + && name.startsWith("extra") == false // Skip files added by Lucene's ExtrasFS + && IndexWriter.WRITE_LOCK_NAME.equals(name) == false + && name.startsWith("segments_") == false && name.endsWith(".si") == false; + }) + .toArray(Path[]::new); + CorruptionUtils.corruptFile(random(), filesToCorrupt); + return indexPath; + } + /** * Simulates a scenario that happens when we are async fetching snapshot metadata from GatewayService * and checking index concurrently. This should always be possible without any exception. @@ -2589,12 +2777,12 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(indexShard); final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), - isPrimary ? RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE + isPrimary ? RecoverySource.ExistingStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE ); final IndexMetaData indexMetaData = IndexMetaData.builder(indexShard.indexSettings().getIndexMetaData()) .settings(Settings.builder() .put(indexShard.indexSettings.getSettings()) - .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum", "fix"))) + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum"))) .build(); final IndexShard newShard = newShard(shardRouting, indexShard.shardPath(), indexMetaData, null, null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); @@ -2962,6 +3150,7 @@ public class IndexShardTests extends IndexShardTestCase { assertThat(breaker.getUsed(), greaterThan(preRefreshBytes)); indexDoc(primary, "_doc", "4", "{\"foo\": \"potato\"}"); + indexDoc(primary, "_doc", "5", "{\"foo\": \"potato\"}"); // Forces a refresh with the INTERNAL scope ((InternalEngine) primary.getEngine()).writeIndexingBuffer(); @@ -2973,6 +3162,13 @@ public class IndexShardTests extends IndexShardTestCase { // Deleting a doc causes its memory to be freed from the breaker deleteDoc(primary, "_doc", "0"); + // Here we are testing that a fully deleted segment should be dropped and its memory usage is freed. + // In order to instruct the merge policy not to keep a fully deleted segment, + // we need to flush and make that commit safe so that the SoftDeletesPolicy can drop everything. 
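+ // syncing first persists the global checkpoint, which is what lets the flushed commit qualify as the safe commit.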
+ if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)) { + primary.sync(); + flushShard(primary); + } primary.refresh("force refresh"); ss = primary.segmentStats(randomBoolean()); @@ -3064,6 +3260,7 @@ public class IndexShardTests extends IndexShardTestCase { // Close remaining searchers IOUtils.close(searchers); + primary.refresh("test"); SegmentsStats ss = primary.segmentStats(randomBoolean()); CircuitBreaker breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); @@ -3090,7 +3287,7 @@ public class IndexShardTests extends IndexShardTestCase { .settings(settings) .primaryTerm(0, 1).build(); ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(metaData.getIndex(), 0), "n1", true, ShardRoutingState - .INITIALIZING, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE); + .INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); final ShardId shardId = shardRouting.shardId(); final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); @@ -3181,4 +3378,28 @@ public class IndexShardTests extends IndexShardTestCase { } + public void testSupplyTombstoneDoc() throws Exception { + IndexShard shard = newStartedShard(); + String id = randomRealisticUnicodeOfLengthBetween(1, 10); + ParsedDocument deleteTombstone = shard.getEngine().config().getTombstoneDocSupplier().newDeleteTombstoneDoc("doc", id); + assertThat(deleteTombstone.docs(), hasSize(1)); + ParseContext.Document deleteDoc = deleteTombstone.docs().get(0); + assertThat(deleteDoc.getFields().stream().map(IndexableField::name).collect(Collectors.toList()), + containsInAnyOrder(IdFieldMapper.NAME, VersionFieldMapper.NAME, + SeqNoFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME, SeqNoFieldMapper.TOMBSTONE_NAME)); + assertThat(deleteDoc.getField(IdFieldMapper.NAME).binaryValue(), equalTo(Uid.encodeId(id))); + assertThat(deleteDoc.getField(SeqNoFieldMapper.TOMBSTONE_NAME).numericValue().longValue(), equalTo(1L)); + + final String reason = randomUnicodeOfLength(200); + ParsedDocument noopTombstone = shard.getEngine().config().getTombstoneDocSupplier().newNoopTombstoneDoc(reason); + assertThat(noopTombstone.docs(), hasSize(1)); + ParseContext.Document noopDoc = noopTombstone.docs().get(0); + assertThat(noopDoc.getFields().stream().map(IndexableField::name).collect(Collectors.toList()), + containsInAnyOrder(VersionFieldMapper.NAME, SourceFieldMapper.NAME, SeqNoFieldMapper.TOMBSTONE_NAME, + SeqNoFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME)); + assertThat(noopDoc.getField(SeqNoFieldMapper.TOMBSTONE_NAME).numericValue().longValue(), equalTo(1L)); + assertThat(noopDoc.getField(SourceFieldMapper.NAME).binaryValue(), equalTo(new BytesRef(reason))); + + closeShards(shard); + } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java index 4e6e3036f4c..d539b716694 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java @@ -178,7 +178,7 @@ public class NewPathForShardTests extends ESTestCase { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), path) .putList(Environment.PATH_DATA_SETTING.getKey(), paths).build(); - NodeEnvironment nodeEnv = new 
NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); + NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {}); // Make sure all our mocking above actually worked: NodePath[] nodePaths = nodeEnv.nodePaths(); @@ -233,7 +233,7 @@ public class NewPathForShardTests extends ESTestCase { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), path) .putList(Environment.PATH_DATA_SETTING.getKey(), paths).build(); - NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); + NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {}); // Make sure all our mocking above actually worked: NodePath[] nodePaths = nodeEnv.nodePaths(); @@ -290,7 +290,7 @@ public class NewPathForShardTests extends ESTestCase { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), path) .putList(Environment.PATH_DATA_SETTING.getKey(), paths).build(); - NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); + NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {}); aFileStore.usableSpace = 100000; bFileStore.usableSpace = 1000; @@ -315,7 +315,7 @@ public class NewPathForShardTests extends ESTestCase { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), path) .putList(Environment.PATH_DATA_SETTING.getKey(), paths).build(); - NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); + NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {}); // Make sure all our mocking above actually worked: NodePath[] nodePaths = nodeEnv.nodePaths(); diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index ae2cc84e487..29b16ca28f4 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -106,17 +106,22 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase { .isPresent(), is(false)); } - - assertEquals(globalCheckPoint == numDocs - 1 ? 0 : numDocs, resyncTask.getTotalOperations()); if (syncNeeded && globalCheckPoint < numDocs - 1) { - long skippedOps = globalCheckPoint + 1; // everything up to global checkpoint included - assertEquals(skippedOps, resyncTask.getSkippedOperations()); - assertEquals(numDocs - skippedOps, resyncTask.getResyncedOperations()); + if (shard.indexSettings.isSoftDeleteEnabled()) { + assertThat(resyncTask.getSkippedOperations(), equalTo(0)); + assertThat(resyncTask.getResyncedOperations(), equalTo(resyncTask.getTotalOperations())); + assertThat(resyncTask.getTotalOperations(), equalTo(Math.toIntExact(numDocs - 1 - globalCheckPoint))); + } else { + int skippedOps = Math.toIntExact(globalCheckPoint + 1); // everything up to global checkpoint included + assertThat(resyncTask.getSkippedOperations(), equalTo(skippedOps)); + assertThat(resyncTask.getResyncedOperations(), equalTo(numDocs - skippedOps)); + assertThat(resyncTask.getTotalOperations(), equalTo(globalCheckPoint == numDocs - 1 ? 
0 : numDocs)); + } } else { - assertEquals(0, resyncTask.getSkippedOperations()); - assertEquals(0, resyncTask.getResyncedOperations()); + assertThat(resyncTask.getSkippedOperations(), equalTo(0)); + assertThat(resyncTask.getResyncedOperations(), equalTo(0)); + assertThat(resyncTask.getTotalOperations(), equalTo(0)); } - closeShards(shard); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 2d1c1d4e15a..2492ab4cd8a 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -49,7 +50,6 @@ import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; @@ -105,13 +105,7 @@ public class RefreshListenersTests extends ESTestCase { ShardId shardId = new ShardId(new Index("index", "_na_"), 1); String allocationId = UUIDs.randomBase64UUID(random()); Directory directory = newDirectory(); - DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { - @Override - public Directory newDirectory() throws IOException { - return directory; - } - }; - store = new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); + store = new Store(shardId, indexSettings, directory, new DummyShardLock(shardId)); IndexWriterConfig iwc = newIndexWriterConfig(); TranslogConfig translogConfig = new TranslogConfig(shardId, createTempDir("translog"), indexSettings, BigArrays.NON_RECYCLING_INSTANCE); @@ -130,9 +124,10 @@ public class RefreshListenersTests extends ESTestCase { indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), Collections.singletonList(listeners), Collections.emptyList(), null, - (e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, () -> primaryTerm); + new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, () -> primaryTerm, + EngineTestCase.tombstoneDocSupplier()); engine = new InternalEngine(config); - engine.recoverFromTranslog(); + engine.recoverFromTranslog((e, s) -> 0, Long.MAX_VALUE); listeners.setCurrentRefreshLocationSupplier(engine::getTranslogLastWriteLocation); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java index 95772910747..04d15d39b58 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -77,48 +77,4 @@ public class ShardGetServiceTests extends IndexShardTestCase { closeShards(primary); } - - public void testGetForUpdateWithParentField() throws IOException { - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put("index.version.created", Version.V_5_6_0) // for parent field mapper - .build(); - IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") - .settings(settings) - .primaryTerm(0, 1).build(); - IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); - recoverShardFromStore(primary); - Engine.IndexResult test = indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); - assertTrue(primary.getEngine().refreshNeeded()); - GetResult testGet = primary.getService().getForUpdate("test", "0", test.getVersion(), VersionType.INTERNAL); - assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); - assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); - try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { - assertEquals(searcher.reader().maxDoc(), 1); // we refreshed - } - - Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null); - assertTrue(primary.getEngine().refreshNeeded()); - GetResult testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); - assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); - assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); - try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { - assertEquals(searcher.reader().maxDoc(), 1); // we read from the translog - } - primary.getEngine().refresh("test"); - try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { - assertEquals(searcher.reader().maxDoc(), 2); - } - - // now again from the reader - test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null); - assertTrue(primary.getEngine().refreshNeeded()); - testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); - assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); - assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); - - closeShards(primary); - } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java index 9dcb712a05d..9296b4f3111 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java @@ -29,6 +29,7 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; @@ -260,9 
+261,8 @@ public class ShardSplittingQueryTests extends ESTestCase { try (IndexReader reader = DirectoryReader.open(dir)) { IndexSearcher searcher = new IndexSearcher(reader); searcher.setQueryCache(null); - final boolean needsScores = false; - final Weight splitWeight = searcher.createNormalizedWeight(new ShardSplittingQuery(metaData, targetShardId, hasNested), - needsScores); + final Weight splitWeight = searcher.createWeight(searcher.rewrite(new ShardSplittingQuery(metaData, targetShardId, hasNested)), + ScoreMode.COMPLETE_NO_SCORES, 1f); final List<LeafReaderContext> leaves = reader.leaves(); for (final LeafReaderContext ctx : leaves) { Scorer scorer = splitWeight.scorer(ctx); diff --git a/server/src/test/java/org/elasticsearch/index/similarity/LegacySimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/LegacySimilarityTests.java new file mode 100644 index 00000000000..69ad3bd128f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/similarity/LegacySimilarityTests.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.similarity; + +import org.apache.lucene.search.similarities.BM25Similarity; +import org.apache.lucene.search.similarities.BooleanSimilarity; +import org.apache.lucene.search.similarities.ClassicSimilarity; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import java.io.IOException; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; + +public class LegacySimilarityTests extends ESSingleNodeTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public void testResolveDefaultSimilaritiesOn6xIndex() { + final Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) // otherwise classic is forbidden + .build(); + final SimilarityService similarityService = createIndex("foo", indexSettings).similarityService(); + assertThat(similarityService.getSimilarity("classic").get(), instanceOf(ClassicSimilarity.class)); + assertWarnings("The [classic] similarity is now deprecated in favour of BM25, which is generally " + + "accepted as a better alternative. 
Use the [BM25] similarity or build a custom [scripted] similarity " + + "instead."); + assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(BM25Similarity.class)); + assertThat(similarityService.getSimilarity("boolean").get(), instanceOf(BooleanSimilarity.class)); + assertThat(similarityService.getSimilarity("default"), equalTo(null)); + } + + public void testResolveSimilaritiesFromMappingClassic() throws IOException { + try (XContentBuilder mapping = XContentFactory.jsonBuilder()) { + mapping.startObject(); + { + mapping.startObject("type"); + { + mapping.startObject("properties"); + { + mapping.startObject("field1"); + { + mapping.field("type", "text"); + mapping.field("similarity", "my_similarity"); + } + mapping.endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + + final Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_3_0) // otherwise classic is forbidden + .put("index.similarity.my_similarity.type", "classic") + .put("index.similarity.my_similarity.discount_overlaps", false) + .build(); + final MapperService mapperService = createIndex("foo", indexSettings, "type", mapping).mapperService(); + assertThat(mapperService.fullName("field1").similarity().get(), instanceOf(ClassicSimilarity.class)); + + final ClassicSimilarity similarity = (ClassicSimilarity) mapperService.fullName("field1").similarity().get(); + assertThat(similarity.getDiscountOverlaps(), equalTo(false)); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java index cc1d0e827c7..22089bc40e4 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FieldInvertState; +import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; @@ -45,6 +46,7 @@ import org.elasticsearch.script.SimilarityWeightScript; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.util.Arrays; import java.util.concurrent.atomic.AtomicBoolean; public class ScriptedSimilarityTests extends ESTestCase { @@ -65,7 +67,10 @@ public class ScriptedSimilarityTests extends ESTestCase { final int length = TestUtil.nextInt(random(), 1, 100); final int position = random().nextInt(length); final int numOverlaps = random().nextInt(length); - FieldInvertState state = new FieldInvertState(Version.LATEST.major, "foo", position, length, numOverlaps, 100); + int maxTermFrequency = TestUtil.nextInt(random(), 1, 10); + int uniqueTermCount = TestUtil.nextInt(random(), 1, 10); + FieldInvertState state = new FieldInvertState(Version.LATEST.major, "foo", IndexOptions.DOCS_AND_FREQS, position, length, + numOverlaps, 100, maxTermFrequency, uniqueTermCount); assertEquals( sim2.computeNorm(state), sim1.computeNorm(state), @@ -81,7 +86,17 @@ public class ScriptedSimilarityTests extends ESTestCase { @Override public double execute(double weight, ScriptedSimilarity.Query query, ScriptedSimilarity.Field field, ScriptedSimilarity.Term term, - ScriptedSimilarity.Doc doc) 
throws IOException { + ScriptedSimilarity.Doc doc) { + + StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace(); + if (Arrays.stream(stackTraceElements).anyMatch(ste -> { + return ste.getClassName().endsWith(".TermScorer") && + ste.getMethodName().equals("score"); + }) == false) { + // this might happen when computing max scores + return Float.MAX_VALUE; + } + assertEquals(1, weight, 0); assertNotNull(doc); assertEquals(2f, doc.getFreq(), 0); @@ -129,7 +144,7 @@ public class ScriptedSimilarityTests extends ESTestCase { .add(new TermQuery(new Term("match", "yes")), Occur.FILTER) .build(), 3.2f); TopDocs topDocs = searcher.search(query, 1); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); assertTrue(called.get()); assertEquals(42, topDocs.scoreDocs[0].score, 0); w.close(); @@ -143,14 +158,13 @@ public class ScriptedSimilarityTests extends ESTestCase { @Override public double execute(ScriptedSimilarity.Query query, ScriptedSimilarity.Field field, - ScriptedSimilarity.Term term) throws IOException { - assertNotNull(field); + ScriptedSimilarity.Term term) { assertEquals(3, field.getDocCount()); assertEquals(5, field.getSumDocFreq()); assertEquals(6, field.getSumTotalTermFreq()); assertNotNull(term); - assertEquals(2, term.getDocFreq()); - assertEquals(3, term.getTotalTermFreq()); + assertEquals(1, term.getDocFreq()); + assertEquals(2, term.getTotalTermFreq()); assertNotNull(query); assertEquals(3.2f, query.getBoost(), 0); initCalled.set(true); @@ -166,7 +180,17 @@ public class ScriptedSimilarityTests extends ESTestCase { @Override public double execute(double weight, ScriptedSimilarity.Query query, ScriptedSimilarity.Field field, ScriptedSimilarity.Term term, - ScriptedSimilarity.Doc doc) throws IOException { + ScriptedSimilarity.Doc doc) { + + StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace(); + if (Arrays.stream(stackTraceElements).anyMatch(ste -> { + return ste.getClassName().endsWith(".TermScorer") && + ste.getMethodName().equals("score"); + }) == false) { + // this might happen when computing max scores + return Float.MAX_VALUE; + } + assertEquals(28, weight, 0d); assertNotNull(doc); assertEquals(2f, doc.getFreq(), 0); @@ -176,8 +200,8 @@ public class ScriptedSimilarityTests extends ESTestCase { assertEquals(5, field.getSumDocFreq()); assertEquals(6, field.getSumTotalTermFreq()); assertNotNull(term); - assertEquals(2, term.getDocFreq()); - assertEquals(3, term.getTotalTermFreq()); + assertEquals(1, term.getDocFreq()); + assertEquals(2, term.getTotalTermFreq()); assertNotNull(query); assertEquals(3.2f, query.getBoost(), 0); called.set(true); @@ -191,8 +215,7 @@ public class ScriptedSimilarityTests extends ESTestCase { IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setSimilarity(sim)); Document doc = new Document(); - doc.add(new TextField("f", "foo bar", Store.NO)); - doc.add(new StringField("match", "no", Store.NO)); + doc.add(new TextField("f", "bar baz", Store.NO)); w.addDocument(doc); doc = new Document(); @@ -202,19 +225,15 @@ public class ScriptedSimilarityTests extends ESTestCase { doc = new Document(); doc.add(new TextField("f", "bar", Store.NO)); - doc.add(new StringField("match", "no", Store.NO)); w.addDocument(doc); IndexReader r = DirectoryReader.open(w); w.close(); IndexSearcher searcher = new IndexSearcher(r); searcher.setSimilarity(sim); - Query query = new BoostQuery(new BooleanQuery.Builder() - .add(new TermQuery(new Term("f", "foo")), Occur.SHOULD) - .add(new TermQuery(new 
Term("match", "yes")), Occur.FILTER) - .build(), 3.2f); + Query query = new BoostQuery(new TermQuery(new Term("f", "foo")), 3.2f); TopDocs topDocs = searcher.search(query, 1); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); assertTrue(initCalled.get()); assertTrue(called.get()); assertEquals(42, topDocs.scoreDocs[0].score, 0); diff --git a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java index 6102a1b55f1..d2244eb8a5a 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.similarities.AfterEffectL; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BasicModelG; import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.DFISimilarity; import org.apache.lucene.search.similarities.DFRSimilarity; import org.apache.lucene.search.similarities.DistributionSPL; @@ -33,8 +32,6 @@ import org.apache.lucene.search.similarities.LMDirichletSimilarity; import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity; import org.apache.lucene.search.similarities.LambdaTTF; import org.apache.lucene.search.similarities.NormalizationH2; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -72,39 +69,6 @@ public class SimilarityTests extends ESSingleNodeTestCase { + "similarity instead.", e.getMessage()); } - public void testResolveDefaultSimilaritiesOn6xIndex() { - Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) // otherwise classic is forbidden - .build(); - SimilarityService similarityService = createIndex("foo", indexSettings).similarityService(); - assertThat(similarityService.getSimilarity("classic").get(), instanceOf(ClassicSimilarity.class)); - assertWarnings("The [classic] similarity is now deprecated in favour of BM25, which is generally " - + "accepted as a better alternative. 
Use the [BM25] similarity or build a custom [scripted] similarity " - - "instead."); - assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(BM25Similarity.class)); - assertThat(similarityService.getSimilarity("boolean").get(), instanceOf(BooleanSimilarity.class)); - assertThat(similarityService.getSimilarity("default"), equalTo(null)); - } - - public void testResolveSimilaritiesFromMapping_classic() throws IOException { - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties") - .startObject("field1").field("type", "text").field("similarity", "my_similarity").endObject() - .endObject() - .endObject().endObject(); - - Settings indexSettings = Settings.builder() - .put("index.similarity.my_similarity.type", "classic") - .put("index.similarity.my_similarity.discount_overlaps", false) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) // otherwise classic is forbidden - .build(); - MapperService mapperService = createIndex("foo", indexSettings, "type", mapping).mapperService(); - assertThat(mapperService.fullName("field1").similarity().get(), instanceOf(ClassicSimilarity.class)); - - ClassicSimilarity similarity = (ClassicSimilarity) mapperService.fullName("field1").similarity().get(); - assertThat(similarity.getDiscountOverlaps(), equalTo(false)); - } - public void testResolveSimilaritiesFromMapping_classicIsForbidden() throws IOException { Settings indexSettings = Settings.builder() .put("index.similarity.my_similarity.type", "classic") diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index fdadb5999f7..14ef65c368c 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -121,8 +121,11 @@ public class CorruptedFileIT extends ESIntegTestCase { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class, MockIndexEventListener.TestPlugin.class, MockFSIndexStore.TestPlugin.class, - InternalSettingsPlugin.class); // uses index.version.created + return Arrays.asList( + MockTransportService.TestPlugin.class, + MockIndexEventListener.TestPlugin.class, + MockFSIndexStore.TestPlugin.class, + InternalSettingsPlugin.class); } /** diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index 2cea9bb3646..584ce9b0642 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -104,12 +104,10 @@ public class StoreTests extends ESTestCase { private static final Version MIN_SUPPORTED_LUCENE_VERSION = org.elasticsearch.Version.CURRENT .minimumIndexCompatibilityVersion().luceneVersion; - public void testRefCount() throws IOException { + public void testRefCount() { final ShardId shardId = new ShardId("index", "_na_", 1); - DirectoryService directoryService = new LuceneManagedDirectoryService(random()); IndexSettings indexSettings = INDEX_SETTINGS; - - Store store = new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); + Store store = new Store(shardId, indexSettings, StoreTests.newDirectory(random()), new DummyShardLock(shardId)); int incs = randomIntBetween(1, 100); for (int i = 0; i < incs; i++) { if (randomBoolean()) { @@ -296,8 +294,7 @@ 
public class StoreTests extends ESTestCase { public void testNewChecksums() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); - DirectoryService directoryService = new LuceneManagedDirectoryService(random()); - Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId)); + Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId)); // set default codec - all segments need checksums IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec())); int docs = 1 + random().nextInt(100); @@ -347,7 +344,7 @@ public class StoreTests extends ESTestCase { assertConsistent(store, metadata); TestUtil.checkIndex(store.directory()); - assertDeleteContent(store, directoryService); + assertDeleteContent(store, store.directory()); IOUtils.close(store); } @@ -455,32 +452,11 @@ public class StoreTests extends ESTestCase { } - public void assertDeleteContent(Store store, DirectoryService service) throws IOException { + public void assertDeleteContent(Store store, Directory dir) throws IOException { deleteContent(store.directory()); assertThat(Arrays.toString(store.directory().listAll()), store.directory().listAll().length, equalTo(0)); assertThat(store.stats().sizeInBytes(), equalTo(0L)); - assertThat(service.newDirectory().listAll().length, equalTo(0)); - } - - private static final class LuceneManagedDirectoryService extends DirectoryService { - private final Directory dir; - private final Random random; - - LuceneManagedDirectoryService(Random random) { - this(random, true); - } - - LuceneManagedDirectoryService(Random random, boolean preventDoubleWrite) { - super(new ShardId(INDEX_SETTINGS.getIndex(), 1), INDEX_SETTINGS); - dir = StoreTests.newDirectory(random); - this.random = random; - } - - @Override - public Directory newDirectory() throws IOException { - return dir; - } - + assertThat(dir.listAll().length, equalTo(0)); } public static void assertConsistent(Store store, Store.MetadataSnapshot metadata) throws IOException { @@ -511,8 +487,7 @@ public class StoreTests extends ESTestCase { iwc.setMergePolicy(NoMergePolicy.INSTANCE); iwc.setUseCompoundFile(random.nextBoolean()); final ShardId shardId = new ShardId("index", "_na_", 1); - DirectoryService directoryService = new LuceneManagedDirectoryService(random); - Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId)); + Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId)); IndexWriter writer = new IndexWriter(store.directory(), iwc); final boolean lotsOfSegments = rarely(random); for (Document d : docs) { @@ -526,7 +501,7 @@ public class StoreTests extends ESTestCase { writer.commit(); writer.close(); first = store.getMetadata(null); - assertDeleteContent(store, directoryService); + assertDeleteContent(store, store.directory()); store.close(); } long time = new Date().getTime(); @@ -541,8 +516,7 @@ public class StoreTests extends ESTestCase { iwc.setMergePolicy(NoMergePolicy.INSTANCE); iwc.setUseCompoundFile(random.nextBoolean()); final ShardId shardId = new ShardId("index", "_na_", 1); - DirectoryService directoryService = new LuceneManagedDirectoryService(random); - store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId)); + store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new 
DummyShardLock(shardId)); IndexWriter writer = new IndexWriter(store.directory(), iwc); final boolean lotsOfSegments = rarely(random); for (Document d : docs) { @@ -639,8 +613,7 @@ public class StoreTests extends ESTestCase { public void testCleanupFromSnapshot() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); - DirectoryService directoryService = new LuceneManagedDirectoryService(random()); - Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId)); + Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId)); // this time random codec.... IndexWriterConfig indexWriterConfig = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()); // we keep all commits and that allows us clean based on multiple snapshots @@ -727,11 +700,10 @@ public class StoreTests extends ESTestCase { public void testOnCloseCallback() throws IOException { final ShardId shardId = new ShardId(new Index(randomRealisticUnicodeOfCodepointLengthBetween(1, 10), "_na_"), randomIntBetween(0, 100)); - DirectoryService directoryService = new LuceneManagedDirectoryService(random()); final AtomicInteger count = new AtomicInteger(0); final ShardLock lock = new DummyShardLock(shardId); - Store store = new Store(shardId, INDEX_SETTINGS, directoryService, lock, theLock -> { + Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), lock, theLock -> { assertEquals(shardId, theLock.getShardId()); assertEquals(lock, theLock); count.incrementAndGet(); @@ -748,11 +720,10 @@ public class StoreTests extends ESTestCase { public void testStoreStats() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); - DirectoryService directoryService = new LuceneManagedDirectoryService(random()); Settings settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(0)).build(); - Store store = new Store(shardId, IndexSettingsModule.newIndexSettings("index", settings), directoryService, + Store store = new Store(shardId, IndexSettingsModule.newIndexSettings("index", settings), StoreTests.newDirectory(random()), new DummyShardLock(shardId)); long initialStoreSize = 0; for (String extraFiles : store.directory().listAll()) { @@ -843,8 +814,7 @@ public class StoreTests extends ESTestCase { public void testUserDataRead() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); - DirectoryService directoryService = new LuceneManagedDirectoryService(random()); - Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId)); + Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId)); IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()); SnapshotDeletionPolicy deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); config.setIndexDeletionPolicy(deletionPolicy); @@ -867,7 +837,7 @@ public class StoreTests extends ESTestCase { assertThat(metadata.getCommitUserData().get(Engine.SYNC_COMMIT_ID), equalTo(syncId)); assertThat(metadata.getCommitUserData().get(Translog.TRANSLOG_GENERATION_KEY), equalTo(translogId)); TestUtil.checkIndex(store.directory()); - assertDeleteContent(store, directoryService); + 
assertDeleteContent(store, store.directory()); IOUtils.close(store); } @@ -893,8 +863,7 @@ public class StoreTests extends ESTestCase { public void testMarkCorruptedOnTruncatedSegmentsFile() throws IOException { IndexWriterConfig iwc = newIndexWriterConfig(); final ShardId shardId = new ShardId("index", "_na_", 1); - DirectoryService directoryService = new LuceneManagedDirectoryService(random()); - Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId)); + Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId)); IndexWriter writer = new IndexWriter(store.directory(), iwc); int numDocs = 1 + random().nextInt(10); @@ -945,15 +914,7 @@ public class StoreTests extends ESTestCase { writer.commit(); writer.close(); assertTrue(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id))); - - DirectoryService directoryService = new DirectoryService(shardId, INDEX_SETTINGS) { - - @Override - public Directory newDirectory() throws IOException { - return dir; - } - }; - Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId)); + Store store = new Store(shardId, INDEX_SETTINGS, dir, new DummyShardLock(shardId)); store.markStoreCorrupted(new CorruptIndexException("foo", "bar")); assertFalse(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id))); store.close(); @@ -962,14 +923,7 @@ public class StoreTests extends ESTestCase { public void testDeserializeCorruptionException() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); final Directory dir = new RAMDirectory(); // I use ram dir to prevent that virusscanner being a PITA - DirectoryService directoryService = new DirectoryService(shardId, INDEX_SETTINGS) { - - @Override - public Directory newDirectory() throws IOException { - return dir; - } - }; - Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId)); + Store store = new Store(shardId, INDEX_SETTINGS, dir, new DummyShardLock(shardId)); CorruptIndexException ex = new CorruptIndexException("foo", "bar"); store.markStoreCorrupted(ex); try { @@ -998,14 +952,7 @@ public class StoreTests extends ESTestCase { public void testCanReadOldCorruptionMarker() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); final Directory dir = new RAMDirectory(); // I use ram dir to prevent that virusscanner being a PITA - DirectoryService directoryService = new DirectoryService(shardId, INDEX_SETTINGS) { - - @Override - public Directory newDirectory() throws IOException { - return dir; - } - }; - Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId)); + Store store = new Store(shardId, INDEX_SETTINGS, dir, new DummyShardLock(shardId)); CorruptIndexException exception = new CorruptIndexException("foo", "bar"); String uuid = Store.CORRUPTED + UUIDs.randomBase64UUID(); @@ -1065,8 +1012,7 @@ public class StoreTests extends ESTestCase { public void testEnsureIndexHasHistoryUUID() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); - DirectoryService directoryService = new LuceneManagedDirectoryService(random()); - try (Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId))) { + try (Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId))) { store.createEmpty(); @@ -1098,8 +1044,7 @@ public class 
StoreTests extends ESTestCase { public void testHistoryUUIDCanBeForced() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); - DirectoryService directoryService = new LuceneManagedDirectoryService(random()); - try (Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId))) { + try (Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId))) { store.createEmpty(); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java index 9ae502fecb5..c8d4dbd43df 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java @@ -171,7 +171,7 @@ public class TranslogDeletionPolicyTests extends ESTestCase { } writer = TranslogWriter.create(new ShardId("index", "uuid", 0), translogUUID, gen, tempDir.resolve(Translog.getFilename(gen)), FileChannel::open, TranslogConfig.DEFAULT_BUFFER_SIZE, 1L, 1L, () -> 1L, - () -> 1L, randomNonNegativeLong()); + () -> 1L, randomNonNegativeLong(), new TragicExceptionHolder()); writer = Mockito.spy(writer); Mockito.doReturn(now - (numberOfReaders - gen + 1) * 1000).when(writer).getLastModifiedTime(); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 1c27a59e0ec..a0e0c481e5f 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -33,6 +33,7 @@ import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.Assertions; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; @@ -108,6 +109,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongSupplier; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.LongStream; @@ -358,7 +360,8 @@ public class TranslogTests extends ESTestCase { } markCurrentGenAsCommitted(translog); - try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(firstId + 1)) { + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), firstId + 1), randomNonNegativeLong())) { assertThat(snapshot, SnapshotMatchers.size(0)); assertThat(snapshot.totalOperations(), equalTo(0)); } @@ -643,6 +646,82 @@ public class TranslogTests extends ESTestCase { } } + public void testSnapshotFromMinGen() throws Exception { + Map<Long, List<Translog.Operation>> operationsByGen = new HashMap<>(); + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), 1), randomNonNegativeLong())) { + assertThat(snapshot, SnapshotMatchers.size(0)); + } + int iters = between(1, 10); + for (int i = 0; i < iters; i++) { + long currentGeneration = translog.currentFileGeneration(); + operationsByGen.putIfAbsent(currentGeneration, new 
ArrayList<>()); + int numOps = between(0, 20); + for (int op = 0; op < numOps; op++) { + long seqNo = randomLongBetween(0, 1000); + addToTranslogAndList(translog, operationsByGen.get(currentGeneration), new Translog.Index("test", + Long.toString(seqNo), seqNo, primaryTerm.get(), new byte[]{1})); + } + long minGen = randomLongBetween(translog.getMinFileGeneration(), translog.currentFileGeneration()); + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), minGen), Long.MAX_VALUE)) { + List<Translog.Operation> expectedOps = operationsByGen.entrySet().stream() + .filter(e -> e.getKey() >= minGen) + .flatMap(e -> e.getValue().stream()) + .collect(Collectors.toList()); + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedOps)); + } + long upToSeqNo = randomLongBetween(0, 2000); + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), minGen), upToSeqNo)) { + List<Translog.Operation> expectedOps = operationsByGen.entrySet().stream() + .filter(e -> e.getKey() >= minGen) + .flatMap(e -> e.getValue().stream().filter(op -> op.seqNo() <= upToSeqNo)) + .collect(Collectors.toList()); + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedOps)); + } + translog.rollGeneration(); + } + } + + public void testSeqNoFilterSnapshot() throws Exception { + final int generations = between(2, 20); + for (int gen = 0; gen < generations; gen++) { + List<Long> batch = LongStream.rangeClosed(0, between(0, 100)).boxed().collect(Collectors.toList()); + Randomness.shuffle(batch); + for (long seqNo : batch) { + Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1}); + translog.add(op); + } + translog.rollGeneration(); + } + List<Translog.Operation> operations = new ArrayList<>(); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + Translog.Operation op; + while ((op = snapshot.next()) != null) { + operations.add(op); + } + } + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + Translog.Snapshot filter = new Translog.SeqNoFilterSnapshot(snapshot, between(200, 300), between(300, 400)); // out of range + assertThat(filter, SnapshotMatchers.size(0)); + assertThat(filter.totalOperations(), equalTo(snapshot.totalOperations())); + assertThat(filter.overriddenOperations(), equalTo(snapshot.overriddenOperations())); + assertThat(filter.skippedOperations(), equalTo(snapshot.totalOperations())); + } + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + int fromSeqNo = between(-2, 500); + int toSeqNo = between(fromSeqNo, 500); + List<Translog.Operation> selectedOps = operations.stream() + .filter(op -> fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo).collect(Collectors.toList()); + Translog.Snapshot filter = new Translog.SeqNoFilterSnapshot(snapshot, fromSeqNo, toSeqNo); + assertThat(filter, SnapshotMatchers.containsOperationsInAnyOrder(selectedOps)); + assertThat(filter.totalOperations(), equalTo(snapshot.totalOperations())); + assertThat(filter.overriddenOperations(), equalTo(snapshot.overriddenOperations())); + assertThat(filter.skippedOperations(), equalTo(snapshot.skippedOperations() + operations.size() - selectedOps.size())); + } + } + public void assertFileIsPresent(Translog translog, long id) { if (Files.exists(translog.location().resolve(Translog.getFilename(id)))) { return; @@ -1302,7 +1381,7 @@ public class TranslogTests extends ESTestCase { translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () 
-> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); - try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration, Long.MAX_VALUE)) { for (int i = minUncommittedOp; i < translogOperations; i++) { assertEquals("expected operation" + i + " to be in the previous translog but wasn't", translog.currentFileGeneration() - 1, locations.get(i).generation); @@ -1655,7 +1734,7 @@ public class TranslogTests extends ESTestCase { } assertThat(expectedException, is(not(nullValue()))); - + assertThat(failableTLog.getTragicException(), equalTo(expectedException)); assertThat(fileChannels, is(not(empty()))); assertThat("all file channels have to be closed", fileChannels.stream().filter(f -> f.isOpen()).findFirst().isPresent(), is(false)); @@ -1733,7 +1812,7 @@ public class TranslogTests extends ESTestCase { } this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); - try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { + try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration, Long.MAX_VALUE)) { for (int i = firstUncommitted; i < translogOperations; i++) { Translog.Operation next = snapshot.next(); assertNotNull("" + i, next); @@ -2505,11 +2584,13 @@ public class TranslogTests extends ESTestCase { syncedDocs.addAll(unsynced); unsynced.clear(); } catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) { - // fair enough + assertEquals(failableTLog.getTragicException(), ex); } catch (IOException ex) { assertEquals(ex.getMessage(), "__FAKE__ no space left on device"); + assertEquals(failableTLog.getTragicException(), ex); } catch (RuntimeException ex) { assertEquals(ex.getMessage(), "simulated"); + assertEquals(failableTLog.getTragicException(), ex); } finally { Checkpoint checkpoint = Translog.readCheckpoint(config.getTranslogPath()); if (checkpoint.numOps == unsynced.size() + syncedDocs.size()) { @@ -2553,7 +2634,8 @@ public class TranslogTests extends ESTestCase { generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); } try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); - Translog.Snapshot snapshot = translog.newSnapshotFromGen(minGenForRecovery)) { + Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(generationUUID, minGenForRecovery), Long.MAX_VALUE)) { assertEquals(syncedDocs.size(), snapshot.totalOperations()); for (int i = 0; i < syncedDocs.size(); i++) { Translog.Operation next = snapshot.next(); @@ -2931,6 +3013,47 @@ public class TranslogTests extends ESTestCase { } } + // close method should never be called directly from Translog (the only exception is closeOnTragicEvent) + public void testTranslogCloseInvariant() throws IOException { + assumeTrue("test only works with assertions enabled", Assertions.ENABLED); + class MisbehavingTranslog extends Translog { + MisbehavingTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy, LongSupplier globalCheckpointSupplier, LongSupplier 
primaryTermSupplier) throws IOException { + super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier); + } + + void callCloseDirectly() throws IOException { + close(); + } + + void callCloseUsingIOUtilsWithExceptionHandling() { + IOUtils.closeWhileHandlingException(this); + } + + void callCloseUsingIOUtils() throws IOException { + IOUtils.close(this); + } + + void callCloseOnTragicEvent() { + Exception e = new Exception("test tragic exception"); + tragedy.setTragicException(e); + closeOnTragicEvent(e); + } + } + + + globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + Path path = createTempDir(); + final TranslogConfig translogConfig = getTranslogConfig(path); + final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); + final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); + MisbehavingTranslog misbehavingTranslog = new MisbehavingTranslog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get(), primaryTerm::get); + + expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseDirectly()); + expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseUsingIOUtils()); + expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseUsingIOUtilsWithExceptionHandling()); + misbehavingTranslog.callCloseOnTragicEvent(); + } + static class SortedSnapshot implements Translog.Snapshot { private final Translog.Snapshot snapshot; private List<Translog.Operation> operations = null; diff --git a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index c9cc771370e..01d7dc2a535 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -258,7 +258,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { Exception e = expectThrows(IllegalArgumentException.class, () -> new MockController(Settings.builder() .put("indices.memory.interval", "-42s").build())); - assertEquals("Failed to parse value [-42s] for setting [indices.memory.interval] must be >= 0s", e.getMessage()); + assertEquals("failed to parse value [-42s] for setting [indices.memory.interval], must be >= [0ms]", e.getMessage()); } @@ -266,7 +266,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { Exception e = expectThrows(IllegalArgumentException.class, () -> new MockController(Settings.builder() .put("indices.memory.shard_inactive_time", "-42s").build())); - assertEquals("Failed to parse value [-42s] for setting [indices.memory.shard_inactive_time] must be >= 0s", e.getMessage()); + assertEquals("failed to parse value [-42s] for setting [indices.memory.shard_inactive_time], must be >= [0ms]", e.getMessage()); } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java index aa06f9e9b7d..769cdfc8a9b 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -128,7 +128,7 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas String nodeId = 
newRouting.currentNodeId(); UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "boom"); newRouting = newRouting.moveToUnassigned(unassignedInfo) - .updateUnassigned(unassignedInfo, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE); + .updateUnassigned(unassignedInfo, RecoverySource.EmptyStoreRecoverySource.INSTANCE); newRouting = ShardRoutingHelper.initialize(newRouting, nodeId); IndexShard shard = index.createShard(newRouting, s -> {}); IndexShardTestCase.updateRoutingEntry(shard, newRouting); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java index e155639f143..88bc4381626 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java @@ -32,6 +32,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; @@ -72,7 +73,7 @@ public class IndicesQueryCacheTests extends ESTestCase { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, boost) { @Override @@ -414,7 +415,7 @@ public class IndicesQueryCacheTests extends ESTestCase { IndicesQueryCache cache = new IndicesQueryCache(settings); s.setQueryCache(cache); Query query = new MatchAllDocsQuery(); - final DummyWeight weight = new DummyWeight(s.createNormalizedWeight(query, false)); + final DummyWeight weight = new DummyWeight(s.createWeight(s.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f)); final Weight cached = cache.doCache(weight, s.getQueryCachingPolicy()); assertNotSame(weight, cached); assertFalse(weight.scorerCalled); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 0f12305f239..3f6feb23286 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -20,21 +20,25 @@ package org.elasticsearch.indices; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.cache.request.RequestCacheStats; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.joda.time.DateTimeZone; import java.time.ZoneOffset; import java.time.ZonedDateTime; import 
java.time.format.DateTimeFormatter; +import java.util.Arrays; import java.util.List; import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; @@ -107,41 +111,35 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { client.prepareIndex("index", "type", "8").setRouting("3").setSource("s", "2016-03-26"), client.prepareIndex("index", "type", "9").setRouting("3").setSource("s", "2016-03-27")); ensureSearchable("index"); + assertCacheState(client, "index", 0, 0); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), - equalTo(0L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), - equalTo(0L)); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); + refresh(); + ensureSearchable("index"); + + assertCacheState(client, "index", 0, 0); final SearchResponse r1 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")).setPreFilterShardSize(Integer.MAX_VALUE) - .get(); - assertSearchResponse(r1); + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")).setPreFilterShardSize(Integer.MAX_VALUE).get(); + ElasticsearchAssertions.assertAllSuccessful(r1); assertThat(r1.getHits().getTotalHits(), equalTo(7L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), - equalTo(0L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), - equalTo(5L)); + assertCacheState(client, "index", 0, 5); final SearchResponse r2 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) .setPreFilterShardSize(Integer.MAX_VALUE).get(); - assertSearchResponse(r2); + ElasticsearchAssertions.assertAllSuccessful(r2); assertThat(r2.getHits().getTotalHits(), equalTo(7L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), - equalTo(3L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), - equalTo(7L)); + assertCacheState(client, "index", 3, 7); final SearchResponse r3 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")).setPreFilterShardSize(Integer.MAX_VALUE) .get(); - assertSearchResponse(r3); + ElasticsearchAssertions.assertAllSuccessful(r3); assertThat(r3.getHits().getTotalHits(), equalTo(7L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), - equalTo(6L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), - equalTo(9L)); + assertCacheState(client, "index", 6, 9); } public void testQueryRewriteMissingValues() throws Exception { @@ -159,38 +157,33 @@ public 
class IndicesRequestCacheIT extends ESIntegTestCase { client.prepareIndex("index", "type", "8").setSource("s", "2016-03-26"), client.prepareIndex("index", "type", "9").setSource("s", "2016-03-27")); ensureSearchable("index"); + assertCacheState(client, "index", 0, 0); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), - equalTo(0L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), - equalTo(0L)); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); + refresh(); + ensureSearchable("index"); + + assertCacheState(client, "index", 0, 0); final SearchResponse r1 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")).get(); - assertSearchResponse(r1); + ElasticsearchAssertions.assertAllSuccessful(r1); assertThat(r1.getHits().getTotalHits(), equalTo(8L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), - equalTo(0L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), - equalTo(1L)); + assertCacheState(client, "index", 0, 1); final SearchResponse r2 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")).get(); - assertSearchResponse(r2); + ElasticsearchAssertions.assertAllSuccessful(r2); assertThat(r2.getHits().getTotalHits(), equalTo(8L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), - equalTo(1L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), - equalTo(1L)); + assertCacheState(client, "index", 1, 1); final SearchResponse r3 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")).get(); - assertSearchResponse(r3); + ElasticsearchAssertions.assertAllSuccessful(r3); assertThat(r3.getHits().getTotalHits(), equalTo(8L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), - equalTo(2L)); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), - equalTo(1L)); + assertCacheState(client, "index", 2, 1); } public void testQueryRewriteDates() throws Exception { @@ -208,41 +201,36 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { client.prepareIndex("index", "type", "8").setSource("d", "2014-08-01T00:00:00"), client.prepareIndex("index", "type", "9").setSource("d", "2014-09-01T00:00:00")); ensureSearchable("index"); + assertCacheState(client, "index", 0, 0); - assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), - equalTo(0L)); - 
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
+        // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+        ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get();
+        ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse);
+        refresh();
+        ensureSearchable("index");
+
+        assertCacheState(client, "index", 0, 0);
 
         final SearchResponse r1 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
                 .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now"))
                 .get();
-        assertSearchResponse(r1);
+        ElasticsearchAssertions.assertAllSuccessful(r1);
         assertThat(r1.getHits().getTotalHits(), equalTo(9L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(1L));
+        assertCacheState(client, "index", 0, 1);
 
         final SearchResponse r2 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
                 .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now"))
                 .get();
-        assertSearchResponse(r2);
+        ElasticsearchAssertions.assertAllSuccessful(r2);
         assertThat(r2.getHits().getTotalHits(), equalTo(9L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(1L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(1L));
+        assertCacheState(client, "index", 1, 1);
 
         final SearchResponse r3 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
                 .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now"))
                 .get();
-        assertSearchResponse(r3);
+        ElasticsearchAssertions.assertAllSuccessful(r3);
         assertThat(r3.getHits().getTotalHits(), equalTo(9L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(2L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(1L));
+        assertCacheState(client, "index", 2, 1);
     }
 
     public void testQueryRewriteDatesWithNow() throws Exception {
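Editor's note: every test in this file now quiesces the shard before measuring, because the request cache is keyed by index reader; a background merge between two searches would swap the reader and turn an expected cache hit into a miss. The stabilization recipe the hunks repeat, shown in isolation with the same calls as the added lines above:

```java
// Sketch: make the index stable before asserting request-cache hit/miss counts.
ForceMergeResponse forceMergeResponse =
        client.admin().indices().prepareForceMerge("index").setFlush(true).get();
ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse);
refresh();                               // expose the merged segments to search
ensureSearchable("index");               // wait for the shards to be searchable
assertCacheState(client, "index", 0, 0); // the cache is still cold at this point
```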
@@ -266,98 +254,47 @@ public class IndicesRequestCacheIT extends ESIntegTestCase {
             client.prepareIndex("index-3", "type", "8").setSource("d", now.minusDays(7)),
             client.prepareIndex("index-3", "type", "9").setSource("d", now.minusDays(8)));
         ensureSearchable("index-1", "index-2", "index-3");
+        assertCacheState(client, "index-1", 0, 0);
+        assertCacheState(client, "index-2", 0, 0);
+        assertCacheState(client, "index-3", 0, 0);
-        assertThat(
-                client.admin().indices().prepareStats("index-1").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(
-                client.admin().indices().prepareStats("index-1").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
+        // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+        ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index-1", "index-2", "index-3").setFlush(true)
+                .get();
+        ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse);
+        refresh();
+        ensureSearchable("index-1", "index-2", "index-3");
-        assertThat(
-                client.admin().indices().prepareStats("index-2").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(
-                client.admin().indices().prepareStats("index-2").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
-        assertThat(
-                client.admin().indices().prepareStats("index-3").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(
-                client.admin().indices().prepareStats("index-3").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
+
+        assertCacheState(client, "index-1", 0, 0);
+        assertCacheState(client, "index-2", 0, 0);
+        assertCacheState(client, "index-3", 0, 0);
 
         final SearchResponse r1 = client.prepareSearch("index-*").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
                 .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")).get();
-        assertSearchResponse(r1);
+        ElasticsearchAssertions.assertAllSuccessful(r1);
         assertThat(r1.getHits().getTotalHits(), equalTo(8L));
-        assertThat(
-                client.admin().indices().prepareStats("index-1").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(
-                client.admin().indices().prepareStats("index-1").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(1L));
-        assertThat(
-                client.admin().indices().prepareStats("index-2").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(
-                client.admin().indices().prepareStats("index-2").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(1L));
+        assertCacheState(client, "index-1", 0, 1);
+        assertCacheState(client, "index-2", 0, 1);
         // Because the query will INTERSECT with the 3rd index it will not be
         // rewritten and will still contain `now` so won't be recorded as a
         // cache miss or cache hit since queries containing now can't be cached
-        assertThat(
-                client.admin().indices().prepareStats("index-3").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(
-                client.admin().indices().prepareStats("index-3").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
+        assertCacheState(client, "index-3", 0, 0);
 
         final SearchResponse r2 = client.prepareSearch("index-*").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
                 .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")).get();
-        assertSearchResponse(r2);
+        ElasticsearchAssertions.assertAllSuccessful(r2);
         assertThat(r2.getHits().getTotalHits(), equalTo(8L));
-        assertThat(
-                client.admin().indices().prepareStats("index-1").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(1L));
-        assertThat(
-                client.admin().indices().prepareStats("index-1").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(1L));
-        assertThat(
-                client.admin().indices().prepareStats("index-2").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(1L));
-        assertThat(
-                client.admin().indices().prepareStats("index-2").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(1L));
-        assertThat(
-                client.admin().indices().prepareStats("index-3").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(
-                client.admin().indices().prepareStats("index-3").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
+        assertCacheState(client, "index-1", 1, 1);
+        assertCacheState(client, "index-2", 1, 1);
+        assertCacheState(client, "index-3", 0, 0);
 
         final SearchResponse r3 = client.prepareSearch("index-*").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
                 .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")).get();
-        assertSearchResponse(r3);
+        ElasticsearchAssertions.assertAllSuccessful(r3);
         assertThat(r3.getHits().getTotalHits(), equalTo(8L));
-        assertThat(
-                client.admin().indices().prepareStats("index-1").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(2L));
-        assertThat(
-                client.admin().indices().prepareStats("index-1").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(1L));
-        assertThat(
-                client.admin().indices().prepareStats("index-2").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(2L));
-        assertThat(
-                client.admin().indices().prepareStats("index-2").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(1L));
-        assertThat(
-                client.admin().indices().prepareStats("index-3").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(
-                client.admin().indices().prepareStats("index-3").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
+        assertCacheState(client, "index-1", 2, 1);
+        assertCacheState(client, "index-2", 2, 1);
+        assertCacheState(client, "index-3", 0, 0);
     }
 
     public void testCanCache() throws Exception {
@@ -378,74 +315,60 @@ public class IndicesRequestCacheIT extends ESIntegTestCase {
             client.prepareIndex("index", "type", "8").setRouting("3").setSource("s", "2016-03-26"),
             client.prepareIndex("index", "type", "9").setRouting("3").setSource("s", "2016-03-27"));
         ensureSearchable("index");
+        assertCacheState(client, "index", 0, 0);
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
+        // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+        ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get();
+        ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse);
+        refresh();
+        ensureSearchable("index");
+
+        assertCacheState(client, "index", 0, 0);
 
         // If size > 0 we should not cache by default
         final SearchResponse r1 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1)
                 .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")).get();
-        assertSearchResponse(r1);
+        ElasticsearchAssertions.assertAllSuccessful(r1);
         assertThat(r1.getHits().getTotalHits(), equalTo(7L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
+        assertCacheState(client, "index", 0, 0);
 
         // If search type is DFS_QUERY_THEN_FETCH we should not cache
         final SearchResponse r2 = client.prepareSearch("index").setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setSize(0)
                 .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")).get();
-        assertSearchResponse(r2);
+        ElasticsearchAssertions.assertAllSuccessful(r2);
        assertThat(r2.getHits().getTotalHits(), equalTo(7L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
+        assertCacheState(client, "index", 0, 0);
 
         // If search type is DFS_QUERY_THEN_FETCH we should not cache even if
         // the cache flag is explicitly set on the request
         final SearchResponse r3 = client.prepareSearch("index").setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setSize(0)
                 .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")).get();
-        assertSearchResponse(r3);
+        ElasticsearchAssertions.assertAllSuccessful(r3);
         assertThat(r3.getHits().getTotalHits(), equalTo(7L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
+        assertCacheState(client, "index", 0, 0);
 
         // If the request has a non-filter aggregation containing now we should not cache
         final SearchResponse r5 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
                 .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26"))
                 .addAggregation(dateRange("foo").field("s").addRange("now-10y", "now")).get();
-        assertSearchResponse(r5);
+        ElasticsearchAssertions.assertAllSuccessful(r5);
         assertThat(r5.getHits().getTotalHits(), equalTo(7L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
+        assertCacheState(client, "index", 0, 0);
 
         // If size > 0 and the cache flag is set on the request we should cache
         final SearchResponse r6 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1)
                 .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")).get();
-        assertSearchResponse(r6);
+        ElasticsearchAssertions.assertAllSuccessful(r6);
         assertThat(r6.getHits().getTotalHits(), equalTo(7L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(2L));
+        assertCacheState(client, "index", 0, 2);
 
         // If the request has a filter aggregation containing now we should cache since it gets rewritten
         final SearchResponse r4 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
                 .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26"))
                 .addAggregation(filter("foo", QueryBuilders.rangeQuery("s").from("now-10y").to("now"))).get();
-        assertSearchResponse(r4);
+        ElasticsearchAssertions.assertAllSuccessful(r4);
         assertThat(r4.getHits().getTotalHits(), equalTo(7L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(4L));
+        assertCacheState(client, "index", 0, 4);
     }
 
     public void testCacheWithFilteredAlias() {
@@ -460,45 +383,42 @@ public class IndicesRequestCacheIT extends ESIntegTestCase {
         client.prepareIndex("index", "type", "1").setRouting("1")
                 .setSource("created_at", DateTimeFormatter.ISO_LOCAL_DATE.format(now)).get();
         refresh();
+        // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+        ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get();
+        ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse);
 
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(0L));
+        assertCacheState(client, "index", 0, 0);
 
         SearchResponse r1 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
                 .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")).get();
-        assertSearchResponse(r1);
+        ElasticsearchAssertions.assertAllSuccessful(r1);
         assertThat(r1.getHits().getTotalHits(), equalTo(1L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(0L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(1L));
+        assertCacheState(client, "index", 0, 1);
 
         r1 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
                 .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")).get();
-        assertSearchResponse(r1);
+        ElasticsearchAssertions.assertAllSuccessful(r1);
         assertThat(r1.getHits().getTotalHits(), equalTo(1L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(1L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(1L));
+        assertCacheState(client, "index", 1, 1);
 
         r1 = client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get();
-        assertSearchResponse(r1);
+        ElasticsearchAssertions.assertAllSuccessful(r1);
         assertThat(r1.getHits().getTotalHits(), equalTo(1L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(1L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(2L));
+        assertCacheState(client, "index", 1, 2);
 
         r1 = client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get();
-        assertSearchResponse(r1);
+        ElasticsearchAssertions.assertAllSuccessful(r1);
         assertThat(r1.getHits().getTotalHits(), equalTo(1L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
-                equalTo(2L));
-        assertThat(client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
-                equalTo(2L));
+        assertCacheState(client, "index", 2, 2);
+    }
+
+    private static void assertCacheState(Client client, String index, long expectedHits, long expectedMisses) {
+        RequestCacheStats requestCacheStats = client.admin().indices().prepareStats(index).setRequestCache(true).get().getTotal()
+                .getRequestCache();
+        // Check the hit count and miss count together so if they are not
+        // correct we can see both values
+        assertEquals(Arrays.asList(expectedHits, expectedMisses),
+                Arrays.asList(requestCacheStats.getHitCount(), requestCacheStats.getMissCount()));
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java
index 8059c8a1039..4418a7cfb7f 100644
--- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java
@@ -343,7 +343,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
         try (BytesStreamOutput out = new BytesStreamOutput()) {
             IndexSearcher searcher = new IndexSearcher(reader);
             TopDocs topDocs = searcher.search(new TermQuery(new Term("id", Integer.toString(id))), 1);
-            assertEquals(1, topDocs.totalHits);
+            assertEquals(1, topDocs.totalHits.value);
             Document document = reader.document(topDocs.scoreDocs[0].doc);
             out.writeString(document.get("value"));
             loadedFromCache = false;
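Editor's note: the new assertCacheState helper above deliberately compares hits and misses as one list rather than with two separate asserts, so a failure reports both counters instead of stopping at the first mismatch. A sketch of the difference in failure output (plain JUnit behavior, nothing Elasticsearch-specific):

```java
// Two separate asserts: if the hit count is wrong, the miss count is never printed.
// assertEquals(expectedHits, stats.getHitCount());
// assertEquals(expectedMisses, stats.getMissCount());

// One joint assert: the failure shows both pairs at once, e.g.
//   java.lang.AssertionError: expected:<[3, 7]> but was:<[3, 8]>
assertEquals(Arrays.asList(expectedHits, expectedMisses),
        Arrays.asList(stats.getHitCount(), stats.getMissCount()));
```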
diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java
index 47f30e10ef9..119a74262bf 100644
--- a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java
@@ -136,7 +136,7 @@ public class AnalysisModuleTests extends ESTestCase {
             .put("index.analysis.analyzer.foobar.type", "standard")
             .put("index.analysis.analyzer.foobar.alias","foobaz")
             // analyzer aliases were removed in v5.0.0 alpha6
-            .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_beta1, null))
+            .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, null))
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
             .build();
         AnalysisRegistry registry = getNewRegistry(settings);
@@ -149,7 +149,7 @@ public class AnalysisModuleTests extends ESTestCase {
         Settings settings2 = Settings.builder()
             .loadFromStream(yaml, getClass().getResourceAsStream(yaml), false)
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
-            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0)
+            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0)
             .build();
         AnalysisRegistry newRegistry = getNewRegistry(settings2);
         IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings2);
@@ -162,9 +162,9 @@ public class AnalysisModuleTests extends ESTestCase {
         // analysis service has the expected version
         assertThat(indexAnalyzers.get("standard").analyzer(), is(instanceOf(StandardAnalyzer.class)));
-        assertEquals(Version.V_5_0_0.luceneVersion,
+        assertEquals(Version.V_6_0_0.luceneVersion,
                 indexAnalyzers.get("standard").analyzer().getVersion());
-        assertEquals(Version.V_5_0_0.luceneVersion,
+        assertEquals(Version.V_6_0_0.luceneVersion,
                 indexAnalyzers.get("stop").analyzer().getVersion());
 
         assertThat(indexAnalyzers.get("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class)));
@@ -240,6 +240,35 @@ public class AnalysisModuleTests extends ESTestCase {
         }
     }
 
+    public void testStandardFilterBWC() throws IOException {
+        Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT.minimumCompatibilityVersion());
+        // bwc deprecation
+        {
+            Settings settings = Settings.builder()
+                .put("index.analysis.analyzer.my_standard.tokenizer", "standard")
+                .put("index.analysis.analyzer.my_standard.filter", "standard")
+                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+                .put(IndexMetaData.SETTING_VERSION_CREATED, version)
+                .build();
+            IndexAnalyzers analyzers = getIndexAnalyzers(settings);
+            assertTokenStreamContents(analyzers.get("my_standard").tokenStream("", "test"), new String[]{"test"});
+            assertWarnings("The [standard] token filter is deprecated and will be removed in a future version.");
+        }
+        // removal
+        {
+            final Settings settings = Settings.builder()
+                .put("index.analysis.analyzer.my_standard.tokenizer", "standard")
+                .put("index.analysis.analyzer.my_standard.filter", "standard")
+                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_7_0_0_alpha1)
+                .build();
+            IndexAnalyzers analyzers = getIndexAnalyzers(settings);
+            IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () ->
+                analyzers.get("my_standard").tokenStream("", ""));
+            assertThat(exc.getMessage(), equalTo("The [standard] token filter has been removed."));
+        }
+    }
+
     /**
      * Tests that plugins can register pre-configured char filters that vary in behavior based on Elasticsearch version, Lucene version,
      * and that do not vary based on version at all.
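Editor's note: testStandardFilterBWC above follows the usual two-phase BWC shape: on index versions inside the deprecation window the filter still works but must emit a warning, and from the removal version onward it must fail hard. The version gating, condensed (VersionUtils and the version constants are the ones used in the hunk):

```java
// Sketch: the two version regimes the BWC test exercises.
Version deprecatedIn = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0,
        Version.CURRENT.minimumCompatibilityVersion()); // filter works, but warns
Version removedIn = Version.V_7_0_0_alpha1;             // filter throws IllegalArgumentException
```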
@@ -376,34 +405,34 @@ public class AnalysisModuleTests extends ESTestCase {
         }
     }
     AnalysisRegistry registry = new AnalysisModule(TestEnvironment.newEnvironment(emptyNodeSettings),
-        singletonList(new AnalysisPlugin() {
-            @Override
-            public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
-                return Arrays.asList(
+            singletonList(new AnalysisPlugin() {
+                @Override
+                public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
+                    return Arrays.asList(
                         PreConfiguredTokenizer.singleton("no_version", () -> new FixedTokenizer("no_version"),
-                            noVersionSupportsMultiTerm ? () -> AppendTokenFilter.factoryForSuffix("no_version") : null),
+                                noVersionSupportsMultiTerm ? () -> AppendTokenFilter.factoryForSuffix("no_version") : null),
                         PreConfiguredTokenizer.luceneVersion("lucene_version",
-                            luceneVersion -> new FixedTokenizer(luceneVersion.toString()),
-                            luceneVersionSupportsMultiTerm ?
-                                luceneVersion -> AppendTokenFilter.factoryForSuffix(luceneVersion.toString()) : null),
+                                luceneVersion -> new FixedTokenizer(luceneVersion.toString()),
+                                luceneVersionSupportsMultiTerm ?
+                                        luceneVersion -> AppendTokenFilter.factoryForSuffix(luceneVersion.toString()) : null),
                         PreConfiguredTokenizer.elasticsearchVersion("elasticsearch_version",
-                            esVersion -> new FixedTokenizer(esVersion.toString()),
-                            elasticsearchVersionSupportsMultiTerm ?
-                                esVersion -> AppendTokenFilter.factoryForSuffix(esVersion.toString()) : null)
-        );
-    }
-    })).getAnalysisRegistry();
+                                esVersion -> new FixedTokenizer(esVersion.toString()),
+                                elasticsearchVersionSupportsMultiTerm ?
+                                        esVersion -> AppendTokenFilter.factoryForSuffix(esVersion.toString()) : null)
+                    );
+                }
+            })).getAnalysisRegistry();
 
     Version version = VersionUtils.randomVersion(random());
     IndexAnalyzers analyzers = getIndexAnalyzers(registry, Settings.builder()
-        .put("index.analysis.analyzer.no_version.tokenizer", "no_version")
-        .put("index.analysis.analyzer.lucene_version.tokenizer", "lucene_version")
-        .put("index.analysis.analyzer.elasticsearch_version.tokenizer", "elasticsearch_version")
-        .put(IndexMetaData.SETTING_VERSION_CREATED, version)
-        .build());
-    assertTokenStreamContents(analyzers.get("no_version").tokenStream("", "test"), new String[] {"no_version"});
-    assertTokenStreamContents(analyzers.get("lucene_version").tokenStream("", "test"), new String[] {version.luceneVersion.toString()});
-    assertTokenStreamContents(analyzers.get("elasticsearch_version").tokenStream("", "test"), new String[] {version.toString()});
+            .put("index.analysis.analyzer.no_version.tokenizer", "no_version")
+            .put("index.analysis.analyzer.lucene_version.tokenizer", "lucene_version")
+            .put("index.analysis.analyzer.elasticsearch_version.tokenizer", "elasticsearch_version")
+            .put(IndexMetaData.SETTING_VERSION_CREATED, version)
+            .build());
+    assertTokenStreamContents(analyzers.get("no_version").tokenStream("", "test"), new String[]{"no_version"});
+    assertTokenStreamContents(analyzers.get("lucene_version").tokenStream("", "test"), new String[]{version.luceneVersion.toString()});
+    assertTokenStreamContents(analyzers.get("elasticsearch_version").tokenStream("", "test"), new String[]{version.toString()});
 
     // These are currently broken by https://github.com/elastic/elasticsearch/issues/24752
     // assertEquals("test" + (noVersionSupportsMultiTerm ? "no_version" : ""),
diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java
index 7722795525d..01790a04dc6 100644
--- a/server/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java
@@ -24,14 +24,10 @@ import org.apache.lucene.analysis.TokenStream;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalSettingsPlugin;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
@@ -44,9 +40,10 @@ import static org.hamcrest.Matchers.notNullValue;
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE)
 public class PreBuiltAnalyzerIntegrationIT extends ESIntegTestCase {
+
     @Override
-    protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(InternalSettingsPlugin.class);
+    protected boolean forbidPrivateIndexSettings() {
+        return false;
     }
 
     public void testThatPreBuiltAnalyzersAreNotClosedOnIndexClose() throws Exception {
diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
index 580696264bd..c68e4870aae 100644
--- a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
+++ b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
@@ -273,7 +273,7 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestCase {
         }
 
         @Override
-        public boolean updateMapping(IndexMetaData indexMetaData) throws IOException {
+        public boolean updateMapping(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) throws IOException {
             failRandomly();
             return false;
         }
diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
index a113a929648..ab7c24b8b20 100644
--- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
+++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
@@ -46,8 +46,8 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.EmptyClusterInfoService;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
-import org.elasticsearch.cluster.action.shard.ShardStateAction.StartedShardEntry;
 import org.elasticsearch.cluster.action.shard.ShardStateAction.FailedShardEntry;
+import org.elasticsearch.cluster.action.shard.ShardStateAction.StartedShardEntry;
 import org.elasticsearch.cluster.coordination.JoinTaskExecutor;
 import org.elasticsearch.cluster.metadata.AliasValidator;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -183,7 +183,7 @@ public class ClusterStateChanges extends AbstractComponent {
             allocationService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, indicesService, threadPool);
         MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(settings, clusterService, indicesService,
             allocationService, new AliasValidator(settings), environment,
-            IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, threadPool, xContentRegistry);
+            IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, threadPool, xContentRegistry, true);
         transportCloseIndexAction = new TransportCloseIndexAction(settings, transportService, clusterService, threadPool,
             indexStateService, clusterSettings, actionFilters, indexNameExpressionResolver, destructiveOperations);
diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java
new file mode 100644
index 00000000000..1bf95f612ce
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasEntry;
+import static org.hamcrest.Matchers.hasKey;
+import static org.hamcrest.Matchers.not;
+
+public class LegacyUpdateMappingIntegrationIT extends ESIntegTestCase {
+
+    @Override
+    protected boolean forbidPrivateIndexSettings() {
+        return false;
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testUpdateDefaultMappingSettings() throws Exception {
+        logger.info("Creating index with _default_ mappings");
+        try (XContentBuilder defaultMapping = JsonXContent.contentBuilder()) {
+            defaultMapping.startObject();
+            {
+                defaultMapping.startObject(MapperService.DEFAULT_MAPPING);
+                {
+                    defaultMapping.field("date_detection", false);
+                }
+                defaultMapping.endObject();
+            }
+            defaultMapping.endObject();
+            client()
+                    .admin()
+                    .indices()
+                    .prepareCreate("test")
+                    .setSettings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0).build())
+                    .addMapping(MapperService.DEFAULT_MAPPING, defaultMapping)
+                    .get();
+        }
+
+        {
+            final GetMappingsResponse getResponse =
+                    client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
+            final Map<String, Object> defaultMapping =
+                    getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
+            assertThat(defaultMapping, hasKey("date_detection"));
+        }
+
+        logger.info("Emptying _default_ mappings");
+        // now remove it
+        try (XContentBuilder mappingBuilder =
+                     JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING).endObject().endObject()) {
+            final AcknowledgedResponse putResponse =
+                    client()
+                            .admin()
+                            .indices()
+                            .preparePutMapping("test")
+                            .setType(MapperService.DEFAULT_MAPPING)
+                            .setSource(mappingBuilder)
+                            .get();
+            assertThat(putResponse.isAcknowledged(), equalTo(true));
+        }
+        logger.info("Done Emptying _default_ mappings");
+
+        {
+            final GetMappingsResponse getResponse =
+                    client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
+            final Map<String, Object> defaultMapping =
+                    getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
+            assertThat(defaultMapping, not(hasKey("date_detection")));
+        }
+
+        // now test you can change stuff that is normally unchangeable
+        logger.info("Creating _default_ mappings with an analyzed field");
+        try (XContentBuilder defaultMapping = JsonXContent.contentBuilder()) {
+
+            defaultMapping.startObject();
+            {
+                defaultMapping.startObject(MapperService.DEFAULT_MAPPING);
+                {
+                    defaultMapping.startObject("properties");
+                    {
+                        defaultMapping.startObject("f");
+                        {
+                            defaultMapping.field("type", "text");
+                            defaultMapping.field("index", true);
+                        }
+                        defaultMapping.endObject();
+                    }
+                    defaultMapping.endObject();
+                }
+                defaultMapping.endObject();
+            }
+            defaultMapping.endObject();
+
+            final AcknowledgedResponse putResponse =
+                    client()
+                            .admin()
+                            .indices()
+                            .preparePutMapping("test")
+                            .setType(MapperService.DEFAULT_MAPPING).setSource(defaultMapping)
+                            .get();
+            assertThat(putResponse.isAcknowledged(), equalTo(true));
+        }
+
+        logger.info("Changing _default_ mappings field from analyzed to non-analyzed");
+        {
+            try (XContentBuilder mappingBuilder = JsonXContent.contentBuilder()) {
+                mappingBuilder.startObject();
+                {
+                    mappingBuilder.startObject(MapperService.DEFAULT_MAPPING);
+                    {
+                        mappingBuilder.startObject("properties");
+                        {
+                            mappingBuilder.startObject("f");
+                            {
+                                mappingBuilder.field("type", "keyword");
+                            }
+                            mappingBuilder.endObject();
+                        }
+                        mappingBuilder.endObject();
+                    }
+                    mappingBuilder.endObject();
+                }
+                mappingBuilder.endObject();
+
+                final AcknowledgedResponse putResponse =
+                        client()
+                                .admin()
+                                .indices()
+                                .preparePutMapping("test")
+                                .setType(MapperService.DEFAULT_MAPPING)
+                                .setSource(mappingBuilder)
+                                .get();
+                assertThat(putResponse.isAcknowledged(), equalTo(true));
+            }
+        }
+        logger.info("Done changing _default_ mappings field from analyzed to non-analyzed");
+
+        {
+            final GetMappingsResponse getResponse =
+                    client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
+            final Map<String, Object> defaultMapping =
+                    getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
+            final Map<String, Object> fieldSettings = (Map<String, Object>) ((Map) defaultMapping.get("properties")).get("f");
+            assertThat(fieldSettings, hasEntry("type", "keyword"));
+        }
+
+        // but we still validate the _default_ type
+        logger.info("Confirming _default_ mappings validation");
+        try (XContentBuilder mappingBuilder = JsonXContent.contentBuilder()) {
+
+            mappingBuilder.startObject();
+            {
+                mappingBuilder.startObject(MapperService.DEFAULT_MAPPING);
+                {
+                    mappingBuilder.startObject("properties");
+                    {
+                        mappingBuilder.startObject("f");
+                        {
+                            mappingBuilder.field("type", "non-existent");
+                        }
+                        mappingBuilder.endObject();
+                    }
+                    mappingBuilder.endObject();
+                }
+                mappingBuilder.endObject();
+            }
+            mappingBuilder.endObject();
+
+            expectThrows(
+                    MapperParsingException.class,
+                    () -> client()
+                            .admin()
+                            .indices()
+                            .preparePutMapping("test")
+                            .setType(MapperService.DEFAULT_MAPPING)
+                            .setSource(mappingBuilder)
+                            .get());
+        }
+
+    }
+
+}
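Editor's note: the block-per-startObject style above is verbose but makes the builder nesting auditable. For orientation, a sketch of the JSON the first builder in this file produces — not part of the patch; Strings.toString(XContentBuilder) from org.elasticsearch.common.Strings is assumed here as the rendering helper:

```java
try (XContentBuilder b = JsonXContent.contentBuilder()) {
    b.startObject();
    {
        b.startObject(MapperService.DEFAULT_MAPPING);
        {
            b.field("date_detection", false);
        }
        b.endObject();
    }
    b.endObject();
    // Strings.toString(b) -> {"_default_":{"date_detection":false}}
}
```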
diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
index f28d253087d..af9bf9910ec 100644
--- a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
@@ -19,21 +19,18 @@
 
 package org.elasticsearch.indices.mapping;
 
-import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
 import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.client.Client;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
-import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -56,12 +53,8 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.hasEntry;
-import static org.hamcrest.Matchers.hasKey;
-import static org.hamcrest.Matchers.not;
 
 public class UpdateMappingIntegrationIT extends ESIntegTestCase {
 
@@ -200,69 +193,6 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
         assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
     }
 
-    @SuppressWarnings("unchecked")
-    public void testUpdateDefaultMappingSettings() throws Exception {
-        logger.info("Creating index with _default_ mappings");
-        client().admin().indices().prepareCreate("test")
-                .setSettings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0).build())
-                .addMapping(MapperService.DEFAULT_MAPPING,
-                        JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
-                                .field("date_detection", false)
-                                .endObject().endObject()
-                ).get();
-
-        GetMappingsResponse getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
-        Map<String, Object> defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
-        assertThat(defaultMapping, hasKey("date_detection"));
-
-
-        logger.info("Emptying _default_ mappings");
-        // now remove it
-        AcknowledgedResponse putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
-                JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
-                        .endObject().endObject()
-        ).get();
-        assertThat(putResponse.isAcknowledged(), equalTo(true));
-        logger.info("Done Emptying _default_ mappings");
-
-        getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
-        defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
-        assertThat(defaultMapping, not(hasKey("date_detection")));
-
-        // now test you can change stuff that are normally unchangeable
-        logger.info("Creating _default_ mappings with an analyzed field");
-        putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
-                JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
-                        .startObject("properties").startObject("f").field("type", "text").field("index", true).endObject().endObject()
-                        .endObject().endObject()
-        ).get();
-        assertThat(putResponse.isAcknowledged(), equalTo(true));
-
-
-        logger.info("Changing _default_ mappings field from analyzed to non-analyzed");
-        putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
-                JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
-                        .startObject("properties").startObject("f").field("type", "keyword").endObject().endObject()
-                        .endObject().endObject()
-        ).get();
-        assertThat(putResponse.isAcknowledged(), equalTo(true));
-        logger.info("Done changing _default_ mappings field from analyzed to non-analyzed");
-
-        getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
-        defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
-        Map<String, Object> fieldSettings = (Map<String, Object>) ((Map) defaultMapping.get("properties")).get("f");
-        assertThat(fieldSettings, hasEntry("type", "keyword"));
-
-        // but we still validate the _default_ type
-        logger.info("Confirming _default_ mappings validation");
-        assertThrows(client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
-                JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
-                        .startObject("properties").startObject("f").field("type", "DOESNT_EXIST").endObject().endObject()
-                        .endObject().endObject()
-        ), MapperParsingException.class);
-
-    }
-
     public void testUpdateMappingConcurrently() throws Throwable {
         createIndex("test1", "test2");
 
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
index 89a8813e3e0..6a6970675eb 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
@@ -36,7 +36,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.RecoverySource;
 import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
 import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
-import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
 import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.Settings;
@@ -67,6 +66,7 @@ import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportRequest;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportService;
+import org.junit.After;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -110,6 +110,11 @@ public class IndexRecoveryIT extends ESIntegTestCase {
             RecoverySettingsChunkSizePlugin.class);
     }
 
+    @After
+    public void assertConsistentHistoryInLuceneIndex() throws Exception {
+        internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex();
+    }
+
     private void assertRecoveryStateWithoutStage(RecoveryState state, int shardId, RecoverySource recoverySource, boolean primary,
                                                  String sourceNode, String targetNode) {
         assertThat(state.getShardId().getId(), equalTo(shardId));
@@ -180,7 +185,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
 
         RecoveryState recoveryState = recoveryStates.get(0);
 
-        assertRecoveryState(recoveryState, 0, StoreRecoverySource.EXISTING_STORE_INSTANCE, true, Stage.DONE, null, node);
+        assertRecoveryState(recoveryState, 0, RecoverySource.ExistingStoreRecoverySource.INSTANCE, true, Stage.DONE, null, node);
 
         validateIndexRecoveryState(recoveryState.getIndex());
     }
@@ -233,7 +238,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
         // validate node A recovery
         RecoveryState nodeARecoveryState = nodeAResponses.get(0);
-        assertRecoveryState(nodeARecoveryState, 0, StoreRecoverySource.EMPTY_STORE_INSTANCE, true, Stage.DONE, null, nodeA);
+        assertRecoveryState(nodeARecoveryState, 0, RecoverySource.EmptyStoreRecoverySource.INSTANCE, true, Stage.DONE, null, nodeA);
         validateIndexRecoveryState(nodeARecoveryState.getIndex());
 
         // validate node B recovery
@@ -242,6 +247,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
         validateIndexRecoveryState(nodeBRecoveryState.getIndex());
     }
 
+    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/32686")
     @TestLogging(
         "_root:DEBUG,"
             + "org.elasticsearch.cluster.service:TRACE,"
@@ -288,7 +294,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
         List<RecoveryState> nodeBRecoveryStates = findRecoveriesForTargetNode(nodeB, recoveryStates);
         assertThat(nodeBRecoveryStates.size(), equalTo(1));
 
-        assertRecoveryState(nodeARecoveryStates.get(0), 0, StoreRecoverySource.EMPTY_STORE_INSTANCE, true, Stage.DONE, null, nodeA);
+        assertRecoveryState(nodeARecoveryStates.get(0), 0, RecoverySource.EmptyStoreRecoverySource.INSTANCE, true, Stage.DONE, null, nodeA);
         validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex());
 
         assertOnGoingRecoveryState(nodeBRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, true, nodeA, nodeB);
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java
index 4b1419375e6..b6f5a7b6451 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java
@@ -25,6 +25,7 @@ import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.NoMergePolicy;
 import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.IndexShardTestCase;
@@ -91,6 +92,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
         replica.close("test", false);
         final List<IndexCommit> commits = DirectoryReader.listCommits(replica.store().directory());
         IndexWriterConfig iwc = new IndexWriterConfig(null)
+            .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
             .setCommitOnClose(false)
             .setMergePolicy(NoMergePolicy.INSTANCE)
             .setOpenMode(IndexWriterConfig.OpenMode.APPEND);
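Editor's note: the one-line PeerRecoveryTargetServiceTests change above is load-bearing: an IndexWriter appending to an index that contains soft-deleted documents must be told which field marks them, otherwise those documents can be reclaimed as ordinary deletes. The configuration in isolation — a sketch using the Lucene API and the Lucene.SOFT_DELETES_FIELD constant the hunk imports:

```java
// Sketch: open an IndexWriter that preserves soft-deleted documents.
IndexWriterConfig iwc = new IndexWriterConfig(null)
        .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) // field marking soft deletes
        .setCommitOnClose(false)
        .setMergePolicy(NoMergePolicy.INSTANCE)         // keep segments untouched in the test
        .setOpenMode(IndexWriterConfig.OpenMode.APPEND);
```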
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
index f0644b029c3..0f7a72aacf3 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
@@ -63,7 +63,6 @@ import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.IndexShardRelocatedException;
 import org.elasticsearch.index.shard.IndexShardState;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.store.DirectoryService;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.store.StoreFileMetaData;
 import org.elasticsearch.index.translog.Translog;
@@ -108,7 +107,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
         final StartRecoveryRequest request = getStartRecoveryRequest();
         Store store = newStore(createTempDir());
         RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request,
-            recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY);
+            recoverySettings.getChunkSize().bytesAsInt());
         Directory dir = store.directory();
         RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
         int numDocs = randomIntBetween(10, 100);
@@ -174,7 +173,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
         when(shard.state()).thenReturn(IndexShardState.STARTED);
         final RecoveryTargetHandler recoveryTarget = mock(RecoveryTargetHandler.class);
         final RecoverySourceHandler handler =
-            new RecoverySourceHandler(shard, recoveryTarget, request, fileChunkSizeInBytes, Settings.EMPTY);
+            new RecoverySourceHandler(shard, recoveryTarget, request, fileChunkSizeInBytes);
         final List<Translog.Operation> operations = new ArrayList<>();
         final int initialNumberOfDocs = randomIntBetween(16, 64);
         for (int i = 0; i < initialNumberOfDocs; i++) {
@@ -281,7 +280,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
         Store store = newStore(tempDir, false);
         AtomicBoolean failedEngine = new AtomicBoolean(false);
         RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request,
-            recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY) {
+            recoverySettings.getChunkSize().bytesAsInt()) {
             @Override
             protected void failEngine(IOException cause) {
                 assertFalse(failedEngine.get());
@@ -340,7 +339,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
         Store store = newStore(tempDir, false);
         AtomicBoolean failedEngine = new AtomicBoolean(false);
         RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request,
-            recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY) {
+            recoverySettings.getChunkSize().bytesAsInt()) {
             @Override
             protected void failEngine(IOException cause) {
                 assertFalse(failedEngine.get());
@@ -405,17 +404,10 @@ public class RecoverySourceHandlerTests extends ESTestCase {
         final AtomicBoolean prepareTargetForTranslogCalled = new AtomicBoolean();
         final AtomicBoolean phase2Called = new AtomicBoolean();
         final RecoverySourceHandler handler = new RecoverySourceHandler(
-            shard,
-            mock(RecoveryTargetHandler.class),
-            request,
-            recoverySettings.getChunkSize().bytesAsInt(),
-            Settings.EMPTY) {
-
-
-            @Override
-            boolean isTranslogReadyForSequenceNumberBasedRecovery() throws IOException {
-                return randomBoolean();
-            }
+                shard,
+                mock(RecoveryTargetHandler.class),
+                request,
+                recoverySettings.getChunkSize().bytesAsInt()) {
 
             @Override
             public void phase1(final IndexCommit snapshot, final Supplier<Integer> translogOps) {
@@ -468,18 +460,11 @@ public class RecoverySourceHandlerTests extends ESTestCase {
         return newStore(path, true);
     }
 
     private Store newStore(Path path, boolean checkIndex) throws IOException {
-        DirectoryService directoryService = new DirectoryService(shardId, INDEX_SETTINGS) {
-
-            @Override
-            public Directory newDirectory() throws IOException {
-                BaseDirectoryWrapper baseDirectoryWrapper = RecoverySourceHandlerTests.newFSDirectory(path);
-                if (checkIndex == false) {
-                    baseDirectoryWrapper.setCheckIndexOnClose(false); // don't run checkindex we might corrupt the index in these tests
-                }
-                return baseDirectoryWrapper;
-            }
-        };
-        return new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
+        BaseDirectoryWrapper baseDirectoryWrapper = RecoverySourceHandlerTests.newFSDirectory(path);
+        if (checkIndex == false) {
+            baseDirectoryWrapper.setCheckIndexOnClose(false); // don't run checkindex; we might corrupt the index in these tests
+        }
+        return new Store(shardId, INDEX_SETTINGS, baseDirectoryWrapper, new DummyShardLock(shardId));
     }
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
index 0f663eca75d..45535e19672 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
@@ -34,6 +34,7 @@ import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.MergePolicyConfig;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.mapper.SourceToParse;
@@ -63,17 +64,16 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
             int docs = shards.indexDocs(10);
             getTranslog(shards.getPrimary()).rollGeneration();
             shards.flush();
-            if (randomBoolean()) {
-                docs += shards.indexDocs(10);
-            }
+            int moreDocs = shards.indexDocs(randomInt(10));
             shards.addReplica();
             shards.startAll();
             final IndexShard replica = shards.getReplicas().get(0);
-            assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(docs));
+            boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled();
+            assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? moreDocs : docs + moreDocs));
+            shards.assertAllEqual(docs + moreDocs);
         }
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32089")
     public void testRetentionPolicyChangeDuringRecovery() throws Exception {
         try (ReplicationGroup shards = createGroup(0)) {
             shards.startPrimary();
@@ -102,12 +102,12 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
             // rolling/flushing is async
             assertBusy(() -> {
                 assertThat(replica.getLastSyncedGlobalCheckpoint(), equalTo(19L));
-                assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(0));
+                assertThat(getTranslog(replica).totalOperations(), equalTo(0));
             });
         }
     }
 
-    public void testRecoveryWithOutOfOrderDelete() throws Exception {
+    public void testRecoveryWithOutOfOrderDeleteWithTranslog() throws Exception {
         /*
          * The flow of this test:
          * - delete #1
@@ -119,7 +119,8 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
         * - index #5
         * - If flush and the translog retention disabled, delete #1 will be removed while index #0 is still retained and replayed.
         */
-        try (ReplicationGroup shards = createGroup(1)) {
+        Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build();
+        try (ReplicationGroup shards = createGroup(1, settings)) {
             shards.startAll();
             // create out of order delete and index op on replica
             final IndexShard orgReplica = shards.getReplicas().get(0);
@@ -171,7 +172,63 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
             shards.recoverReplica(newReplica);
             shards.assertAllEqual(3);
 
-            assertThat(newReplica.estimateTranslogOperationsFromMinSeq(0), equalTo(translogOps));
+            assertThat(getTranslog(newReplica).totalOperations(), equalTo(translogOps));
+        }
+    }
+
+    public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception {
+        Settings settings = Settings.builder()
+            .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
+            .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10)
+            // If soft-deletes is enabled, delete#1 will be reclaimed because its segment (segment_1) is fully deleted
+            // index#0 will be retained if merge is disabled; otherwise it will be reclaimed because gcp=3 and retained_ops=0
+            .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build();
+        try (ReplicationGroup shards = createGroup(1, settings)) {
+            shards.startAll();
+            // create out of order delete and index op on replica
+            final IndexShard orgReplica = shards.getReplicas().get(0);
+            final String indexName = orgReplica.shardId().getIndexName();
+
+            // delete #1
+            orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id");
+            orgReplica.flush(new FlushRequest().force(true)); // isolate delete#1 in its own translog generation and lucene segment
+            // index #0
+            orgReplica.applyIndexOperationOnReplica(0, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
+                SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON));
+            // index #3
+            orgReplica.applyIndexOperationOnReplica(3, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
+                SourceToParse.source(indexName, "type", "id-3", new BytesArray("{}"), XContentType.JSON));
+            // Flushing a new commit with local checkpoint=1 allows us to delete the translog gen #1.
+            orgReplica.flush(new FlushRequest().force(true).waitIfOngoing(true));
+            // index #2
+            orgReplica.applyIndexOperationOnReplica(2, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
+                SourceToParse.source(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON));
+            orgReplica.updateGlobalCheckpointOnReplica(3L, "test");
+            // index #5 -> force NoOp #4.
+            orgReplica.applyIndexOperationOnReplica(5, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
+                SourceToParse.source(indexName, "type", "id-5", new BytesArray("{}"), XContentType.JSON));
+
+            if (randomBoolean()) {
+                if (randomBoolean()) {
+                    logger.info("--> flushing shard (translog/soft-deletes will be trimmed)");
+                    IndexMetaData.Builder builder = IndexMetaData.builder(orgReplica.indexSettings().getIndexMetaData());
+                    builder.settings(Settings.builder().put(orgReplica.indexSettings().getSettings())
+                        .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0));
+                    orgReplica.indexSettings().updateIndexMetaData(builder.build());
+                    orgReplica.onSettingsChanged();
+                }
+                flushShard(orgReplica);
+            }
+
+            final IndexShard orgPrimary = shards.getPrimary();
+            shards.promoteReplicaToPrimary(orgReplica).get(); // wait for primary/replica sync to make sure seq# gap is closed.
+ + IndexShard newReplica = shards.addReplicaWithExistingPath(orgPrimary.shardPath(), orgPrimary.routingEntry().currentNodeId()); + shards.recoverReplica(newReplica); + shards.assertAllEqual(3); + try (Translog.Snapshot snapshot = newReplica.getHistoryOperations("test", 0)) { + assertThat(snapshot, SnapshotMatchers.size(6)); + } } } @@ -223,7 +280,8 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { shards.recoverReplica(newReplica); // file based recovery should be made assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); - assertThat(newReplica.estimateTranslogOperationsFromMinSeq(0), equalTo(numDocs)); + boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); + assertThat(getTranslog(newReplica).totalOperations(), equalTo(softDeletesEnabled ? nonFlushedDocs : numDocs)); // history uuid was restored assertThat(newReplica.getHistoryUUID(), equalTo(historyUUID)); @@ -327,7 +385,8 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { shards.recoverReplica(replica); // Make sure the flushing will eventually be completed (eg. `shouldPeriodicallyFlush` is false) assertBusy(() -> assertThat(getEngine(replica).shouldPeriodicallyFlush(), equalTo(false))); - assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(numDocs)); + boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); + assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? 0 : numDocs)); shards.assertAllEqual(numDocs); } } diff --git a/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java b/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java new file mode 100644 index 00000000000..8792232b381 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java @@ -0,0 +1,201 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices.settings; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class InternalOrPrivateSettingsPlugin extends Plugin implements ActionPlugin { + + static final Setting<String> INDEX_INTERNAL_SETTING = + Setting.simpleString("index.internal", Setting.Property.IndexScope, Setting.Property.InternalIndex); + + static final Setting<String> INDEX_PRIVATE_SETTING = + Setting.simpleString("index.private", Setting.Property.IndexScope, Setting.Property.PrivateIndex); + + @Override + public List<Setting<?>> getSettings() { + return Arrays.asList(INDEX_INTERNAL_SETTING, INDEX_PRIVATE_SETTING); + } + + public static class UpdateInternalOrPrivateAction extends Action<UpdateInternalOrPrivateAction.Response> { + + static final UpdateInternalOrPrivateAction INSTANCE = new UpdateInternalOrPrivateAction(); + private static final String NAME = "indices:admin/settings/update-internal-or-private-index"; + + public UpdateInternalOrPrivateAction() { + super(NAME); + } + + static class Request extends MasterNodeRequest<Request> { + + private String index; + private String key; + private String value; + + Request() { + + } + + Request(final String index, final String key, final String value) { + this.index = index; + this.key = key; + this.value = value; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + index = in.readString(); + key = in.readString(); + value = in.readString(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(index); + out.writeString(key); + out.writeString(value); + } + + } + + static class Response extends ActionResponse { + + } + + @Override + public UpdateInternalOrPrivateAction.Response newResponse() { + return new UpdateInternalOrPrivateAction.Response(); + } + + } + + public static class TransportUpdateInternalOrPrivateAction + extends TransportMasterNodeAction<UpdateInternalOrPrivateAction.Request, UpdateInternalOrPrivateAction.Response> { + + @Inject + public TransportUpdateInternalOrPrivateAction( + final Settings settings, + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver) { + super( + settings, + UpdateInternalOrPrivateAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + indexNameExpressionResolver, + UpdateInternalOrPrivateAction.Request::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected UpdateInternalOrPrivateAction.Response newResponse() { + return new UpdateInternalOrPrivateAction.Response(); + } + + @Override + protected void masterOperation( + final UpdateInternalOrPrivateAction.Request request, + final ClusterState state, + final ActionListener<UpdateInternalOrPrivateAction.Response> listener) throws Exception { + clusterService.submitStateUpdateTask("update-index-internal-or-private", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(final ClusterState currentState) throws Exception { + final MetaData.Builder builder = MetaData.builder(currentState.metaData()); + final IndexMetaData.Builder imdBuilder = IndexMetaData.builder(currentState.metaData().index(request.index)); + final Settings.Builder settingsBuilder = + Settings.builder() + .put(currentState.metaData().index(request.index).getSettings()) + .put(request.key, request.value); + imdBuilder.settings(settingsBuilder); + builder.put(imdBuilder.build(), true); + return ClusterState.builder(currentState).metaData(builder).build(); + } + + @Override + public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { + listener.onResponse(new UpdateInternalOrPrivateAction.Response()); + } + + @Override + public void onFailure(final String source, final Exception e) { + listener.onFailure(e); + } + + }); + } + + @Override + protected ClusterBlockException checkBlock(final UpdateInternalOrPrivateAction.Request request, final ClusterState state) { + return null; + } + + } + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return Collections.singletonList( + new ActionHandler<>(UpdateInternalOrPrivateAction.INSTANCE, TransportUpdateInternalOrPrivateAction.class)); + } + +} diff --git a/server/src/test/java/org/elasticsearch/indices/settings/InternalSettingsIT.java b/server/src/test/java/org/elasticsearch/indices/settings/InternalSettingsIT.java new file mode 100644 index 00000000000..1d11fbc79fc --- /dev/null +++ b/server/src/test/java/org/elasticsearch/indices/settings/InternalSettingsIT.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.indices.settings; + +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collection; +import java.util.Collections; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; + +public class InternalSettingsIT extends ESIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.singleton(InternalOrPrivateSettingsPlugin.class); + } + + @Override + protected Collection<Class<? extends Plugin>> transportClientPlugins() { + return Collections.singletonList(InternalOrPrivateSettingsPlugin.class); + } + + public void testSetInternalIndexSettingOnCreate() { + final Settings settings = Settings.builder().put("index.internal", "internal").build(); + createIndex("index", settings); + final GetSettingsResponse response = client().admin().indices().prepareGetSettings("index").get(); + assertThat(response.getSetting("index", "index.internal"), equalTo("internal")); + } + + public void testUpdateInternalIndexSettingViaSettingsAPI() { + final Settings settings = Settings.builder().put("index.internal", "internal").build(); + createIndex("test", settings); + final GetSettingsResponse response = client().admin().indices().prepareGetSettings("test").get(); + assertThat(response.getSetting("test", "index.internal"), equalTo("internal")); + // we can not update the setting via the update settings API + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.internal", "internal-update")) + .get()); + final String message = "can not update internal setting [index.internal]; this setting is managed via a dedicated API"; + assertThat(e, hasToString(containsString(message))); + final GetSettingsResponse responseAfterAttemptedUpdate = client().admin().indices().prepareGetSettings("test").get(); + assertThat(responseAfterAttemptedUpdate.getSetting("test", "index.internal"), equalTo("internal")); + } + + public void testUpdateInternalIndexSettingViaDedicatedAPI() { + final Settings settings = Settings.builder().put("index.internal", "internal").build(); + createIndex("test", settings); + final GetSettingsResponse response = client().admin().indices().prepareGetSettings("test").get(); + assertThat(response.getSetting("test", "index.internal"), equalTo("internal")); + client().execute( + InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, + new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request("test", "index.internal", "internal-update")) + .actionGet(); + final GetSettingsResponse responseAfterUpdate = client().admin().indices().prepareGetSettings("test").get(); + assertThat(responseAfterUpdate.getSetting("test", "index.internal"), equalTo("internal-update")); + } + +} diff --git a/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java b/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java new file mode 100644 index 00000000000..08f45eac5be --- /dev/null +++ b/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.settings; + +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collection; +import java.util.Collections; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.instanceOf; + +public class PrivateSettingsIT extends ESIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.singletonList(InternalOrPrivateSettingsPlugin.class); + } + + @Override + protected Collection<Class<? extends Plugin>> transportClientPlugins() { + return Collections.singletonList(InternalOrPrivateSettingsPlugin.class); + } + + public void testSetPrivateIndexSettingOnCreate() { + final Settings settings = Settings.builder().put("index.private", "private").build(); + final Exception e = expectThrows(Exception.class, () -> createIndex("index", settings)); + assertThat(e, anyOf(instanceOf(IllegalArgumentException.class), instanceOf(ValidationException.class))); + assertThat(e, hasToString(containsString("private index setting [index.private] can not be set explicitly"))); + } + + public void testUpdatePrivateIndexSettingViaSettingsAPI() { + createIndex("test"); + // we can not update the setting via the update settings API + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.private", "private-update")) + .get()); + final String message = "can not update private setting [index.private]; this setting is managed by Elasticsearch"; + assertThat(e, hasToString(containsString(message))); + final GetSettingsResponse responseAfterAttemptedUpdate = client().admin().indices().prepareGetSettings("test").get(); + assertNull(responseAfterAttemptedUpdate.getSetting("test", "index.private")); + } + + public void testUpdatePrivateIndexSettingViaDedicatedAPI() { + createIndex("test"); + client().execute( + InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, + new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request("test", "index.private", "private-update")) + .actionGet(); + final GetSettingsResponse responseAfterUpdate = client().admin().indices().prepareGetSettings("test").get(); + assertThat(responseAfterUpdate.getSetting("test", "index.private"), equalTo("private-update")); + } + +} diff --git a/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java
b/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index 069f965ac6b..33e9af91501 100644 --- a/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -19,40 +19,19 @@ package org.elasticsearch.indices.settings; -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -67,7 +46,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBloc import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.nullValue; public class UpdateSettingsIT extends ESIntegTestCase { @@ -101,12 +79,8 @@ public class UpdateSettingsIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(DummySettingPlugin.class, FinalSettingPlugin.class, InternalIndexSettingsPlugin.class); - } - - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(InternalIndexSettingsPlugin.class); + return Arrays.asList( + DummySettingPlugin.class, FinalSettingPlugin.class); } public static class DummySettingPlugin extends Plugin { @@ -151,167 +125,22 @@ public class UpdateSettingsIT extends ESIntegTestCase { } } - public static class InternalIndexSettingsPlugin extends Plugin implements ActionPlugin { - - public static final Setting INDEX_INTERNAL_SETTING = - Setting.simpleString("index.internal", Setting.Property.IndexScope, Setting.Property.InternalIndex); - - @Override - public List> getSettings() { 
- return Collections.singletonList(INDEX_INTERNAL_SETTING); - } - - public static class UpdateInternalIndexAction - extends Action { - - private static final UpdateInternalIndexAction INSTANCE = new UpdateInternalIndexAction(); - private static final String NAME = "indices:admin/settings/update-internal-index"; - - public UpdateInternalIndexAction() { - super(NAME); - } - - static class Request extends MasterNodeRequest { - - private String index; - private String value; - - Request() { - - } - - Request(final String index, final String value) { - this.index = index; - this.value = value; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(final StreamInput in) throws IOException { - super.readFrom(in); - index = in.readString(); - value = in.readString(); - } - - @Override - public void writeTo(final StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(index); - out.writeString(value); - } - - } - - static class Response extends ActionResponse { - - } - - @Override - public Response newResponse() { - return new Response(); - } - - } - - public static class TransportUpdateInternalIndexAction - extends TransportMasterNodeAction { - - @Inject - public TransportUpdateInternalIndexAction( - final Settings settings, - final TransportService transportService, - final ClusterService clusterService, - final ThreadPool threadPool, - final ActionFilters actionFilters, - final IndexNameExpressionResolver indexNameExpressionResolver) { - super( - settings, - UpdateInternalIndexAction.NAME, - transportService, - clusterService, - threadPool, - actionFilters, - indexNameExpressionResolver, - UpdateInternalIndexAction.Request::new); - } - - @Override - protected String executor() { - return ThreadPool.Names.SAME; - } - - @Override - protected UpdateInternalIndexAction.Response newResponse() { - return new UpdateInternalIndexAction.Response(); - } - - @Override - protected void masterOperation( - final UpdateInternalIndexAction.Request request, - final ClusterState state, - final ActionListener listener) throws Exception { - clusterService.submitStateUpdateTask("update-index-internal", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(final ClusterState currentState) throws Exception { - final MetaData.Builder builder = MetaData.builder(currentState.metaData()); - final IndexMetaData.Builder imdBuilder = IndexMetaData.builder(currentState.metaData().index(request.index)); - final Settings.Builder settingsBuilder = - Settings.builder() - .put(currentState.metaData().index(request.index).getSettings()) - .put("index.internal", request.value); - imdBuilder.settings(settingsBuilder); - builder.put(imdBuilder.build(), true); - return ClusterState.builder(currentState).metaData(builder).build(); - } - - @Override - public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { - listener.onResponse(new UpdateInternalIndexAction.Response()); - } - - @Override - public void onFailure(final String source, final Exception e) { - listener.onFailure(e); - } - - }); - } - - @Override - protected ClusterBlockException checkBlock(UpdateInternalIndexAction.Request request, ClusterState state) { - return null; - } - - } - - @Override - public List> getActions() { - return Collections.singletonList( - new ActionHandler<>(UpdateInternalIndexAction.INSTANCE, TransportUpdateInternalIndexAction.class)); - } - - } - public void 
testUpdateDependentClusterSettings() { IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() .put("cluster.acc.test.pw", "asdf")).get()); - assertEquals("Missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); + assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); iae = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() .put("cluster.acc.test.pw", "asdf")).get()); - assertEquals("Missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); + assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); iae = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() .put("cluster.acc.test.pw", "asdf")).setPersistentSettings(Settings.builder() .put("cluster.acc.test.user", "asdf")).get()); - assertEquals("Missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); + assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); if (randomBoolean()) { client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() @@ -320,7 +149,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { iae = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() .putNull("cluster.acc.test.user")).get()); - assertEquals("Missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); + assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() .putNull("cluster.acc.test.pw") .putNull("cluster.acc.test.user")).get(); @@ -332,7 +161,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { iae = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() .putNull("cluster.acc.test.user")).get()); - assertEquals("Missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); + assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() .putNull("cluster.acc.test.pw") @@ -344,7 +173,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { public void testUpdateDependentIndexSettings() { IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> prepareCreate("test", Settings.builder().put("index.acc.test.pw", "asdf")).get()); - assertEquals("Missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); + assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); createIndex("test"); for (int i = 0; i < 2; i++) { @@ -363,7 +192,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { .put("index.acc.test.pw", "asdf")) 
.execute() .actionGet()); - assertEquals("Missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); + assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); // user has no dependency client() @@ -398,7 +227,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { .putNull("index.acc.test.user")) .execute() .actionGet()); - assertEquals("Missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); + assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); // now we are consistent client() @@ -646,35 +475,4 @@ public class UpdateSettingsIT extends ESIntegTestCase { } } - public void testUpdateInternalIndexSettingViaSettingsAPI() { - final Settings settings = Settings.builder().put("index.internal", "internal").build(); - createIndex("test", settings); - final GetSettingsResponse response = client().admin().indices().prepareGetSettings("test").get(); - assertThat(response.getSetting("test", "index.internal"), equalTo("internal")); - // we can not update the setting via the update settings API - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> client().admin() - .indices() - .prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.internal", "internal-update")) - .get()); - final String message = "can not update internal setting [index.internal]; this setting is managed via a dedicated API"; - assertThat(e, hasToString(containsString(message))); - final GetSettingsResponse responseAfterAttemptedUpdate = client().admin().indices().prepareGetSettings("test").get(); - assertThat(responseAfterAttemptedUpdate.getSetting("test", "index.internal"), equalTo("internal")); - } - - public void testUpdateInternalIndexSettingViaDedicatedAPI() { - final Settings settings = Settings.builder().put("index.internal", "internal").build(); - createIndex("test", settings); - final GetSettingsResponse response = client().admin().indices().prepareGetSettings("test").get(); - assertThat(response.getSetting("test", "index.internal"), equalTo("internal")); - client().execute( - InternalIndexSettingsPlugin.UpdateInternalIndexAction.INSTANCE, - new InternalIndexSettingsPlugin.UpdateInternalIndexAction.Request("test", "internal-update")) - .actionGet(); - final GetSettingsResponse responseAfterUpdate = client().admin().indices().prepareGetSettings("test").get(); - assertThat(responseAfterUpdate.getSetting("test", "index.internal"), equalTo("internal-update")); - } - } diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 5ed4b370307..44ca66d571d 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.stats; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -43,6 +42,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import 
org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.MergeSchedulerConfig; @@ -50,6 +50,7 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesRequestCache; @@ -69,6 +70,7 @@ import java.util.Collections; import java.util.EnumSet; import java.util.List; import java.util.Random; +import java.util.Set; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -115,6 +117,7 @@ public class IndexStatsIT extends ESIntegTestCase { return Settings.builder().put(super.indexSettings()) .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0) .build(); } @@ -723,54 +726,6 @@ public class IndexStatsIT extends ESIntegTestCase { } - public void testFieldDataFieldsParam() throws Exception { - assertAcked(client().admin().indices().prepareCreate("test1") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) - .addMapping("_doc", "bar", "type=text,fielddata=true", - "baz", "type=text,fielddata=true").get()); - - ensureGreen(); - - client().prepareIndex("test1", "_doc", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); - client().prepareIndex("test1", "_doc", Integer.toString(2)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); - refresh(); - - client().prepareSearch("_all").addSort("bar", SortOrder.ASC).addSort("baz", SortOrder.ASC).execute().actionGet(); - - IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); - IndicesStatsResponse stats = builder.execute().actionGet(); - - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields(), is(nullValue())); - - stats = builder.setFieldDataFields("bar").execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(false)); - - stats = builder.setFieldDataFields("bar", "baz").execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("baz"), greaterThan(0L)); - - stats = builder.setFieldDataFields("*").execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - 
assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("baz"), greaterThan(0L)); - - stats = builder.setFieldDataFields("*r").execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(false)); - - } - public void testCompletionFieldsParam() throws Exception { assertAcked(prepareCreate("test1") .addMapping( @@ -1006,10 +961,15 @@ public class IndexStatsIT extends ESIntegTestCase { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32506") public void testFilterCacheStats() throws Exception { - assertAcked(prepareCreate("index").setSettings(Settings.builder().put(indexSettings()).put("number_of_replicas", 0).build()).get()); - indexRandom(true, + Settings settings = Settings.builder().put(indexSettings()).put("number_of_replicas", 0).build(); + assertAcked(prepareCreate("index").setSettings(settings).get()); + indexRandom(false, true, client().prepareIndex("index", "type", "1").setSource("foo", "bar"), client().prepareIndex("index", "type", "2").setSource("foo", "baz")); + if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)) { + persistGlobalCheckpoint("index"); // Need to persist the global checkpoint for the soft-deletes retention MP. + } + refresh(); ensureGreen(); IndicesStatsResponse response = client().admin().indices().prepareStats("index").setQueryCache(true).get(); @@ -1040,6 +1000,13 @@ public class IndexStatsIT extends ESIntegTestCase { assertEquals(DocWriteResponse.Result.DELETED, client().prepareDelete("index", "type", "1").get().getResult()); assertEquals(DocWriteResponse.Result.DELETED, client().prepareDelete("index", "type", "2").get().getResult()); + // Here we are testing that a fully deleted segment should be dropped and its cached is evicted. + // In order to instruct the merge policy not to keep a fully deleted segment, + // we need to flush and make that commit safe so that the SoftDeletesPolicy can drop everything. + if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)) { + persistGlobalCheckpoint("index"); + flush("index"); + } refresh(); response = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeQueryCacheStats(response); @@ -1173,4 +1140,21 @@ public class IndexStatsIT extends ESIntegTestCase { assertThat(executionFailures.get(), emptyCollectionOf(Exception.class)); } + + /** + * Persist the global checkpoint on all shards of the given index into disk. + * This makes sure that the persisted global checkpoint on those shards will equal to the in-memory value. 
+ */ + private void persistGlobalCheckpoint(String index) throws Exception { + final Set nodes = internalCluster().nodesInclude(index); + for (String node : nodes) { + final IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + for (IndexService indexService : indexServices) { + for (IndexShard indexShard : indexService) { + indexShard.sync(); + assertThat(indexShard.getLastSyncedGlobalCheckpoint(), equalTo(indexShard.getGlobalCheckpoint())); + } + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/indices/stats/LegacyIndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/LegacyIndexStatsIT.java new file mode 100644 index 00000000000..c8ae3edb886 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/indices/stats/LegacyIndexStatsIT.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.stats; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESIntegTestCase; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class LegacyIndexStatsIT extends ESIntegTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public void testFieldDataFieldsParam() { + assertAcked(client() + .admin() + .indices() + .prepareCreate("test1") + .setSettings(Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_0_0)) + .addMapping("_doc", "bar", "type=text,fielddata=true", "baz", "type=text,fielddata=true") + .get()); + + ensureGreen(); + + client().prepareIndex("test1", "_doc", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); + client().prepareIndex("test1", "_doc", Integer.toString(2)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); + refresh(); + + client().prepareSearch("_all").addSort("bar", SortOrder.ASC).addSort("baz", SortOrder.ASC).execute().actionGet(); + + final IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); + + { + final IndicesStatsResponse stats = builder.execute().actionGet(); + assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); + 
assertThat(stats.getTotal().fieldData.getFields(), is(nullValue())); + } + + { + final IndicesStatsResponse stats = builder.setFieldDataFields("bar").execute().actionGet(); + assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); + assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); + assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); + assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(false)); + } + + { + final IndicesStatsResponse stats = builder.setFieldDataFields("bar", "baz").execute().actionGet(); + assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); + assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); + assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); + assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(true)); + assertThat(stats.getTotal().fieldData.getFields().get("baz"), greaterThan(0L)); + } + + { + final IndicesStatsResponse stats = builder.setFieldDataFields("*").execute().actionGet(); + assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); + assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); + assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); + assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(true)); + assertThat(stats.getTotal().fieldData.getFields().get("baz"), greaterThan(0L)); + } + + { + final IndicesStatsResponse stats = builder.setFieldDataFields("*r").execute().actionGet(); + assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); + assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); + assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); + assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(false)); + } + + } + +} diff --git a/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java new file mode 100644 index 00000000000..12b4078ddf8 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java @@ -0,0 +1,142 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.ingest; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.core.Is.is; + +public class ConditionalProcessorTests extends ESTestCase { + + public void testChecksCondition() throws Exception { + String conditionalField = "field1"; + String scriptName = "conditionalScript"; + String trueValue = "truthy"; + ScriptService scriptService = new ScriptService(Settings.builder().build(), + Collections.singletonMap( + Script.DEFAULT_SCRIPT_LANG, + new MockScriptEngine( + Script.DEFAULT_SCRIPT_LANG, + Collections.singletonMap( + scriptName, ctx -> trueValue.equals(ctx.get(conditionalField)) + ) + ) + ), + new HashMap<>(ScriptModule.CORE_CONTEXTS) + ); + Map document = new HashMap<>(); + ConditionalProcessor processor = new ConditionalProcessor( + randomAlphaOfLength(10), + new Script( + ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, + scriptName, Collections.emptyMap()), scriptService, + new Processor() { + @Override + public IngestDocument execute(final IngestDocument ingestDocument) throws Exception { + ingestDocument.setFieldValue("foo", "bar"); + return ingestDocument; + } + + @Override + public String getType() { + return null; + } + + @Override + public String getTag() { + return null; + } + }); + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + ingestDocument.setFieldValue(conditionalField, trueValue); + processor.execute(ingestDocument); + assertThat(ingestDocument.getSourceAndMetadata().get(conditionalField), is(trueValue)); + assertThat(ingestDocument.getSourceAndMetadata().get("foo"), is("bar")); + + String falseValue = "falsy"; + ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + ingestDocument.setFieldValue(conditionalField, falseValue); + processor.execute(ingestDocument); + assertThat(ingestDocument.getSourceAndMetadata().get(conditionalField), is(falseValue)); + assertThat(ingestDocument.getSourceAndMetadata(), not(hasKey("foo"))); + } + + @SuppressWarnings("unchecked") + public void testActsOnImmutableData() throws Exception { + assertMutatingCtxThrows(ctx -> ctx.remove("foo")); + assertMutatingCtxThrows(ctx -> ctx.put("foo", "bar")); + assertMutatingCtxThrows(ctx -> ((List)ctx.get("listField")).add("bar")); + assertMutatingCtxThrows(ctx -> ((List)ctx.get("listField")).remove("bar")); + } + + private static void assertMutatingCtxThrows(Consumer> mutation) throws Exception { + String scriptName = "conditionalScript"; + CompletableFuture expectedException = new CompletableFuture<>(); + ScriptService scriptService = new ScriptService(Settings.builder().build(), + Collections.singletonMap( + Script.DEFAULT_SCRIPT_LANG, + new MockScriptEngine( + Script.DEFAULT_SCRIPT_LANG, + Collections.singletonMap( + scriptName, ctx -> { + try { + mutation.accept(ctx); + } catch (Exception e) { + expectedException.complete(e); + } + return 
false; + } + ) + ) + ), + new HashMap<>(ScriptModule.CORE_CONTEXTS) + ); + Map<String, Object> document = new HashMap<>(); + ConditionalProcessor processor = new ConditionalProcessor( + randomAlphaOfLength(10), + new Script( + ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, + scriptName, Collections.emptyMap()), scriptService, null + ); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + ingestDocument.setFieldValue("listField", new ArrayList<>()); + processor.execute(ingestDocument); + Exception e = expectedException.get(); + assertThat(e, instanceOf(UnsupportedOperationException.class)); + assertEquals("Mutating ingest documents in conditionals is not supported", e.getMessage()); + } +} diff --git a/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java b/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java index 61afd9ce2a4..9111658e49c 100644 --- a/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -38,6 +39,9 @@ import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; public class ConfigurationUtilsTests extends ESTestCase { + + private final ScriptService scriptService = mock(ScriptService.class); + private Map<String, Object> config; @Before @@ -88,12 +92,6 @@ public class ConfigurationUtilsTests extends ESTestCase { } } - // TODO(talevy): Issue with generics. This test should fail, "int" is of type List<Integer> - public void testOptional_InvalidType() { - List<String> val = ConfigurationUtils.readList(null, null, config, "int"); - assertThat(val, equalTo(Collections.singletonList(2))); - } - public void testReadStringOrIntProperty() { String val1 = ConfigurationUtils.readStringOrIntProperty(null, null, config, "foo", null); String val2 = ConfigurationUtils.readStringOrIntProperty(null, null, config, "num", null); @@ -120,7 +118,7 @@ config.add(Collections.singletonMap("test_processor", emptyConfig)); config.add(Collections.singletonMap("test_processor", emptyConfig)); - List<Processor> result = ConfigurationUtils.readProcessorConfigs(config, registry); + List<Processor> result = ConfigurationUtils.readProcessorConfigs(config, scriptService, registry); assertThat(result.size(), equalTo(2)); assertThat(result.get(0), sameInstance(processor)); assertThat(result.get(1), sameInstance(processor)); @@ -129,7 +127,7 @@ unknownTaggedConfig.put("tag", "my_unknown"); config.add(Collections.singletonMap("unknown_processor", unknownTaggedConfig)); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, - () -> ConfigurationUtils.readProcessorConfigs(config, registry)); + () -> ConfigurationUtils.readProcessorConfigs(config, scriptService, registry)); assertThat(e.getMessage(), equalTo("No processor type exists with name [unknown_processor]")); assertThat(e.getMetadata("es.processor_tag"), equalTo(Collections.singletonList("my_unknown"))); assertThat(e.getMetadata("es.processor_type"), equalTo(Collections.singletonList("unknown_processor"))); @@ -142,7 +140,10 @@ Map<String, Object> secondUnknonwTaggedConfig = new HashMap<>();
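// Note: the "tag" set here surfaces in the error metadata (es.processor_tag) asserted below, which is how a user locates the failing processor definition.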
secondUnknonwTaggedConfig.put("tag", "my_second_unknown"); config2.add(Collections.singletonMap("second_unknown_processor", secondUnknonwTaggedConfig)); - e = expectThrows(ElasticsearchParseException.class, () -> ConfigurationUtils.readProcessorConfigs(config2, registry)); + e = expectThrows( + ElasticsearchParseException.class, + () -> ConfigurationUtils.readProcessorConfigs(config2, scriptService, registry) + ); assertThat(e.getMessage(), equalTo("No processor type exists with name [unknown_processor]")); assertThat(e.getMetadata("es.processor_tag"), equalTo(Collections.singletonList("my_unknown"))); assertThat(e.getMetadata("es.processor_type"), equalTo(Collections.singletonList("unknown_processor"))); @@ -166,17 +167,17 @@ public class ConfigurationUtilsTests extends ESTestCase { }); Object emptyConfig = Collections.emptyMap(); - Processor processor1 = ConfigurationUtils.readProcessor(registry, "script", emptyConfig); + Processor processor1 = ConfigurationUtils.readProcessor(registry, scriptService, "script", emptyConfig); assertThat(processor1, sameInstance(processor)); Object inlineScript = "test_script"; - Processor processor2 = ConfigurationUtils.readProcessor(registry, "script", inlineScript); + Processor processor2 = ConfigurationUtils.readProcessor(registry, scriptService, "script", inlineScript); assertThat(processor2, sameInstance(processor)); Object invalidConfig = 12L; ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class, - () -> ConfigurationUtils.readProcessor(registry, "unknown_processor", invalidConfig)); + () -> ConfigurationUtils.readProcessor(registry, scriptService, "unknown_processor", invalidConfig)); assertThat(ex.getMessage(), equalTo("property isn't a map, but of type [" + invalidConfig.getClass().getName() + "]")); } diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java b/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java index 65139109a83..6e5d862372a 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java @@ -60,7 +60,6 @@ public class IngestClientIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { - // TODO: Remove this method once gets in: https://github.com/elastic/elasticsearch/issues/16019 if (nodeOrdinal % 2 == 0) { return Settings.builder().put("node.ingest", false).put(super.nodeSettings(nodeOrdinal)).build(); } diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index 338e5b662c5..4c2352bfebe 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -97,12 +97,12 @@ public class IngestProcessorNotInstalledOnAllNodesIT extends ESIntegTestCase { AcknowledgedResponse response = client().admin().cluster().preparePutPipeline("_id", pipelineSource, XContentType.JSON).get(); assertThat(response.isAcknowledged(), is(true)); - Pipeline pipeline = internalCluster().getInstance(NodeService.class, node1).getIngestService().getPipelineStore().get("_id"); + Pipeline pipeline = internalCluster().getInstance(NodeService.class, node1).getIngestService().getPipeline("_id"); assertThat(pipeline, notNullValue()); installPlugin = false; String node2 = internalCluster().startNode(); - 
pipeline = internalCluster().getInstance(NodeService.class, node2).getIngestService().getPipelineStore().get("_id"); + pipeline = internalCluster().getInstance(NodeService.class, node2).getIngestService().getPipeline("_id"); assertNotNull(pipeline); assertThat(pipeline.getId(), equalTo("_id")); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index c0353acb7f9..e3f52f35b79 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -21,16 +21,69 @@ package org.elasticsearch.ingest; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; import java.util.Map; - -import org.elasticsearch.common.settings.Settings; +import java.util.Objects; +import java.util.concurrent.ExecutorService; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.mockito.Mockito; +import org.hamcrest.CustomTypeSafeMatcher; +import org.mockito.ArgumentMatcher; +import org.mockito.invocation.InvocationOnMock; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.argThat; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class IngestServiceTests extends ESTestCase { - private final 
IngestPlugin DUMMY_PLUGIN = new IngestPlugin() { + + private static final IngestPlugin DUMMY_PLUGIN = new IngestPlugin() { @Override public Map getProcessors(Processor.Parameters parameters) { return Collections.singletonMap("foo", (factories, tag, config) -> null); @@ -38,19 +91,814 @@ public class IngestServiceTests extends ESTestCase { }; public void testIngestPlugin() { - ThreadPool tp = Mockito.mock(ThreadPool.class); - IngestService ingestService = new IngestService(Settings.EMPTY, tp, null, null, + ThreadPool tp = mock(ThreadPool.class); + IngestService ingestService = new IngestService(mock(ClusterService.class), tp, null, null, null, Collections.singletonList(DUMMY_PLUGIN)); - Map factories = ingestService.getPipelineStore().getProcessorFactories(); + Map factories = ingestService.getProcessorFactories(); assertTrue(factories.containsKey("foo")); assertEquals(1, factories.size()); } public void testIngestPluginDuplicate() { - ThreadPool tp = Mockito.mock(ThreadPool.class); + ThreadPool tp = mock(ThreadPool.class); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - new IngestService(Settings.EMPTY, tp, null, null, + new IngestService(mock(ClusterService.class), tp, null, null, null, Arrays.asList(DUMMY_PLUGIN, DUMMY_PLUGIN))); assertTrue(e.getMessage(), e.getMessage().contains("already registered")); } + + public void testExecuteIndexPipelineDoesNotExist() { + ThreadPool threadPool = mock(ThreadPool.class); + final ExecutorService executorService = EsExecutors.newDirectExecutorService(); + when(threadPool.executor(anyString())).thenReturn(executorService); + IngestService ingestService = new IngestService(mock(ClusterService.class), threadPool, null, null, + null, Collections.singletonList(DUMMY_PLUGIN)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + + final SetOnce failure = new SetOnce<>(); + final BiConsumer failureHandler = (request, e) -> { + failure.set(true); + assertThat(request, sameInstance(indexRequest)); + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), equalTo("pipeline with id [_id] does not exist")); + }; + + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {}); + + assertTrue(failure.get()); + verify(completionHandler, times(1)).accept(null); + } + + public void testUpdatePipelines() { + IngestService ingestService = createWithProcessors(); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.pipelines().size(), is(0)); + + PipelineConfiguration pipeline = new PipelineConfiguration( + "_id",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON + ); + IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", pipeline)); + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) + .build(); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.pipelines().size(), is(1)); + 
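// the cluster-state update should have compiled the stored configuration into a Pipeline instance: +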
assertThat(ingestService.pipelines().get("_id").getId(), equalTo("_id")); + assertThat(ingestService.pipelines().get("_id").getDescription(), nullValue()); + assertThat(ingestService.pipelines().get("_id").getProcessors().size(), equalTo(1)); + assertThat(ingestService.pipelines().get("_id").getProcessors().get(0).getType(), equalTo("set")); + } + + public void testDelete() { + IngestService ingestService = createWithProcessors(); + PipelineConfiguration config = new PipelineConfiguration( + "_id",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON + ); + IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", config)); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() + .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("_id"), notNullValue()); + + // Delete pipeline: + DeletePipelineRequest deleteRequest = new DeletePipelineRequest("_id"); + previousClusterState = clusterState; + clusterState = IngestService.innerDelete(deleteRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("_id"), nullValue()); + + // Delete non-existent pipeline: + try { + IngestService.innerDelete(deleteRequest, clusterState); + fail("exception expected"); + } catch (ResourceNotFoundException e) { + assertThat(e.getMessage(), equalTo("pipeline [_id] is missing")); + } + } + + public void testValidateNoIngestInfo() throws Exception { + IngestService ingestService = createWithProcessors(); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( + "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON); + Exception e = expectThrows(IllegalStateException.class, () -> ingestService.validatePipeline(emptyMap(), putRequest)); + assertEquals("Ingest info is empty", e.getMessage()); + + DiscoveryNode discoveryNode = new DiscoveryNode("_node_id", buildNewFakeTransportAddress(), + emptyMap(), emptySet(), Version.CURRENT); + IngestInfo ingestInfo = new IngestInfo(Collections.singletonList(new ProcessorInfo("set"))); + ingestService.validatePipeline(Collections.singletonMap(discoveryNode, ingestInfo), putRequest); + } + + public void testCrud() throws Exception { + IngestService ingestService = createWithProcessors(); + String id = "_id"; + Pipeline pipeline = ingestService.getPipeline(id); + assertThat(pipeline, nullValue()); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + + PutPipelineRequest putRequest = new PutPipelineRequest(id, + new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + pipeline = ingestService.getPipeline(id); + assertThat(pipeline, notNullValue()); + assertThat(pipeline.getId(), equalTo(id)); + assertThat(pipeline.getDescription(), nullValue()); + assertThat(pipeline.getProcessors().size(), equalTo(1));
+ assertThat(pipeline.getProcessors().get(0).getType(), equalTo("set")); + + DeletePipelineRequest deleteRequest = new DeletePipelineRequest(id); + previousClusterState = clusterState; + clusterState = IngestService.innerDelete(deleteRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + pipeline = ingestService.getPipeline(id); + assertThat(pipeline, nullValue()); + } + + public void testPut() { + IngestService ingestService = createWithProcessors(); + String id = "_id"; + Pipeline pipeline = ingestService.getPipeline(id); + assertThat(pipeline, nullValue()); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + + // add a new pipeline: + PutPipelineRequest putRequest = new PutPipelineRequest(id, new BytesArray("{\"processors\": []}"), XContentType.JSON); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + pipeline = ingestService.getPipeline(id); + assertThat(pipeline, notNullValue()); + assertThat(pipeline.getId(), equalTo(id)); + assertThat(pipeline.getDescription(), nullValue()); + assertThat(pipeline.getProcessors().size(), equalTo(0)); + + // overwrite existing pipeline: + putRequest = + new PutPipelineRequest(id, new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), XContentType.JSON); + previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + pipeline = ingestService.getPipeline(id); + assertThat(pipeline, notNullValue()); + assertThat(pipeline.getId(), equalTo(id)); + assertThat(pipeline.getDescription(), equalTo("_description")); + assertThat(pipeline.getProcessors().size(), equalTo(0)); + } + + public void testPutWithErrorResponse() { + IngestService ingestService = createWithProcessors(); + String id = "_id"; + Pipeline pipeline = ingestService.getPipeline(id); + assertThat(pipeline, nullValue()); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + + PutPipelineRequest putRequest = + new PutPipelineRequest(id, new BytesArray("{\"description\": \"empty processors\"}"), XContentType.JSON); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + try { + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + fail("should fail"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), equalTo("[processors] required property is missing")); + } + pipeline = ingestService.getPipeline(id); + assertNotNull(pipeline); + assertThat(pipeline.getId(), equalTo("_id")); + assertThat(pipeline.getDescription(), equalTo("this is a place holder pipeline, because pipeline with" + + " id [_id] could not be loaded")); + assertThat(pipeline.getProcessors().size(), equalTo(1)); + assertNull(pipeline.getProcessors().get(0).getTag()); + assertThat(pipeline.getProcessors().get(0).getType(), equalTo("unknown")); + } + + public void testDeleteUsingWildcard() { + IngestService ingestService = createWithProcessors(); + HashMap pipelines = new HashMap<>(); + BytesArray definition = new BytesArray( + "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}" + ); + pipelines.put("p1", new 
PipelineConfiguration("p1", definition, XContentType.JSON)); + pipelines.put("p2", new PipelineConfiguration("p2", definition, XContentType.JSON)); + pipelines.put("q1", new PipelineConfiguration("q1", definition, XContentType.JSON)); + IngestMetadata ingestMetadata = new IngestMetadata(pipelines); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() + .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("p1"), notNullValue()); + assertThat(ingestService.getPipeline("p2"), notNullValue()); + assertThat(ingestService.getPipeline("q1"), notNullValue()); + + // Delete pipeline matching wildcard + DeletePipelineRequest deleteRequest = new DeletePipelineRequest("p*"); + previousClusterState = clusterState; + clusterState = IngestService.innerDelete(deleteRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("p1"), nullValue()); + assertThat(ingestService.getPipeline("p2"), nullValue()); + assertThat(ingestService.getPipeline("q1"), notNullValue()); + + // Exception if we used name which does not exist + try { + IngestService.innerDelete(new DeletePipelineRequest("unknown"), clusterState); + fail("exception expected"); + } catch (ResourceNotFoundException e) { + assertThat(e.getMessage(), equalTo("pipeline [unknown] is missing")); + } + + // match all wildcard works on last remaining pipeline + DeletePipelineRequest matchAllDeleteRequest = new DeletePipelineRequest("*"); + previousClusterState = clusterState; + clusterState = IngestService.innerDelete(matchAllDeleteRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("p1"), nullValue()); + assertThat(ingestService.getPipeline("p2"), nullValue()); + assertThat(ingestService.getPipeline("q1"), nullValue()); + + // match all wildcard does not throw exception if none match + IngestService.innerDelete(matchAllDeleteRequest, clusterState); + } + + public void testDeleteWithExistingUnmatchedPipelines() { + IngestService ingestService = createWithProcessors(); + HashMap pipelines = new HashMap<>(); + BytesArray definition = new BytesArray( + "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}" + ); + pipelines.put("p1", new PipelineConfiguration("p1", definition, XContentType.JSON)); + IngestMetadata ingestMetadata = new IngestMetadata(pipelines); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() + .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("p1"), notNullValue()); + + DeletePipelineRequest deleteRequest = new DeletePipelineRequest("z*"); + try { + IngestService.innerDelete(deleteRequest, clusterState); + fail("exception expected"); + } catch (ResourceNotFoundException e) { + assertThat(e.getMessage(), equalTo("pipeline [z*] is missing")); + } + } + + public void testGetPipelines() { + Map 
configs = new HashMap<>(); + configs.put("_id1", new PipelineConfiguration( + "_id1", new BytesArray("{\"processors\": []}"), XContentType.JSON + )); + configs.put("_id2", new PipelineConfiguration( + "_id2", new BytesArray("{\"processors\": []}"), XContentType.JSON + )); + + assertThat(IngestService.innerGetPipelines(null, "_id1").isEmpty(), is(true)); + + IngestMetadata ingestMetadata = new IngestMetadata(configs); + List pipelines = IngestService.innerGetPipelines(ingestMetadata, "_id1"); + assertThat(pipelines.size(), equalTo(1)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + + pipelines = IngestService.innerGetPipelines(ingestMetadata, "_id1", "_id2"); + assertThat(pipelines.size(), equalTo(2)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + assertThat(pipelines.get(1).getId(), equalTo("_id2")); + + pipelines = IngestService.innerGetPipelines(ingestMetadata, "_id*"); + pipelines.sort(Comparator.comparing(PipelineConfiguration::getId)); + assertThat(pipelines.size(), equalTo(2)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + assertThat(pipelines.get(1).getId(), equalTo("_id2")); + + // get all variants: (no IDs or '*') + pipelines = IngestService.innerGetPipelines(ingestMetadata); + pipelines.sort(Comparator.comparing(PipelineConfiguration::getId)); + assertThat(pipelines.size(), equalTo(2)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + assertThat(pipelines.get(1).getId(), equalTo("_id2")); + + pipelines = IngestService.innerGetPipelines(ingestMetadata, "*"); + pipelines.sort(Comparator.comparing(PipelineConfiguration::getId)); + assertThat(pipelines.size(), equalTo(2)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + assertThat(pipelines.get(1).getId(), equalTo("_id2")); + } + + public void testValidate() throws Exception { + IngestService ingestService = createWithProcessors(); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( + "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\", \"tag\": \"tag1\"}}," + + "{\"remove\" : {\"field\": \"_field\", \"tag\": \"tag2\"}}]}"), + XContentType.JSON); + + DiscoveryNode node1 = new DiscoveryNode("_node_id1", buildNewFakeTransportAddress(), + emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode node2 = new DiscoveryNode("_node_id2", buildNewFakeTransportAddress(), + emptyMap(), emptySet(), Version.CURRENT); + Map ingestInfos = new HashMap<>(); + ingestInfos.put(node1, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); + ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set")))); + + ElasticsearchParseException e = + expectThrows(ElasticsearchParseException.class, () -> ingestService.validatePipeline(ingestInfos, putRequest)); + assertEquals("Processor type [remove] is not installed on node [" + node2 + "]", e.getMessage()); + assertEquals("remove", e.getMetadata("es.processor_type").get(0)); + assertEquals("tag2", e.getMetadata("es.processor_tag").get(0)); + + ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); + ingestService.validatePipeline(ingestInfos, putRequest); + } + + public void testExecuteIndexPipelineExistsButFailedParsing() { + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> new AbstractProcessor("mock") { + @Override + public IngestDocument execute(IngestDocument ingestDocument) { + throw new IllegalStateException("error"); + } + + 
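// getType() is not exercised by this test; the stub only needs execute() to throw +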
@Override + public String getType() { + return null; + } + } + )); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + String id = "_id"; + PutPipelineRequest putRequest = new PutPipelineRequest(id, + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final SetOnce failure = new SetOnce<>(); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline(id); + final BiConsumer failureHandler = (request, e) -> { + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(e.getCause().getCause(), instanceOf(IllegalStateException.class)); + assertThat(e.getCause().getCause().getMessage(), equalTo("error")); + failure.set(true); + }; + + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {}); + + assertTrue(failure.get()); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecuteBulkPipelineDoesNotExist() { + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> mock(CompoundProcessor.class))); + + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + + BulkRequest bulkRequest = new BulkRequest(); + + IndexRequest indexRequest1 = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + bulkRequest.add(indexRequest1); + IndexRequest indexRequest2 = + new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("does_not_exist"); + bulkRequest.add(indexRequest2); + @SuppressWarnings("unchecked") + BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(bulkRequest.requests(), failureHandler, completionHandler, indexReq -> {}); + verify(failureHandler, times(1)).accept( + argThat(new CustomTypeSafeMatcher("failure handler was not called with the expected arguments") { + @Override + protected boolean matchesSafely(IndexRequest item) { + return item == indexRequest2; + } + + }), + argThat(new CustomTypeSafeMatcher("failure handler was not called with the expected arguments") { + @Override + protected boolean matchesSafely(IllegalArgumentException iae) { + return "pipeline with id [does_not_exist] does not exist".equals(iae.getMessage()); + } + }) + ); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecuteSuccess() { + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> mock(CompoundProcessor.class))); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), 
XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {}); + verify(failureHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecuteEmptyPipeline() throws Exception { + IngestService ingestService = createWithProcessors(emptyMap()); + PutPipelineRequest putRequest = + new PutPipelineRequest("_id", new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {}); + verify(failureHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecutePropagateAllMetaDataUpdates() throws Exception { + final CompoundProcessor processor = mock(CompoundProcessor.class); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> processor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final long newVersion = randomLong(); + final String versionType = randomFrom("internal", "external", "external_gt", "external_gte"); + doAnswer((InvocationOnMock invocationOnMock) -> { + IngestDocument ingestDocument = (IngestDocument) invocationOnMock.getArguments()[0]; + for (IngestDocument.MetaData metaData : IngestDocument.MetaData.values()) { + if (metaData == IngestDocument.MetaData.VERSION) { + ingestDocument.setFieldValue(metaData.getFieldName(), newVersion); + } else if (metaData == IngestDocument.MetaData.VERSION_TYPE) { + ingestDocument.setFieldValue(metaData.getFieldName(), versionType); + } else { + ingestDocument.setFieldValue(metaData.getFieldName(), "update" + metaData.getFieldName()); + } + } + return ingestDocument; + }).when(processor).execute(any()); 
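+ // the stub above rewrites every metadata field; the assertions below verify that those updates propagate back onto the IndexRequest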
+ final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {}); + verify(processor).execute(any()); + verify(failureHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); + assertThat(indexRequest.index(), equalTo("update_index")); + assertThat(indexRequest.type(), equalTo("update_type")); + assertThat(indexRequest.id(), equalTo("update_id")); + assertThat(indexRequest.routing(), equalTo("update_routing")); + assertThat(indexRequest.version(), equalTo(newVersion)); + assertThat(indexRequest.versionType(), equalTo(VersionType.fromString(versionType))); + } + + public void testExecuteFailure() throws Exception { + final CompoundProcessor processor = mock(CompoundProcessor.class); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> processor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()) + .when(processor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {}); + verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecuteSuccessWithOnFailure() throws Exception { + final Processor processor = mock(Processor.class); + when(processor.getType()).thenReturn("mock_processor_type"); + when(processor.getTag()).thenReturn("mock_processor_tag"); + final Processor onFailureProcessor = mock(Processor.class); + final CompoundProcessor compoundProcessor = new CompoundProcessor( + false, Collections.singletonList(processor), Collections.singletonList(new CompoundProcessor(onFailureProcessor))); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> compoundProcessor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new 
ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()).when(processor).execute(eqIndexTypeId(emptyMap())); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {}); + verify(failureHandler, never()).accept(eq(indexRequest), any(ElasticsearchException.class)); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecuteFailureWithNestedOnFailure() throws Exception { + final Processor processor = mock(Processor.class); + final Processor onFailureProcessor = mock(Processor.class); + final Processor onFailureOnFailureProcessor = mock(Processor.class); + final List processors = Collections.singletonList(onFailureProcessor); + final List onFailureProcessors = Collections.singletonList(onFailureOnFailureProcessor); + final CompoundProcessor compoundProcessor = new CompoundProcessor( + false, + Collections.singletonList(processor), + Collections.singletonList(new CompoundProcessor(false, processors, onFailureProcessors))); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> compoundProcessor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()) + .when(onFailureOnFailureProcessor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + doThrow(new RuntimeException()) + .when(onFailureProcessor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + doThrow(new RuntimeException()) + .when(processor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {}); + verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); + verify(completionHandler, times(1)).accept(null); + } + + public void testBulkRequestExecutionWithFailures() throws Exception { + BulkRequest bulkRequest = new BulkRequest(); + String pipelineId = "_id"; + + int numRequest = scaledRandomIntBetween(8, 64); + int numIndexRequests = 0; + for (int i = 0; i < numRequest; i++) { + DocWriteRequest request; + if (randomBoolean()) { + if (randomBoolean()) { + request = new DeleteRequest("_index", "_type", "_id"); + } else { + request = new 
UpdateRequest("_index", "_type", "_id"); + } + } else { + IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId); + indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); + request = indexRequest; + numIndexRequests++; + } + bulkRequest.add(request); + } + + CompoundProcessor processor = mock(CompoundProcessor.class); + when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); + Exception error = new RuntimeException(); + doThrow(error).when(processor).execute(any()); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> processor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + + @SuppressWarnings("unchecked") + BiConsumer requestItemErrorHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler, indexReq -> {}); + + verify(requestItemErrorHandler, times(numIndexRequests)).accept(any(IndexRequest.class), argThat(new ArgumentMatcher() { + @Override + public boolean matches(final Object o) { + return ((Exception)o).getCause().getCause().equals(error); + } + })); + verify(completionHandler, times(1)).accept(null); + } + + public void testBulkRequestExecution() { + BulkRequest bulkRequest = new BulkRequest(); + String pipelineId = "_id"; + + int numRequest = scaledRandomIntBetween(8, 64); + for (int i = 0; i < numRequest; i++) { + IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId); + indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); + bulkRequest.add(indexRequest); + } + + IngestService ingestService = createWithProcessors(emptyMap()); + PutPipelineRequest putRequest = + new PutPipelineRequest("_id", new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + + @SuppressWarnings("unchecked") + BiConsumer requestItemErrorHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler, indexReq -> {}); + + verify(requestItemErrorHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); + } + + public void testStats() { + final Processor processor = mock(Processor.class); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> processor)); + final IngestStats initialStats = ingestService.stats(); + assertThat(initialStats.getStatsPerPipeline().size(), equalTo(0)); + assertThat(initialStats.getTotalStats().getIngestCount(), 
equalTo(0L)); + assertThat(initialStats.getTotalStats().getIngestCurrent(), equalTo(0L)); + assertThat(initialStats.getTotalStats().getIngestFailedCount(), equalTo(0L)); + assertThat(initialStats.getTotalStats().getIngestTimeInMillis(), equalTo(0L)); + + PutPipelineRequest putRequest = new PutPipelineRequest("_id1", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + putRequest = new PutPipelineRequest("_id2", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final Map configurationMap = new HashMap<>(); + configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); + configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); + ingestService.updatePipelineStats(new IngestMetadata(configurationMap)); + + @SuppressWarnings("unchecked") final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") final Consumer completionHandler = mock(Consumer.class); + + final IndexRequest indexRequest = new IndexRequest("_index"); + indexRequest.setPipeline("_id1"); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {}); + final IngestStats afterFirstRequestStats = ingestService.stats(); + assertThat(afterFirstRequestStats.getStatsPerPipeline().size(), equalTo(2)); + assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); + assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(0L)); + assertThat(afterFirstRequestStats.getTotalStats().getIngestCount(), equalTo(1L)); + + indexRequest.setPipeline("_id2"); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {}); + final IngestStats afterSecondRequestStats = ingestService.stats(); + assertThat(afterSecondRequestStats.getStatsPerPipeline().size(), equalTo(2)); + assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); + assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(1L)); + assertThat(afterSecondRequestStats.getTotalStats().getIngestCount(), equalTo(2L)); + } + + // issue: https://github.com/elastic/elasticsearch/issues/18126 + public void testUpdatingStatsWhenRemovingPipelineWorks() { + IngestService ingestService = createWithProcessors(); + Map configurationMap = new HashMap<>(); + configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); + configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); + ingestService.updatePipelineStats(new IngestMetadata(configurationMap)); + assertThat(ingestService.stats().getStatsPerPipeline(), hasKey("_id1")); + assertThat(ingestService.stats().getStatsPerPipeline(), hasKey("_id2")); + + configurationMap = new HashMap<>(); + 
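// rebuild the metadata without _id1 and _id2 so their stats entries are removed +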
configurationMap.put("_id3", new PipelineConfiguration("_id3", new BytesArray("{}"), XContentType.JSON)); + ingestService.updatePipelineStats(new IngestMetadata(configurationMap)); + assertThat(ingestService.stats().getStatsPerPipeline(), not(hasKey("_id1"))); + assertThat(ingestService.stats().getStatsPerPipeline(), not(hasKey("_id2"))); + } + + private IngestDocument eqIndexTypeId(final Map source) { + return argThat(new IngestDocumentMatcher("_index", "_type", "_id", source)); + } + + private IngestDocument eqIndexTypeId(final Long version, final VersionType versionType, final Map source) { + return argThat(new IngestDocumentMatcher("_index", "_type", "_id", version, versionType, source)); + } + + private static IngestService createWithProcessors() { + Map processors = new HashMap<>(); + processors.put("set", (factories, tag, config) -> { + String field = (String) config.remove("field"); + String value = (String) config.remove("value"); + return new Processor() { + @Override + public IngestDocument execute(IngestDocument ingestDocument) { + ingestDocument.setFieldValue(field, value); + return ingestDocument; + } + + @Override + public String getType() { + return "set"; + } + + @Override + public String getTag() { + return tag; + } + }; + }); + processors.put("remove", (factories, tag, config) -> { + String field = (String) config.remove("field"); + return new Processor() { + @Override + public IngestDocument execute(IngestDocument ingestDocument) { + ingestDocument.removeField(field); + return ingestDocument; + } + + @Override + public String getType() { + return "remove"; + } + + @Override + public String getTag() { + return tag; + } + }; + }); + return createWithProcessors(processors); + } + + private static IngestService createWithProcessors(Map processors) { + ThreadPool threadPool = mock(ThreadPool.class); + final ExecutorService executorService = EsExecutors.newDirectExecutorService(); + when(threadPool.executor(anyString())).thenReturn(executorService); + return new IngestService(mock(ClusterService.class), threadPool, null, null, + null, Collections.singletonList(new IngestPlugin() { + @Override + public Map getProcessors(final Processor.Parameters parameters) { + return processors; + } + })); + } + + private class IngestDocumentMatcher extends ArgumentMatcher { + + private final IngestDocument ingestDocument; + + IngestDocumentMatcher(String index, String type, String id, Map source) { + this.ingestDocument = new IngestDocument(index, type, id, null, null, null, source); + } + + IngestDocumentMatcher(String index, String type, String id, Long version, VersionType versionType, Map source) { + this.ingestDocument = new IngestDocument(index, type, id, null, version, versionType, source); + } + + @Override + public boolean matches(Object o) { + if (o.getClass() == IngestDocument.class) { + IngestDocument otherIngestDocument = (IngestDocument) o; + //ingest metadata will not be the same (timestamp differs every time) + return Objects.equals(ingestDocument.getSourceAndMetadata(), otherIngestDocument.getSourceAndMetadata()); + } + return false; + } + } } diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java deleted file mode 100644 index 15a23421da2..00000000000 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ /dev/null @@ -1,471 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license 
agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.ingest; - -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.VersionType; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.hamcrest.CustomTypeSafeMatcher; -import org.junit.Before; -import org.mockito.ArgumentMatcher; -import org.mockito.invocation.InvocationOnMock; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ExecutorService; -import java.util.function.BiConsumer; -import java.util.function.Consumer; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.sameInstance; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.argThat; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class PipelineExecutionServiceTests extends ESTestCase { - - private final Integer version = randomBoolean() ? 
randomInt() : null; - private PipelineStore store; - private PipelineExecutionService executionService; - - @Before - public void setup() { - store = mock(PipelineStore.class); - ThreadPool threadPool = mock(ThreadPool.class); - final ExecutorService executorService = EsExecutors.newDirectExecutorService(); - when(threadPool.executor(anyString())).thenReturn(executorService); - executionService = new PipelineExecutionService(store, threadPool); - } - - public void testExecuteIndexPipelineDoesNotExist() { - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - - final SetOnce failure = new SetOnce<>(); - final BiConsumer failureHandler = (request, e) -> { - failure.set(true); - assertThat(request, sameInstance(indexRequest)); - assertThat(e, instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), equalTo("pipeline with id [_id] does not exist")); - }; - - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - - assertTrue(failure.get()); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteIndexPipelineExistsButFailedParsing() { - when(store.get("_id")).thenReturn(new Pipeline("_id", "stub", null, - new CompoundProcessor(new AbstractProcessor("mock") { - @Override - public void execute(IngestDocument ingestDocument) { - throw new IllegalStateException("error"); - } - - @Override - public String getType() { - return null; - } - }))); - - final SetOnce failure = new SetOnce<>(); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - final BiConsumer failureHandler = (request, e) -> { - assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); - assertThat(e.getCause().getCause(), instanceOf(IllegalStateException.class)); - assertThat(e.getCause().getCause().getMessage(), equalTo("error")); - failure.set(true); - }; - - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - - assertTrue(failure.get()); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteBulkPipelineDoesNotExist() { - CompoundProcessor processor = mock(CompoundProcessor.class); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - BulkRequest bulkRequest = new BulkRequest(); - - IndexRequest indexRequest1 = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - bulkRequest.add(indexRequest1); - IndexRequest indexRequest2 = - new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("does_not_exist"); - bulkRequest.add(indexRequest2); - @SuppressWarnings("unchecked") - BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(bulkRequest.requests(), failureHandler, completionHandler); - verify(failureHandler, times(1)).accept( - argThat(new CustomTypeSafeMatcher("failure handler was not called with the expected arguments") { - @Override - protected boolean matchesSafely(IndexRequest item) { - return item == indexRequest2; - } - - }), - argThat(new 
CustomTypeSafeMatcher("failure handler was not called with the expected arguments") { - @Override - protected boolean matchesSafely(IllegalArgumentException iae) { - return "pipeline with id [does_not_exist] does not exist".equals(iae.getMessage()); - } - }) - ); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteSuccess() { - final CompoundProcessor processor = mock(CompoundProcessor.class); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(failureHandler, never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteEmptyPipeline() throws Exception { - final CompoundProcessor processor = mock(CompoundProcessor.class); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - when(processor.getProcessors()).thenReturn(Collections.emptyList()); - - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor, never()).execute(any()); - verify(failureHandler, never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecutePropagateAllMetaDataUpdates() throws Exception { - final CompoundProcessor processor = mock(CompoundProcessor.class); - when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); - final long newVersion = randomLong(); - final String versionType = randomFrom("internal", "external", "external_gt", "external_gte"); - doAnswer((InvocationOnMock invocationOnMock) -> { - IngestDocument ingestDocument = (IngestDocument) invocationOnMock.getArguments()[0]; - for (IngestDocument.MetaData metaData : IngestDocument.MetaData.values()) { - if (metaData == IngestDocument.MetaData.VERSION) { - ingestDocument.setFieldValue(metaData.getFieldName(), newVersion); - } else if (metaData == IngestDocument.MetaData.VERSION_TYPE) { - ingestDocument.setFieldValue(metaData.getFieldName(), versionType); - } else { - ingestDocument.setFieldValue(metaData.getFieldName(), "update" + metaData.getFieldName()); - } - } - return null; - }).when(processor).execute(any()); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor).execute(any()); - verify(failureHandler, 
never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(null); - assertThat(indexRequest.index(), equalTo("update_index")); - assertThat(indexRequest.type(), equalTo("update_type")); - assertThat(indexRequest.id(), equalTo("update_id")); - assertThat(indexRequest.routing(), equalTo("update_routing")); - assertThat(indexRequest.version(), equalTo(newVersion)); - assertThat(indexRequest.versionType(), equalTo(VersionType.fromString(versionType))); - } - - public void testExecuteFailure() throws Exception { - final CompoundProcessor processor = mock(CompoundProcessor.class); - when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()) - .when(processor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteSuccessWithOnFailure() throws Exception { - final Processor processor = mock(Processor.class); - when(processor.getType()).thenReturn("mock_processor_type"); - when(processor.getTag()).thenReturn("mock_processor_tag"); - final Processor onFailureProcessor = mock(Processor.class); - final CompoundProcessor compoundProcessor = new CompoundProcessor( - false, Collections.singletonList(processor), Collections.singletonList(new CompoundProcessor(onFailureProcessor))); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()).when(processor).execute(eqIndexTypeId(Collections.emptyMap())); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(failureHandler, never()).accept(eq(indexRequest), any(ElasticsearchException.class)); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteFailureWithOnFailure() throws Exception { - final Processor processor = mock(Processor.class); - final Processor onFailureProcessor = mock(Processor.class); - final CompoundProcessor compoundProcessor = new CompoundProcessor( - false, Collections.singletonList(processor), Collections.singletonList(new CompoundProcessor(onFailureProcessor))); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()) 
- .when(processor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - doThrow(new RuntimeException()) - .when(onFailureProcessor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteFailureWithNestedOnFailure() throws Exception { - final Processor processor = mock(Processor.class); - final Processor onFailureProcessor = mock(Processor.class); - final Processor onFailureOnFailureProcessor = mock(Processor.class); - final List processors = Collections.singletonList(onFailureProcessor); - final List onFailureProcessors = Collections.singletonList(onFailureOnFailureProcessor); - final CompoundProcessor compoundProcessor = new CompoundProcessor( - false, - Collections.singletonList(processor), - Collections.singletonList(new CompoundProcessor(false, processors, onFailureProcessors))); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()) - .when(onFailureOnFailureProcessor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - doThrow(new RuntimeException()) - .when(onFailureProcessor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - doThrow(new RuntimeException()) - .when(processor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); - verify(completionHandler, times(1)).accept(null); - } - - public void testBulkRequestExecutionWithFailures() throws Exception { - BulkRequest bulkRequest = new BulkRequest(); - String pipelineId = "_id"; - - int numRequest = scaledRandomIntBetween(8, 64); - int numIndexRequests = 0; - for (int i = 0; i < numRequest; i++) { - DocWriteRequest request; - if (randomBoolean()) { - if (randomBoolean()) { - request = new DeleteRequest("_index", "_type", "_id"); - } else { - request = new UpdateRequest("_index", "_type", "_id"); - } - } else { - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId); - indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); - request = indexRequest; - numIndexRequests++; - } - bulkRequest.add(request); - } - - CompoundProcessor 
processor = mock(CompoundProcessor.class); - when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); - Exception error = new RuntimeException(); - doThrow(error).when(processor).execute(any()); - when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, version, processor)); - - @SuppressWarnings("unchecked") - BiConsumer requestItemErrorHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler); - - verify(requestItemErrorHandler, times(numIndexRequests)).accept(any(IndexRequest.class), eq(error)); - verify(completionHandler, times(1)).accept(null); - } - - public void testBulkRequestExecution() { - BulkRequest bulkRequest = new BulkRequest(); - String pipelineId = "_id"; - - int numRequest = scaledRandomIntBetween(8, 64); - for (int i = 0; i < numRequest; i++) { - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId); - indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); - bulkRequest.add(indexRequest); - } - - when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, version, new CompoundProcessor())); - - @SuppressWarnings("unchecked") - BiConsumer requestItemErrorHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler); - - verify(requestItemErrorHandler, never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(null); - } - - public void testStats() { - final IngestStats initialStats = executionService.stats(); - assertThat(initialStats.getStatsPerPipeline().size(), equalTo(0)); - assertThat(initialStats.getTotalStats().getIngestCount(), equalTo(0L)); - assertThat(initialStats.getTotalStats().getIngestCurrent(), equalTo(0L)); - assertThat(initialStats.getTotalStats().getIngestFailedCount(), equalTo(0L)); - assertThat(initialStats.getTotalStats().getIngestTimeInMillis(), equalTo(0L)); - - when(store.get("_id1")).thenReturn(new Pipeline("_id1", null, version, new CompoundProcessor(mock(Processor.class)))); - when(store.get("_id2")).thenReturn(new Pipeline("_id2", null, null, new CompoundProcessor(mock(Processor.class)))); - - final Map configurationMap = new HashMap<>(); - configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); - configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); - executionService.updatePipelineStats(new IngestMetadata(configurationMap)); - - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - - final IndexRequest indexRequest = new IndexRequest("_index"); - indexRequest.setPipeline("_id1"); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - final IngestStats afterFirstRequestStats = executionService.stats(); - assertThat(afterFirstRequestStats.getStatsPerPipeline().size(), equalTo(2)); - assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); - assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(0L)); - 
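
The hunks above all follow the same Mockito pattern: the bulk-execution entry point takes a per-item failure handler and a single completion handler, both mocked, and the test verifies how often each was invoked. A minimal, self-contained sketch of that pattern; the `executeAll` helper and its names are hypothetical stand-ins, not the Elasticsearch API:

```java
// Hypothetical sketch of the handler-verification pattern used in the
// tests above: a callback-based bulk API is driven with Mockito mocks and
// the failure/completion handlers are verified afterwards.
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

import static org.mockito.Mockito.*;

public class HandlerVerificationSketch {
    // Stand-in for executeBulkRequest: apply an action per item, report
    // per-item failures, then signal completion exactly once.
    static <T> void executeAll(List<T> items,
                               Consumer<T> action,
                               BiConsumer<T, Exception> onFailure,
                               Consumer<Void> onCompletion) {
        for (T item : items) {
            try {
                action.accept(item);
            } catch (Exception e) {
                onFailure.accept(item, e);
            }
        }
        onCompletion.accept(null);
    }

    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        BiConsumer<String, Exception> failureHandler = mock(BiConsumer.class);
        Consumer<Void> completionHandler = mock(Consumer.class);

        executeAll(List.of("doc1"),
            doc -> { throw new RuntimeException(); },
            failureHandler, completionHandler);

        // Same shape as the assertions above: one failure, one completion.
        verify(failureHandler, times(1)).accept(eq("doc1"), any(RuntimeException.class));
        verify(completionHandler, times(1)).accept(null);
    }
}
```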
assertThat(afterFirstRequestStats.getTotalStats().getIngestCount(), equalTo(1L)); - - indexRequest.setPipeline("_id2"); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - final IngestStats afterSecondRequestStats = executionService.stats(); - assertThat(afterSecondRequestStats.getStatsPerPipeline().size(), equalTo(2)); - assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); - assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(1L)); - assertThat(afterSecondRequestStats.getTotalStats().getIngestCount(), equalTo(2L)); - } - - // issue: https://github.com/elastic/elasticsearch/issues/18126 - public void testUpdatingStatsWhenRemovingPipelineWorks() { - Map configurationMap = new HashMap<>(); - configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); - configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); - executionService.updatePipelineStats(new IngestMetadata(configurationMap)); - assertThat(executionService.stats().getStatsPerPipeline(), hasKey("_id1")); - assertThat(executionService.stats().getStatsPerPipeline(), hasKey("_id2")); - - configurationMap = new HashMap<>(); - configurationMap.put("_id3", new PipelineConfiguration("_id3", new BytesArray("{}"), XContentType.JSON)); - executionService.updatePipelineStats(new IngestMetadata(configurationMap)); - assertThat(executionService.stats().getStatsPerPipeline(), not(hasKey("_id1"))); - assertThat(executionService.stats().getStatsPerPipeline(), not(hasKey("_id2"))); - } - - private IngestDocument eqIndexTypeId(final Map source) { - return argThat(new IngestDocumentMatcher("_index", "_type", "_id", source)); - } - - private IngestDocument eqIndexTypeId(final Long version, final VersionType versionType, final Map source) { - return argThat(new IngestDocumentMatcher("_index", "_type", "_id", version, versionType, source)); - } - - private class IngestDocumentMatcher extends ArgumentMatcher { - - private final IngestDocument ingestDocument; - - IngestDocumentMatcher(String index, String type, String id, Map source) { - this.ingestDocument = new IngestDocument(index, type, id, null, null, null, source); - } - - IngestDocumentMatcher(String index, String type, String id, Long version, VersionType versionType, Map source) { - this.ingestDocument = new IngestDocument(index, type, id, null, version, versionType, source); - } - - @Override - public boolean matches(Object o) { - if (o.getClass() == IngestDocument.class) { - IngestDocument otherIngestDocument = (IngestDocument) o; - //ingest metadata will not be the same (timestamp differs every time) - return Objects.equals(ingestDocument.getSourceAndMetadata(), otherIngestDocument.getSourceAndMetadata()); - } - return false; - } - } -} diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java index 461873a3fe3..d6d7b4ffa81 100644 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; @@ -32,11 +33,13 @@ import static 
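
The `eqIndexTypeId(...)` helpers above delegate to a custom `IngestDocumentMatcher` so that argument matching can skip the ingest timestamp, which differs on every run. A hedged sketch of that custom-matcher technique, assuming the Mockito 1.x `ArgumentMatcher` base class the test extends (Mockito 2+ turns it into a typed interface):

```java
// Illustrative matcher that compares maps while ignoring a volatile key,
// mirroring how IngestDocumentMatcher skips ingest metadata above.
// Usage: verify(processor).execute(argThat(new MapIgnoringTimestampMatcher(expected)));
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

import org.mockito.ArgumentMatcher;

class MapIgnoringTimestampMatcher extends ArgumentMatcher<Map<String, Object>> {
    private final Map<String, Object> expected;

    MapIgnoringTimestampMatcher(Map<String, Object> expected) {
        this.expected = expected;
    }

    @Override
    public boolean matches(Object o) {
        if (o instanceof Map == false) {
            return false;
        }
        @SuppressWarnings("unchecked")
        Map<String, Object> other = (Map<String, Object>) o;
        // compare everything except the volatile "timestamp" entry
        return Objects.equals(without(expected, "timestamp"), without(other, "timestamp"));
    }

    private static Map<String, Object> without(Map<String, Object> map, String key) {
        Map<String, Object> copy = new HashMap<>(map);
        copy.remove(key);
        return copy;
    }
}
```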
org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; public class PipelineFactoryTests extends ESTestCase { private final Integer version = randomBoolean() ? randomInt() : null; private final String versionString = version != null ? Integer.toString(version) : null; + private final ScriptService scriptService = mock(ScriptService.class); public void testCreate() throws Exception { Map processorConfig0 = new HashMap<>(); @@ -47,9 +50,8 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Arrays.asList(Collections.singletonMap("test", processorConfig0), Collections.singletonMap("test", processorConfig1))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -64,9 +66,8 @@ public class PipelineFactoryTests extends ESTestCase { Map pipelineConfig = new HashMap<>(); pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); - Pipeline.Factory factory = new Pipeline.Factory(); try { - factory.create("_id", pipelineConfig, Collections.emptyMap()); + Pipeline.create("_id", pipelineConfig, Collections.emptyMap(), scriptService); fail("should fail, missing required [processors] field"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[processors] required property is missing")); @@ -78,8 +79,7 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.emptyList()); - Pipeline.Factory factory = new Pipeline.Factory(); - Pipeline pipeline = factory.create("_id", pipelineConfig, null); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, null, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -93,9 +93,8 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); pipelineConfig.put(Pipeline.ON_FAILURE_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -112,9 +111,11 @@ public class PipelineFactoryTests extends ESTestCase { 
pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); pipelineConfig.put(Pipeline.ON_FAILURE_KEY, Collections.emptyList()); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create("_id", pipelineConfig, processorRegistry)); + Exception e = expectThrows( + ElasticsearchParseException.class, + () -> Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService) + ); assertThat(e.getMessage(), equalTo("pipeline [_id] cannot have an empty on_failure option defined")); } @@ -125,9 +126,11 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create("_id", pipelineConfig, processorRegistry)); + Exception e = expectThrows( + ElasticsearchParseException.class, + () -> Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService) + ); assertThat(e.getMessage(), equalTo("[on_failure] processors list cannot be empty")); } @@ -136,14 +139,13 @@ public class PipelineFactoryTests extends ESTestCase { processorConfig.put("ignore_failure", true); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline.Factory factory = new Pipeline.Factory(); Map pipelineConfig = new HashMap<>(); pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -162,9 +164,11 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create("_id", pipelineConfig, processorRegistry)); + Exception e = expectThrows( + ElasticsearchParseException.class, + () -> Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService) + ); assertThat(e.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]")); } @@ -176,9 +180,8 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); 
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java deleted file mode 100644 index d0ce465fc9e..00000000000 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java +++ /dev/null @@ -1,377 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.ingest; - -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; -import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class PipelineStoreTests extends ESTestCase { - - private PipelineStore store; - - @Before - public void init() throws Exception { - Map processorFactories = new HashMap<>(); - processorFactories.put("set", (factories, tag, config) -> { - String field = (String) config.remove("field"); - String value = (String) config.remove("value"); - return new Processor() { - @Override - public void execute(IngestDocument ingestDocument) throws Exception { - ingestDocument.setFieldValue(field, value); - } - - @Override - public String getType() { - return "set"; - } - - @Override - public String getTag() { - return tag; - } - }; - }); - processorFactories.put("remove", 
(factories, tag, config) -> { - String field = (String) config.remove("field"); - return new Processor() { - @Override - public void execute(IngestDocument ingestDocument) throws Exception { - ingestDocument.removeField(field); - } - - @Override - public String getType() { - return "remove"; - } - - @Override - public String getTag() { - return tag; - } - }; - }); - store = new PipelineStore(Settings.EMPTY, processorFactories); - } - - public void testUpdatePipelines() { - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - ClusterState previousClusterState = clusterState; - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.pipelines.size(), is(0)); - - PipelineConfiguration pipeline = new PipelineConfiguration( - "_id",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON - ); - IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", pipeline)); - clusterState = ClusterState.builder(clusterState) - .metaData(MetaData.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) - .build(); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.pipelines.size(), is(1)); - assertThat(store.pipelines.get("_id").getId(), equalTo("_id")); - assertThat(store.pipelines.get("_id").getDescription(), nullValue()); - assertThat(store.pipelines.get("_id").getProcessors().size(), equalTo(1)); - assertThat(store.pipelines.get("_id").getProcessors().get(0).getType(), equalTo("set")); - } - - public void testPut() { - String id = "_id"; - Pipeline pipeline = store.get(id); - assertThat(pipeline, nullValue()); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - - // add a new pipeline: - PutPipelineRequest putRequest = new PutPipelineRequest(id, new BytesArray("{\"processors\": []}"), XContentType.JSON); - ClusterState previousClusterState = clusterState; - clusterState = store.innerPut(putRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - pipeline = store.get(id); - assertThat(pipeline, notNullValue()); - assertThat(pipeline.getId(), equalTo(id)); - assertThat(pipeline.getDescription(), nullValue()); - assertThat(pipeline.getProcessors().size(), equalTo(0)); - - // overwrite existing pipeline: - putRequest = - new PutPipelineRequest(id, new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), XContentType.JSON); - previousClusterState = clusterState; - clusterState = store.innerPut(putRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - pipeline = store.get(id); - assertThat(pipeline, notNullValue()); - assertThat(pipeline.getId(), equalTo(id)); - assertThat(pipeline.getDescription(), equalTo("_description")); - assertThat(pipeline.getProcessors().size(), equalTo(0)); - } - - public void testPutWithErrorResponse() { - String id = "_id"; - Pipeline pipeline = store.get(id); - assertThat(pipeline, nullValue()); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - - PutPipelineRequest putRequest = - new PutPipelineRequest(id, new BytesArray("{\"description\": \"empty processors\"}"), XContentType.JSON); - ClusterState previousClusterState = clusterState; - clusterState = store.innerPut(putRequest, clusterState); - try { - store.innerUpdatePipelines(previousClusterState, clusterState); - fail("should fail"); - } catch (ElasticsearchParseException e) { - 
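
The deleted test's `init()` registers processor factories as lambdas over a functional factory interface, which keeps each fake processor inline with its registration. A standalone sketch of that registration style, with hypothetical `Processor`/`ProcessorFactory` stand-ins rather than the real ingest interfaces:

```java
// Hypothetical sketch of lambda-based factory registration as used in the
// deleted PipelineStoreTests: the "set" factory consumes its config and
// returns an anonymous processor.
import java.util.HashMap;
import java.util.Map;

public class FactoryRegistrationSketch {
    interface Processor {
        void execute(Map<String, Object> document) throws Exception;
        String getType();
    }

    @FunctionalInterface
    interface ProcessorFactory {
        Processor create(Map<String, ProcessorFactory> registry, String tag,
                         Map<String, Object> config) throws Exception;
    }

    public static void main(String[] args) throws Exception {
        Map<String, ProcessorFactory> factories = new HashMap<>();
        factories.put("set", (registry, tag, config) -> {
            String field = (String) config.remove("field");
            String value = (String) config.remove("value");
            return new Processor() {
                @Override
                public void execute(Map<String, Object> document) {
                    document.put(field, value);
                }

                @Override
                public String getType() {
                    return "set";
                }
            };
        });

        Map<String, Object> config = new HashMap<>();
        config.put("field", "_field");
        config.put("value", "_value");
        Processor set = factories.get("set").create(factories, "tag1", config);

        Map<String, Object> doc = new HashMap<>();
        set.execute(doc);
        System.out.println(doc); // {_field=_value}
    }
}
```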
assertThat(e.getMessage(), equalTo("[processors] required property is missing")); - } - pipeline = store.get(id); - assertNotNull(pipeline); - assertThat(pipeline.getId(), equalTo("_id")); - assertThat(pipeline.getDescription(), equalTo("this is a place holder pipeline, because pipeline with" + - " id [_id] could not be loaded")); - assertThat(pipeline.getProcessors().size(), equalTo(1)); - assertNull(pipeline.getProcessors().get(0).getTag()); - assertThat(pipeline.getProcessors().get(0).getType(), equalTo("unknown")); - } - - public void testDelete() { - PipelineConfiguration config = new PipelineConfiguration( - "_id",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON - ); - IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", config)); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - ClusterState previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() - .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("_id"), notNullValue()); - - // Delete pipeline: - DeletePipelineRequest deleteRequest = new DeletePipelineRequest("_id"); - previousClusterState = clusterState; - clusterState = store.innerDelete(deleteRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("_id"), nullValue()); - - // Delete existing pipeline: - try { - store.innerDelete(deleteRequest, clusterState); - fail("exception expected"); - } catch (ResourceNotFoundException e) { - assertThat(e.getMessage(), equalTo("pipeline [_id] is missing")); - } - } - - public void testDeleteUsingWildcard() { - HashMap pipelines = new HashMap<>(); - BytesArray definition = new BytesArray( - "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}" - ); - pipelines.put("p1", new PipelineConfiguration("p1", definition, XContentType.JSON)); - pipelines.put("p2", new PipelineConfiguration("p2", definition, XContentType.JSON)); - pipelines.put("q1", new PipelineConfiguration("q1", definition, XContentType.JSON)); - IngestMetadata ingestMetadata = new IngestMetadata(pipelines); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - ClusterState previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() - .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("p1"), notNullValue()); - assertThat(store.get("p2"), notNullValue()); - assertThat(store.get("q1"), notNullValue()); - - // Delete pipeline matching wildcard - DeletePipelineRequest deleteRequest = new DeletePipelineRequest("p*"); - previousClusterState = clusterState; - clusterState = store.innerDelete(deleteRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("p1"), nullValue()); - assertThat(store.get("p2"), nullValue()); - assertThat(store.get("q1"), notNullValue()); - - // Exception if we used name which does not exist - try { - store.innerDelete(new DeletePipelineRequest("unknown"), clusterState); - fail("exception expected"); - } catch (ResourceNotFoundException e) { - assertThat(e.getMessage(), equalTo("pipeline [unknown] is missing")); - } - - // match all wildcard works on last 
remaining pipeline - DeletePipelineRequest matchAllDeleteRequest = new DeletePipelineRequest("*"); - previousClusterState = clusterState; - clusterState = store.innerDelete(matchAllDeleteRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("p1"), nullValue()); - assertThat(store.get("p2"), nullValue()); - assertThat(store.get("q1"), nullValue()); - - // match all wildcard does not throw exception if none match - store.innerDelete(matchAllDeleteRequest, clusterState); - } - - public void testDeleteWithExistingUnmatchedPipelines() { - HashMap pipelines = new HashMap<>(); - BytesArray definition = new BytesArray( - "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}" - ); - pipelines.put("p1", new PipelineConfiguration("p1", definition, XContentType.JSON)); - IngestMetadata ingestMetadata = new IngestMetadata(pipelines); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - ClusterState previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() - .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("p1"), notNullValue()); - - DeletePipelineRequest deleteRequest = new DeletePipelineRequest("z*"); - try { - store.innerDelete(deleteRequest, clusterState); - fail("exception expected"); - } catch (ResourceNotFoundException e) { - assertThat(e.getMessage(), equalTo("pipeline [z*] is missing")); - } - } - - public void testGetPipelines() { - Map configs = new HashMap<>(); - configs.put("_id1", new PipelineConfiguration( - "_id1", new BytesArray("{\"processors\": []}"), XContentType.JSON - )); - configs.put("_id2", new PipelineConfiguration( - "_id2", new BytesArray("{\"processors\": []}"), XContentType.JSON - )); - - assertThat(store.innerGetPipelines(null, "_id1").isEmpty(), is(true)); - - IngestMetadata ingestMetadata = new IngestMetadata(configs); - List pipelines = store.innerGetPipelines(ingestMetadata, "_id1"); - assertThat(pipelines.size(), equalTo(1)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - - pipelines = store.innerGetPipelines(ingestMetadata, "_id1", "_id2"); - assertThat(pipelines.size(), equalTo(2)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - assertThat(pipelines.get(1).getId(), equalTo("_id2")); - - pipelines = store.innerGetPipelines(ingestMetadata, "_id*"); - pipelines.sort((o1, o2) -> o1.getId().compareTo(o2.getId())); - assertThat(pipelines.size(), equalTo(2)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - assertThat(pipelines.get(1).getId(), equalTo("_id2")); - - // get all variants: (no IDs or '*') - pipelines = store.innerGetPipelines(ingestMetadata); - pipelines.sort((o1, o2) -> o1.getId().compareTo(o2.getId())); - assertThat(pipelines.size(), equalTo(2)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - assertThat(pipelines.get(1).getId(), equalTo("_id2")); - - pipelines = store.innerGetPipelines(ingestMetadata, "*"); - pipelines.sort((o1, o2) -> o1.getId().compareTo(o2.getId())); - assertThat(pipelines.size(), equalTo(2)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - assertThat(pipelines.get(1).getId(), equalTo("_id2")); - } - - public void testCrud() throws Exception { - String id = "_id"; - Pipeline pipeline = store.get(id); - assertThat(pipeline, nullValue()); - ClusterState clusterState = ClusterState.builder(new 
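
The wildcard-delete tests above pin down three behaviours: a trailing-`*` pattern removes every matching pipeline, a concrete id that matches nothing raises a resource-not-found error, and the match-all pattern never fails even when nothing is left. An illustrative sketch of those semantics; the `simpleMatch` helper is a simplified assumption, not Elasticsearch's matcher:

```java
// Sketch of wildcard-delete semantics: "p*" removes p1 and p2 but leaves
// q1, and "*" matches everything without erroring when the map is empty.
import java.util.LinkedHashMap;
import java.util.Map;

public class WildcardDeleteSketch {
    // Supports only a single trailing '*' — enough for this illustration.
    static boolean simpleMatch(String pattern, String value) {
        if (pattern.endsWith("*")) {
            return value.startsWith(pattern.substring(0, pattern.length() - 1));
        }
        return pattern.equals(value);
    }

    static int delete(Map<String, String> pipelines, String pattern) {
        int before = pipelines.size();
        pipelines.keySet().removeIf(id -> simpleMatch(pattern, id));
        return before - pipelines.size();
    }

    public static void main(String[] args) {
        Map<String, String> pipelines = new LinkedHashMap<>();
        pipelines.put("p1", "{}");
        pipelines.put("p2", "{}");
        pipelines.put("q1", "{}");
        System.out.println(delete(pipelines, "p*")); // 2
        System.out.println(pipelines.keySet());      // [q1]
        System.out.println(delete(pipelines, "*"));  // 1; match-all never errors
    }
}
```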
ClusterName("_name")).build(); // Start empty - - PutPipelineRequest putRequest = new PutPipelineRequest(id, - new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON); - ClusterState previousClusterState = clusterState; - clusterState = store.innerPut(putRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - pipeline = store.get(id); - assertThat(pipeline, notNullValue()); - assertThat(pipeline.getId(), equalTo(id)); - assertThat(pipeline.getDescription(), nullValue()); - assertThat(pipeline.getProcessors().size(), equalTo(1)); - assertThat(pipeline.getProcessors().get(0).getType(), equalTo("set")); - - DeletePipelineRequest deleteRequest = new DeletePipelineRequest(id); - previousClusterState = clusterState; - clusterState = store.innerDelete(deleteRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - pipeline = store.get(id); - assertThat(pipeline, nullValue()); - } - - public void testValidate() throws Exception { - PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( - "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\", \"tag\": \"tag1\"}}," + - "{\"remove\" : {\"field\": \"_field\", \"tag\": \"tag2\"}}]}"), - XContentType.JSON); - - DiscoveryNode node1 = new DiscoveryNode("_node_id1", buildNewFakeTransportAddress(), - emptyMap(), emptySet(), Version.CURRENT); - DiscoveryNode node2 = new DiscoveryNode("_node_id2", buildNewFakeTransportAddress(), - emptyMap(), emptySet(), Version.CURRENT); - Map ingestInfos = new HashMap<>(); - ingestInfos.put(node1, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); - ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set")))); - - ElasticsearchParseException e = - expectThrows(ElasticsearchParseException.class, () -> store.validatePipeline(ingestInfos, putRequest)); - assertEquals("Processor type [remove] is not installed on node [" + node2 + "]", e.getMessage()); - assertEquals("remove", e.getMetadata("es.processor_type").get(0)); - assertEquals("tag2", e.getMetadata("es.processor_tag").get(0)); - - ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); - store.validatePipeline(ingestInfos, putRequest); - } - - public void testValidateNoIngestInfo() throws Exception { - PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( - "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON); - Exception e = expectThrows(IllegalStateException.class, () -> store.validatePipeline(Collections.emptyMap(), putRequest)); - assertEquals("Ingest info is empty", e.getMessage()); - - DiscoveryNode discoveryNode = new DiscoveryNode("_node_id", buildNewFakeTransportAddress(), - emptyMap(), emptySet(), Version.CURRENT); - IngestInfo ingestInfo = new IngestInfo(Collections.singletonList(new ProcessorInfo("set"))); - store.validatePipeline(Collections.singletonMap(discoveryNode, ingestInfo), putRequest); - } -} diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index dfb1de9e556..d3e1c9641f9 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -156,6 +156,25 @@ public class NodeTests extends ESTestCase { } } + public void testServerNameNodeAttribute() 
throws IOException { + String attr = "valid-hostname"; + Settings.Builder settings = baseSettings().put(Node.NODE_ATTRIBUTES.getKey() + "server_name", attr); + int i = 0; + try (Node node = new MockNode(settings.build(), basePlugins())) { + final Settings nodeSettings = randomBoolean() ? node.settings() : node.getEnvironment().settings(); + assertEquals(attr, Node.NODE_ATTRIBUTES.getAsMap(nodeSettings).get("server_name")); + } + + // non-LDH hostname not allowed + attr = "invalid_hostname"; + settings = baseSettings().put(Node.NODE_ATTRIBUTES.getKey() + "server_name", attr); + try (Node node = new MockNode(settings.build(), basePlugins())) { + fail("should not allow a server_name attribute with an underscore"); + } catch (IllegalArgumentException e) { + assertEquals("invalid node.attr.server_name [invalid_hostname]", e.getMessage()); + } + } + private static Settings.Builder baseSettings() { final Path tempDir = createTempDir(); return Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java b/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java index c53d798f7b4..d413c0f0be2 100644 --- a/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.plugins; import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.node.MockNode; @@ -32,6 +33,7 @@ import java.util.Map; import java.util.function.Function; import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasToString; public class IndexStorePluginTests extends ESTestCase { @@ -54,7 +56,30 @@ public class IndexStorePluginTests extends ESTestCase { } - public void testDuplicateIndexStoreProviders() { + public static class ConflictingStorePlugin extends Plugin implements IndexStorePlugin { + + public static final String TYPE; + + static { + TYPE = randomFrom(Arrays.asList(IndexModule.Type.values())).getSettingsKey(); + } + + @Override + public Map> getIndexStoreFactories() { + return Collections.singletonMap(TYPE, IndexStore::new); + } + + } + + public void testIndexStoreFactoryConflictsWithBuiltInIndexStoreType() { + final Settings settings = Settings.builder().put("path.home", createTempDir()).build(); + final IllegalStateException e = expectThrows( + IllegalStateException.class, () -> new MockNode(settings, Collections.singletonList(ConflictingStorePlugin.class))); + assertThat(e, hasToString(containsString( + "registered index store type [" + ConflictingStorePlugin.TYPE + "] conflicts with a built-in type"))); + } + + public void testDuplicateIndexStoreFactories() { final Settings settings = Settings.builder().put("path.home", createTempDir()).build(); final IllegalStateException e = expectThrows( IllegalStateException.class, () -> new MockNode(settings, Arrays.asList(BarStorePlugin.class, FooStorePlugin.class))); diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 5f1d1f612d7..f6649853eda 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ 
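
The new `testServerNameNodeAttribute` accepts `valid-hostname` but rejects `invalid_hostname`, i.e. the `server_name` node attribute must be an LDH (letters, digits, hyphen) hostname label. A hedged standalone check of that rule, illustrative only and not the Elasticsearch implementation:

```java
// Sketch of the LDH rule the test above enforces: labels may contain only
// letters, digits, and hyphens, so an underscore is rejected.
public class LdhHostnameCheck {
    static boolean isLdhLabel(String label) {
        if (label.isEmpty() || label.length() > 63) {
            return false;
        }
        for (int i = 0; i < label.length(); i++) {
            char c = label.charAt(i);
            boolean ok = (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
                || (c >= '0' && c <= '9') || c == '-';
            if (ok == false) {
                return false;
            }
        }
        // labels may not begin or end with a hyphen
        return label.charAt(0) != '-' && label.charAt(label.length() - 1) != '-';
    }

    public static void main(String[] args) {
        System.out.println(isLdhLabel("valid-hostname"));   // true
        System.out.println(isLdhLabel("invalid_hostname")); // false: underscore
    }
}
```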
b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -590,10 +590,10 @@ public class PluginsServiceTests extends ESTestCase { } public void testIncompatibleElasticsearchVersion() throws Exception { - PluginInfo info = new PluginInfo("my_plugin", "desc", "1.0", Version.V_5_0_0, + PluginInfo info = new PluginInfo("my_plugin", "desc", "1.0", Version.V_6_0_0, "1.8", "FakePlugin", Collections.emptyList(), false); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsService.verifyCompatibility(info)); - assertThat(e.getMessage(), containsString("was built for Elasticsearch version 5.0.0")); + assertThat(e.getMessage(), containsString("was built for Elasticsearch version 6.0.0")); } public void testIncompatibleJavaVersion() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index fa7de2d6291..ba3fa84a196 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.repositories.blobstore; import org.apache.lucene.store.Directory; import org.apache.lucene.util.TestUtil; import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.common.UUIDs; @@ -49,7 +50,6 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.List; -import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE; import static org.hamcrest.Matchers.containsString; /** @@ -99,7 +99,8 @@ public class BlobStoreRepositoryRestoreTests extends IndexShardTestCase { } // build a new shard using the same store directory as the closed shard - ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(shard.routingEntry(), EXISTING_STORE_INSTANCE); + ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(shard.routingEntry(), + RecoverySource.ExistingStoreRecoverySource.INSTANCE); shard = newShard( shardRouting, shard.shardPath(), diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index 348b85a8ba4..cbf55428971 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -81,6 +81,8 @@ public class RestControllerTests extends ESTestCase { circuitBreakerService = new HierarchyCircuitBreakerService( Settings.builder() .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), BREAKER_LIMIT) + // We want to have reproducible results in this test, hence we disable real memory usage accounting + .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); usageService = new UsageService(settings); diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java 
b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java new file mode 100644 index 00000000000..29b19739e75 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + + +public class RestClusterGetSettingsActionTests extends ESTestCase { + + public void testFilterPersistentSettings() { + runTestFilterSettingsTest(MetaData.Builder::persistentSettings, ClusterGetSettingsResponse::getPersistentSettings); + } + + public void testFilterTransientSettings() { + runTestFilterSettingsTest(MetaData.Builder::transientSettings, ClusterGetSettingsResponse::getTransientSettings); + } + + private void runTestFilterSettingsTest( + final BiConsumer md, final Function s) { + final MetaData.Builder mdBuilder = new MetaData.Builder(); + final Settings settings = Settings.builder().put("foo.filtered", "bar").put("foo.non_filtered", "baz").build(); + md.accept(mdBuilder, settings); + final ClusterState.Builder builder = new ClusterState.Builder(ClusterState.EMPTY_STATE).metaData(mdBuilder); + final SettingsFilter filter = new SettingsFilter(Settings.EMPTY, Collections.singleton("foo.filtered")); + final Setting.Property[] properties = {Setting.Property.Dynamic, Setting.Property.Filtered, Setting.Property.NodeScope}; + final Set> settingsSet = Stream.concat( + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), + Stream.concat( + Stream.of(Setting.simpleString("foo.filtered", properties)), + Stream.of(Setting.simpleString("foo.non_filtered", properties)))) + .collect(Collectors.toSet()); + final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, settingsSet); + final ClusterGetSettingsResponse response = + RestClusterGetSettingsAction.response(builder.build(), randomBoolean(), filter, clusterSettings, Settings.EMPTY); + assertFalse(s.apply(response).hasValue("foo.filtered")); + assertTrue(s.apply(response).hasValue("foo.non_filtered")); + } + +} diff --git 
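
The new `RestClusterGetSettingsActionTests` asserts that settings registered as filtered are removed from the response while unfiltered ones survive. A minimal sketch of that filtering contract, with a hypothetical helper in place of the real `SettingsFilter`:

```java
// Sketch of the filtering contract tested above: keys registered as
// filtered must not appear in what the API returns.
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

public class SettingsFilterSketch {
    static Map<String, String> filter(Map<String, String> settings, Set<String> filteredKeys) {
        Map<String, String> out = new LinkedHashMap<>(settings);
        out.keySet().removeAll(filteredKeys);
        return out;
    }

    public static void main(String[] args) {
        Map<String, String> settings = new LinkedHashMap<>();
        settings.put("foo.filtered", "bar");
        settings.put("foo.non_filtered", "baz");
        Map<String, String> visible = filter(settings, Set.of("foo.filtered"));
        System.out.println(visible.containsKey("foo.filtered"));     // false
        System.out.println(visible.containsKey("foo.non_filtered")); // true
    }
}
```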
a/server/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java index cd592c9ed1e..4535bf7a91b 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java @@ -30,8 +30,8 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource; -import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.Table; @@ -143,7 +143,7 @@ public class RestIndicesActionTests extends ESTestCase { boolean primary = (i == primaryIdx); Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve(String.valueOf(i)); ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, primary, - primary ? StoreRecoverySource.EMPTY_STORE_INSTANCE : PeerRecoverySource.INSTANCE, + primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null) ); shardRouting = shardRouting.initialize("node-0", null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); diff --git a/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java b/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java index 075d5bc2aa3..a42804692fb 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java @@ -112,8 +112,8 @@ public class SearchHitsTests extends ESTestCase { searchHits.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); assertEquals("{\"hits\":{\"total\":1000,\"max_score\":1.5," + - "\"hits\":[{\"_type\":\"type\",\"_id\":\"id1\",\"_score\":\"-Infinity\"},"+ - "{\"_type\":\"type\",\"_id\":\"id2\",\"_score\":\"-Infinity\"}]}}", Strings.toString(builder)); + "\"hits\":[{\"_type\":\"type\",\"_id\":\"id1\",\"_score\":null},"+ + "{\"_type\":\"type\",\"_id\":\"id2\",\"_score\":null}]}}", Strings.toString(builder)); } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/search/SearchRequestTests.java index 95a9ae9d707..36d2ef2c4db 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchRequestTests.java @@ -28,7 +28,9 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.ArrayUtils; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rescore.QueryRescorerBuilder; import java.io.IOException; import java.util.ArrayList; @@ -123,6 +125,17 @@ public class SearchRequestTests extends AbstractSearchTestCase { assertEquals(1, validationErrors.validationErrors().size()); assertEquals("[size] cannot be [0] in a scroll context", 
validationErrors.validationErrors().get(0)); } + { + // Rescore is not allowed on scroll requests + SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder()); + searchRequest.source().addRescorer(new QueryRescorerBuilder(QueryBuilders.matchAllQuery())); + searchRequest.requestCache(false); + searchRequest.scroll(new TimeValue(1000)); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals("using [rescore] is not allowed in a scroll context", validationErrors.validationErrors().get(0)); + } } public void testEqualsAndHashcode() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java index 9919e9dcdbb..6a77a89fc58 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java @@ -62,7 +62,7 @@ public class AggregationCollectorTests extends ESSingleNodeTestCase { final AggregatorFactories factories = AggregatorFactories.parseAggregators(aggParser).build(context, null); final Aggregator[] aggregators = factories.createTopLevelAggregators(); assertEquals(1, aggregators.length); - return aggregators[0].needsScores(); + return aggregators[0].scoreMode().needsScores(); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java index fcafce3936e..626a2264e1f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java @@ -59,17 +59,17 @@ import org.elasticsearch.search.aggregations.metrics.InternalMinTests; import org.elasticsearch.search.aggregations.metrics.InternalStatsBucketTests; import org.elasticsearch.search.aggregations.metrics.InternalStatsTests; import org.elasticsearch.search.aggregations.metrics.InternalSumTests; -import org.elasticsearch.search.aggregations.metrics.avg.InternalAvgTests; -import org.elasticsearch.search.aggregations.metrics.cardinality.InternalCardinalityTests; -import org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBoundsTests; -import org.elasticsearch.search.aggregations.metrics.geocentroid.InternalGeoCentroidTests; -import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentilesRanksTests; -import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentilesTests; -import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentilesRanksTests; -import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentilesTests; -import org.elasticsearch.search.aggregations.metrics.scripted.InternalScriptedMetricTests; -import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHitsTests; -import org.elasticsearch.search.aggregations.metrics.valuecount.InternalValueCountTests; +import org.elasticsearch.search.aggregations.metrics.InternalAvgTests; +import org.elasticsearch.search.aggregations.metrics.InternalCardinalityTests; +import org.elasticsearch.search.aggregations.metrics.InternalGeoBoundsTests; +import 
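
The added assertion exercises the usual request-validation pattern: errors are accumulated and reported together, and a scroll request that carries a rescorer must produce exactly the message checked above. A simplified sketch of that accumulate-then-report pattern, with plain stand-ins rather than the `ActionRequestValidationException` API:

```java
// Sketch of accumulate-then-report validation: all violations are
// collected into one list instead of failing on the first.
import java.util.ArrayList;
import java.util.List;

public class ScrollValidationSketch {
    static List<String> validate(boolean scroll, Integer size, boolean hasRescore) {
        List<String> errors = new ArrayList<>();
        if (scroll && size != null && size == 0) {
            errors.add("[size] cannot be [0] in a scroll context");
        }
        if (scroll && hasRescore) {
            errors.add("using [rescore] is not allowed in a scroll context");
        }
        return errors;
    }

    public static void main(String[] args) {
        System.out.println(validate(true, null, true));
        // [using [rescore] is not allowed in a scroll context]
    }
}
```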
org.elasticsearch.search.aggregations.metrics.InternalGeoCentroidTests; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentilesRanksTests; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentilesTests; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentilesRanksTests; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentilesTests; +import org.elasticsearch.search.aggregations.metrics.InternalScriptedMetricTests; +import org.elasticsearch.search.aggregations.metrics.InternalTopHitsTests; +import org.elasticsearch.search.aggregations.metrics.InternalValueCountTests; import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValueTests; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValueTests; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.InternalPercentilesBucketTests; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/EquivalenceIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/EquivalenceIT.java index 2d9f462d862..28e77e0b9db 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/EquivalenceIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/EquivalenceIT.java @@ -39,7 +39,7 @@ import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.test.ESIntegTestCase; import org.junit.After; import org.junit.Before; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java index dfdaa7d9fb2..365b6ddc218 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java @@ -22,7 +22,7 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/MissingValueIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/MissingValueIT.java index 5b0b0378e46..2fdacd63d3d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/MissingValueIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/MissingValueIT.java @@ -25,11 +25,11 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInter import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; -import org.elasticsearch.search.aggregations.metrics.cardinality.Cardinality; -import 
org.elasticsearch.search.aggregations.metrics.geobounds.GeoBounds; -import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroid; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; -import org.elasticsearch.search.aggregations.metrics.stats.Stats; +import org.elasticsearch.search.aggregations.metrics.Cardinality; +import org.elasticsearch.search.aggregations.metrics.GeoBounds; +import org.elasticsearch.search.aggregations.metrics.GeoCentroid; +import org.elasticsearch.search.aggregations.metrics.Percentiles; +import org.elasticsearch.search.aggregations.metrics.Stats; import org.elasticsearch.test.ESIntegTestCase; import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java new file mode 100644 index 00000000000..bc8070d7ae4 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java @@ -0,0 +1,241 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.store.Directory; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +public class MultiBucketCollectorTests extends ESTestCase { + private static class ScoreAndDoc extends Scorable { + float score; + int doc = -1; + + @Override + public int docID() { + return doc; + } + + @Override + public float score() { + return score; + } + } + + private static class TerminateAfterBucketCollector extends BucketCollector { + + private int count = 0; + private final int terminateAfter; + private final BucketCollector in; + + TerminateAfterBucketCollector(BucketCollector in, int terminateAfter) { + this.in = in; + this.terminateAfter = terminateAfter; + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext context) throws IOException { + if (count >= terminateAfter) { + throw new CollectionTerminatedException(); + } + final LeafBucketCollector leafCollector = in.getLeafCollector(context); + return new LeafBucketCollectorBase(leafCollector, null) { + @Override + public void collect(int doc, long bucket) throws IOException { + if (count >= terminateAfter) { + throw new CollectionTerminatedException(); + } + super.collect(doc, bucket); + count++; + } + }; + } + + @Override + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; + } + + @Override + public void preCollection() {} + + @Override + public void postCollection() {} + } + + private static class TotalHitCountBucketCollector extends BucketCollector { + + private int count = 0; + + TotalHitCountBucketCollector() { + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext context) { + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + count++; + } + }; + } + + @Override + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE_NO_SCORES; + } + + @Override + public void preCollection() {} + + @Override + public void postCollection() {} + + int getTotalHits() { + return count; + } + } + + private static class SetScorerBucketCollector extends BucketCollector { + private final BucketCollector in; + private final AtomicBoolean setScorerCalled; + + SetScorerBucketCollector(BucketCollector in, AtomicBoolean setScorerCalled) { + this.in = in; + this.setScorerCalled = setScorerCalled; + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext context) throws IOException { + final LeafBucketCollector leafCollector = in.getLeafCollector(context); + return new LeafBucketCollectorBase(leafCollector, null) { + @Override + public void setScorer(Scorable scorer) throws IOException { + super.setScorer(scorer); + setScorerCalled.set(true); + } + }; + } + + @Override + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; + } + + @Override + public void preCollection() {} + + 
+    public void testCollectionTerminatedExceptionHandling() throws IOException {
+        final int iters = atLeast(3);
+        for (int iter = 0; iter < iters; ++iter) {
+            Directory dir = newDirectory();
+            RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+            final int numDocs = randomIntBetween(100, 1000);
+            final Document doc = new Document();
+            for (int i = 0; i < numDocs; ++i) {
+                w.addDocument(doc);
+            }
+            final IndexReader reader = w.getReader();
+            w.close();
+            final IndexSearcher searcher = newSearcher(reader);
+            Map<TotalHitCountBucketCollector, Integer> expectedCounts = new HashMap<>();
+            List<BucketCollector> collectors = new ArrayList<>();
+            final int numCollectors = randomIntBetween(1, 5);
+            for (int i = 0; i < numCollectors; ++i) {
+                final int terminateAfter = random().nextInt(numDocs + 10);
+                final int expectedCount = terminateAfter > numDocs ? numDocs : terminateAfter;
+                TotalHitCountBucketCollector collector = new TotalHitCountBucketCollector();
+                expectedCounts.put(collector, expectedCount);
+                collectors.add(new TerminateAfterBucketCollector(collector, terminateAfter));
+            }
+            searcher.search(new MatchAllDocsQuery(), MultiBucketCollector.wrap(collectors));
+            for (Map.Entry<TotalHitCountBucketCollector, Integer> expectedCount : expectedCounts.entrySet()) {
+                assertEquals(expectedCount.getValue().intValue(), expectedCount.getKey().getTotalHits());
+            }
+            reader.close();
+            dir.close();
+        }
+    }
+
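+    // Verifies that once a wrapped collector has terminated, MultiBucketCollector stops forwarding
+    // setScorer(...) calls to it while still forwarding them to the collectors that are still alive.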
+    public void testSetScorerAfterCollectionTerminated() throws IOException {
+        BucketCollector collector1 = new TotalHitCountBucketCollector();
+        BucketCollector collector2 = new TotalHitCountBucketCollector();
+
+        AtomicBoolean setScorerCalled1 = new AtomicBoolean();
+        collector1 = new SetScorerBucketCollector(collector1, setScorerCalled1);
+
+        AtomicBoolean setScorerCalled2 = new AtomicBoolean();
+        collector2 = new SetScorerBucketCollector(collector2, setScorerCalled2);
+
+        collector1 = new TerminateAfterBucketCollector(collector1, 1);
+        collector2 = new TerminateAfterBucketCollector(collector2, 2);
+
+        Scorable scorer = new ScoreAndDoc();
+
+        List<BucketCollector> collectors = Arrays.asList(collector1, collector2);
+        Collections.shuffle(collectors, random());
+        BucketCollector collector = MultiBucketCollector.wrap(collectors);
+
+        LeafBucketCollector leafCollector = collector.getLeafCollector(null);
+        leafCollector.setScorer(scorer);
+        assertTrue(setScorerCalled1.get());
+        assertTrue(setScorerCalled2.get());
+
+        leafCollector.collect(0);
+        leafCollector.collect(1);
+
+        setScorerCalled1.set(false);
+        setScorerCalled2.set(false);
+        leafCollector.setScorer(scorer);
+        assertFalse(setScorerCalled1.get());
+        assertTrue(setScorerCalled2.get());
+
+        expectThrows(CollectionTerminatedException.class, () -> {
+            leafCollector.collect(1);
+        });
+
+        setScorerCalled1.set(false);
+        setScorerCalled2.set(false);
+        leafCollector.setScorer(scorer);
+        assertFalse(setScorerCalled1.get());
+        assertFalse(setScorerCalled2.get());
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java
index 81dce8002e8..b86fd279b31 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java
@@ -32,7 +32,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.bucket.adjacency.AdjacencyMatrix;
 import org.elasticsearch.search.aggregations.bucket.adjacency.AdjacencyMatrix.Bucket;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
-import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.Avg;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java
index 8d60dde5834..2f99ebbf323 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java
@@ -30,6 +30,7 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.ScoreMode;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
@@ -70,8 +71,8 @@ public class BestBucketsDeferringCollectorTests extends AggregatorTestCase {
         when(searchContext.query()).thenReturn(rewrittenQuery);
         BestBucketsDeferringCollector collector = new BestBucketsDeferringCollector(searchContext, false) {
             @Override
-            public boolean needsScores() {
-                return true;
+            public ScoreMode scoreMode() {
+                return ScoreMode.COMPLETE;
             }
         };
         Set<Integer> deferredCollectedDocIds = new HashSet<>();
@@ -126,8 +127,8 @@ public class BestBucketsDeferringCollectorTests extends AggregatorTestCase {
             }
 
             @Override
-            public boolean needsScores() {
-                return false;
+            public ScoreMode scoreMode() {
+                return ScoreMode.COMPLETE_NO_SCORES;
             }
         };
     }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BucketUtilsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BucketUtilsTests.java
index aa9068b651e..35f3175f7cf 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BucketUtilsTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BucketUtilsTests.java
@@ -27,18 +27,14 @@ public class BucketUtilsTests extends ESTestCase {
 
     public void testBadInput() {
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
-                () -> BucketUtils.suggestShardSideQueueSize(0, 10));
+                () -> BucketUtils.suggestShardSideQueueSize(0, randomBoolean()));
         assertEquals(e.getMessage(), "size must be positive, got 0");
-
-        e = expectThrows(IllegalArgumentException.class,
-                () -> BucketUtils.suggestShardSideQueueSize(10, 0));
-        assertEquals(e.getMessage(), "number of shards must be positive, got 0");
     }
 
     public void testOptimizesSingleShard() {
         for (int iter = 0; iter < 10; ++iter) {
             final int size = randomIntBetween(1, Integer.MAX_VALUE);
-            assertEquals(size, BucketUtils.suggestShardSideQueueSize( size, 1));
+            assertEquals(size, BucketUtils.suggestShardSideQueueSize( size, true));
         }
     }
 
@@ -46,7 +42,7 @@ public class BucketUtilsTests extends ESTestCase {
         for (int iter = 0; iter < 10; ++iter) {
             final int size = Integer.MAX_VALUE - randomInt(10);
             final int numberOfShards = randomIntBetween(1, 10);
-            final int shardSize = BucketUtils.suggestShardSideQueueSize( size, numberOfShards);
+            final int shardSize = BucketUtils.suggestShardSideQueueSize( size, numberOfShards == 1);
             assertThat(shardSize, greaterThanOrEqualTo(shardSize));
         }
     }
 
@@ -55,7 +51,7 @@ public class BucketUtilsTests extends ESTestCase {
         for (int iter = 0; iter < 10; ++iter) {
             final int size = randomIntBetween(1, Integer.MAX_VALUE);
             final int numberOfShards = randomIntBetween(1, 10);
-            final int shardSize = BucketUtils.suggestShardSideQueueSize( size, numberOfShards);
+            final int shardSize = BucketUtils.suggestShardSideQueueSize( size, numberOfShards == 1);
             assertThat(shardSize, greaterThanOrEqualTo(size));
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java
index c40e3b73c66..58d0ca09ff2 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java
@@ -40,8 +40,8 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInter
 import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
-import org.elasticsearch.search.aggregations.metrics.avg.Avg;
-import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.metrics.Avg;
+import org.elasticsearch.search.aggregations.metrics.Sum;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;
 import org.joda.time.DateTime;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java
index 98f73b34b56..c076fa827d0 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java
@@ -31,7 +31,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
 import org.elasticsearch.search.aggregations.bucket.range.DateRangeAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.range.Range;
 import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket;
-import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.metrics.Sum;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;
 import org.joda.time.DateTime;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java
index a8bc97682f0..ac601022c78 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java
@@ -29,7 +29,7 @@ import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregator;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.Max;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.test.ESIntegTestCase;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java
index 2876fbbaa25..aad828f95db 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java
@@ -35,11 +35,11 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter;
 import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
-import org.elasticsearch.search.aggregations.metrics.avg.Avg;
-import org.elasticsearch.search.aggregations.metrics.max.Max;
-import org.elasticsearch.search.aggregations.metrics.stats.Stats;
-import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
-import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.metrics.Avg;
+import org.elasticsearch.search.aggregations.metrics.Max;
+import org.elasticsearch.search.aggregations.metrics.Stats;
+import org.elasticsearch.search.aggregations.metrics.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.Sum;
 import org.elasticsearch.test.ESIntegTestCase;
 
 import java.io.IOException;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java
index 91c098ff85a..bcc14f09ed8 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java
@@ -27,7 +27,7 @@ import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.bucket.filter.Filter;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
-import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.Avg;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java
index 2c9ca8fb447..860a2d662b8 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java
@@ -29,7 +29,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.bucket.filter.Filters;
 import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregator.KeyedFilter;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
-import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.Avg;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java
index c50fb89f334..ef0651a21c7 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java
@@ -27,7 +27,6 @@ import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.DistanceUnit;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
@@ -35,13 +34,11 @@ import org.elasticsearch.search.aggregations.bucket.range.Range;
 import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalSettingsPlugin;
 import org.elasticsearch.test.VersionUtils;
 import org.hamcrest.Matchers;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -63,12 +60,11 @@ import static org.hamcrest.core.IsNull.nullValue;
 public class GeoDistanceIT extends ESIntegTestCase {
 
     @Override
-    protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created
+    protected boolean forbidPrivateIndexSettings() {
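+        // this test creates indices with an explicit index.version.created, which is a private index setting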
+        return false;
     }
 
-    private Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
-            Version.CURRENT);
+    private Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT);
 
     private IndexRequestBuilder indexCity(String idx, String name, String... latLons) throws Exception {
         XContentBuilder source = jsonBuilder().startObject().field("city", name);
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java
index fc080dd0f04..2b63a2ca633 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java
@@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket;
 import com.carrotsearch.hppc.ObjectIntHashMap;
 import com.carrotsearch.hppc.ObjectIntMap;
 import com.carrotsearch.hppc.cursors.ObjectIntCursor;
-
 import org.elasticsearch.Version;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
@@ -30,19 +29,16 @@ import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.query.GeoBoundingBoxQueryBuilder;
-import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.aggregations.AggregationBuilders;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.bucket.filter.Filter;
 import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid;
 import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid.Bucket;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalSettingsPlugin;
 import org.elasticsearch.test.VersionUtils;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Random;
@@ -61,11 +57,11 @@ import static org.hamcrest.Matchers.equalTo;
 public class GeoHashGridIT extends ESIntegTestCase {
 
     @Override
-    protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created
+    protected boolean forbidPrivateIndexSettings() {
+        return false;
+    }
 
-    private Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
+    private Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0,
             Version.CURRENT);
 
     static ObjectIntMap<String> expectedDocCountsForGeoHash = null;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalAggregatorTests.java
index fc7a24cf798..232c9f07510 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalAggregatorTests.java
@@ -33,8 +33,8 @@ import org.elasticsearch.search.aggregations.AggregatorTestCase;
 import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator;
 import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal;
-import org.elasticsearch.search.aggregations.metrics.min.InternalMin;
-import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.InternalMin;
+import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder;
 
 import java.io.IOException;
 import java.util.function.BiConsumer;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java
index 4878398c98c..429b8c71f72 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java
@@ -24,7 +24,7 @@ import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.bucket.global.Global;
-import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.Stats;
 import org.elasticsearch.test.ESIntegTestCase;
 
 import java.util.ArrayList;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java
index d7bd069f2ba..38f373f131a 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java
@@ -34,10 +34,10 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.bucket.filter.Filter;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
-import org.elasticsearch.search.aggregations.metrics.avg.Avg;
-import org.elasticsearch.search.aggregations.metrics.max.Max;
-import org.elasticsearch.search.aggregations.metrics.stats.Stats;
-import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.metrics.Avg;
+import org.elasticsearch.search.aggregations.metrics.Max;
+import org.elasticsearch.search.aggregations.metrics.Stats;
+import org.elasticsearch.search.aggregations.metrics.Sum;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java
index e7e64027274..1e67b59ee32 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java
@@ -34,11 +34,11 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter;
 import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
-import org.elasticsearch.search.aggregations.metrics.avg.Avg;
-import org.elasticsearch.search.aggregations.metrics.max.Max;
-import org.elasticsearch.search.aggregations.metrics.stats.Stats;
-import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
-import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.metrics.Avg;
+import org.elasticsearch.search.aggregations.metrics.Max;
+import org.elasticsearch.search.aggregations.metrics.Stats;
+import org.elasticsearch.search.aggregations.metrics.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.Sum;
 import org.elasticsearch.test.ESIntegTestCase;
 
 import java.io.IOException;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingIT.java
index ac4d8ac315f..d51a4a59ff3 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingIT.java
@@ -23,7 +23,7 @@ import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
 import org.elasticsearch.search.aggregations.bucket.missing.Missing;
-import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.Avg;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java
index 5b8c3b878c1..22b6e252522 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java
@@ -26,10 +26,10 @@ import org.elasticsearch.search.aggregations.Aggregation;
 import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
-import org.elasticsearch.search.aggregations.metrics.avg.Avg;
-import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
-import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.Avg;
+import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
index d5f93f0daa7..10fa2231807 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
@@ -34,9 +34,9 @@ import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
-import org.elasticsearch.search.aggregations.metrics.max.Max;
-import org.elasticsearch.search.aggregations.metrics.stats.Stats;
-import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.metrics.Max;
+import org.elasticsearch.search.aggregations.metrics.Stats;
+import org.elasticsearch.search.aggregations.metrics.Sum;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java
index 99aeac167e0..894834882f9 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java
@@ -33,7 +33,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
 import org.elasticsearch.search.aggregations.bucket.range.Range;
 import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
-import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.metrics.Sum;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java
index 4a69f9d5379..6a3a9731612 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java
@@ -28,7 +28,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter;
 import org.elasticsearch.search.aggregations.bucket.nested.Nested;
 import org.elasticsearch.search.aggregations.bucket.nested.ReverseNested;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
-import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount;
+import org.elasticsearch.search.aggregations.metrics.ValueCount;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.test.ESIntegTestCase;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java
index 81034a03550..c135f284dd2 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java
@@ -28,7 +28,7 @@ import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregator;
 import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
-import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.Max;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.test.ESIntegTestCase;
 
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java
index b0263cb2dbd..52f6e4227e7 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java
@@ -53,10 +53,10 @@ import org.elasticsearch.search.aggregations.AggregatorTestCase;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
-import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.tophits.TopHits;
-import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.InternalMax;
+import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.TopHits;
+import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.sort.SortOrder;
 import org.joda.time.DateTimeZone;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java
index 7cf29e3aa9c..1194e6c69d8 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java
@@ -38,7 +38,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.search.aggregations.AggregationBuilders;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;
 import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
-import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.Stats;
 import org.hamcrest.Matchers;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java
index b7c5bf03ac5..dd3425c20f4 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java
@@ -80,7 +80,7 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
         }
         InternalAggregations subAggregations = new InternalAggregations(Collections.emptyList());
         BucketInfo bucketInfo = new BucketInfo(roundingInfos, randomIntBetween(0, roundingInfos.length - 1), subAggregations);
-        return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData);
+        return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData, 1);
     }
 
     /*
@@ -94,11 +94,11 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
         // an innerInterval that is quite large, such that targetBuckets * roundings[i].getMaximumInnerInterval()
         // will be larger than the estimate.
         roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE, timeZone),
-            1000L, 1000);
+            1000L, "s", 1000);
         roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR, timeZone),
-            60 * 1000L, 1, 5, 10, 30);
+            60 * 1000L, "m", 1, 5, 10, 30);
         roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY, timeZone),
-            60 * 60 * 1000L, 1, 3, 12);
+            60 * 60 * 1000L, "h", 1, 3, 12);
 
         OffsetDateTime timestamp = Instant.parse("2018-01-01T00:00:01.000Z").atOffset(ZoneOffset.UTC);
         // We want to pass a roundingIdx of zero, because in order to reproduce this bug, we need the function
@@ -198,6 +198,14 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
                     (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount());
         }
         assertEquals(expectedCounts, actualCounts);
+
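+        // The reduced histogram should report the interval that was actually used for rounding,
+        // i.e. the chosen inner interval combined with the rounding's unit abbreviation, unless
+        // the reduction collapsed everything into a single bucket.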
+        DateHistogramInterval expectedInterval;
+        if (reduced.getBuckets().size() == 1) {
+            expectedInterval = reduced.getInterval();
+        } else {
+            expectedInterval = new DateHistogramInterval(innerIntervalToUse + roundingInfo.unitAbbreviation);
+        }
+        assertThat(reduced.getInterval(), equalTo(expectedInterval));
     }
 
     private int getBucketCount(long lowest, long highest, RoundingInfo roundingInfo, long intervalInMillis) {
@@ -252,6 +260,6 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
             default:
                 throw new AssertionError("Illegal randomisation branch");
         }
-        return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData);
+        return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData, 1);
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java
index e1206cb8d15..0abfe871e6e 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java
@@ -56,13 +56,13 @@ import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuil
 import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
-import org.elasticsearch.search.aggregations.metrics.max.Max;
-import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.min.Min;
-import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.sum.InternalSum;
-import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.InternalMax;
+import org.elasticsearch.search.aggregations.metrics.Max;
+import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.Min;
+import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.InternalSum;
+import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.test.VersionUtils;
 
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java
index fd831e5076c..e0601cbe2f5 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java
@@ -35,8 +35,8 @@ import org.elasticsearch.index.mapper.TypeFieldMapper;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;
 import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
-import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.InternalMax;
+import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
 
 import java.io.IOException;
 import java.util.ArrayList;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java
index 86e937a356b..3a740e868ee 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java
@@ -28,6 +28,7 @@ import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.ScoreMode;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
@@ -105,8 +106,8 @@ public class BestDocsDeferringCollectorTests extends AggregatorTestCase {
             }
 
             @Override
-            public boolean needsScores() {
-                return false;
+            public ScoreMode scoreMode() {
+                return ScoreMode.COMPLETE_NO_SCORES;
             }
         };
     }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java
index 2b217f4ff6e..e446dfb3d2b 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java
@@ -37,8 +37,8 @@ import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
 import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;
-import org.elasticsearch.search.aggregations.metrics.min.Min;
-import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.Min;
+import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder;
 
 import java.io.IOException;
 
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java
index 160e51a67b2..c92681d99a9 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java
@@ -37,10 +37,10 @@ import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.bucket.AbstractTermsTestCase;
 import org.elasticsearch.search.aggregations.bucket.filter.Filter;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
-import org.elasticsearch.search.aggregations.metrics.avg.Avg;
-import org.elasticsearch.search.aggregations.metrics.stats.Stats;
-import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
-import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.metrics.Avg;
+import org.elasticsearch.search.aggregations.metrics.Stats;
+import org.elasticsearch.search.aggregations.metrics.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.Sum;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;
 import org.junit.After;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java
index 1b33ed47870..819d39cb62b 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java
@@ -67,8 +67,8 @@ import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuil
 import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal;
 import org.elasticsearch.search.aggregations.bucket.nested.InternalNested;
 import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHits;
-import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.InternalTopHits;
+import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder;
 import org.elasticsearch.search.aggregations.pipeline.bucketscript.BucketScriptPipelineAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.sort.FieldSortBuilder;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java
index b899c86d098..49442e3fbc0 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java
@@ -64,7 +64,8 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase {
     protected static int numDocs;
     protected static int numUniqueGeoPoints;
     protected static GeoPoint[] singleValues, multiValues;
-    protected static GeoPoint singleTopLeft, singleBottomRight, multiTopLeft, multiBottomRight, singleCentroid, multiCentroid, unmappedCentroid;
+    protected static GeoPoint singleTopLeft, singleBottomRight, multiTopLeft, multiBottomRight,
+            singleCentroid, multiCentroid, unmappedCentroid;
     protected static ObjectIntMap<String> expectedDocCountsForGeoHash = null;
     protected static ObjectObjectMap<String, GeoPoint> expectedCentroidsForGeoHash = null;
     protected static final double GEOHASH_TOLERANCE = 1E-5D;
@@ -135,7 +136,10 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase {
         assertAcked(prepareCreate(EMPTY_IDX_NAME).addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point"));
 
         assertAcked(prepareCreate(DATELINE_IDX_NAME)
-                .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long", "tag", "type=keyword"));
+                .addMapping("type", SINGLE_VALUED_FIELD_NAME,
+                        "type=geo_point", MULTI_VALUED_FIELD_NAME,
+                        "type=geo_point", NUMBER_FIELD_NAME,
+                        "type=long", "tag", "type=keyword"));
 
         GeoPoint[] geoValues = new GeoPoint[5];
         geoValues[0] = new GeoPoint(38, 178);
@@ -153,7 +157,11 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase {
                     .endObject()));
         }
         assertAcked(prepareCreate(HIGH_CARD_IDX_NAME).setSettings(Settings.builder().put("number_of_shards", 2))
-                .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long,store=true", "tag", "type=keyword"));
+                .addMapping("type", SINGLE_VALUED_FIELD_NAME,
+                        "type=geo_point", MULTI_VALUED_FIELD_NAME,
+                        "type=geo_point", NUMBER_FIELD_NAME,
+                        "type=long,store=true",
+                        "tag", "type=keyword"));
 
         for (int i = 0; i < 2000; i++) {
             singleVal = singleValues[i % numUniqueGeoPoints];
@@ -161,8 +169,14 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase {
                     .startObject()
                     .array(SINGLE_VALUED_FIELD_NAME, singleVal.lon(), singleVal.lat())
                     .startArray(MULTI_VALUED_FIELD_NAME)
-                    .startArray().value(multiValues[i % numUniqueGeoPoints].lon()).value(multiValues[i % numUniqueGeoPoints].lat()).endArray()
-                    .startArray().value(multiValues[(i + 1) % numUniqueGeoPoints].lon()).value(multiValues[(i + 1) % numUniqueGeoPoints].lat()).endArray()
+                    .startArray()
+                    .value(multiValues[i % numUniqueGeoPoints].lon())
+                    .value(multiValues[i % numUniqueGeoPoints].lat())
+                    .endArray()
+                    .startArray()
+                    .value(multiValues[(i + 1) % numUniqueGeoPoints].lon())
+                    .value(multiValues[(i + 1) % numUniqueGeoPoints].lat())
+                    .endArray()
                     .endArray()
                     .field(NUMBER_FIELD_NAME, i)
                     .field("tag", "tag" + i)
@@ -177,11 +191,12 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase {
         indexRandom(true, builders);
         ensureSearchable();
 
-        // Added to debug a test failure where the terms aggregation seems to be reporting two documents with the same value for NUMBER_FIELD_NAME. This will check that after
-        // random indexing each document only has 1 value for NUMBER_FIELD_NAME and it is the correct value. Following this initial change its seems that this call was getting
-        // more that 2000 hits (actual value was 2059) so now it will also check to ensure all hits have the correct index and type
-        SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME).addStoredField(NUMBER_FIELD_NAME).addSort(SortBuilders.fieldSort(NUMBER_FIELD_NAME)
-                .order(SortOrder.ASC)).setSize(5000).get();
+        // Added to debug a test failure where the terms aggregation seems to be reporting two documents with the same
+        // value for NUMBER_FIELD_NAME. This will check that after random indexing each document only has 1 value for
+        // NUMBER_FIELD_NAME and it is the correct value. Following this initial change it seems that this call was getting
+        // more than 2000 hits (actual value was 2059) so now it will also check to ensure all hits have the correct index and type.
+        SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME).addStoredField(NUMBER_FIELD_NAME)
+                .addSort(SortBuilders.fieldSort(NUMBER_FIELD_NAME).order(SortOrder.ASC)).setSize(5000).get();
         assertSearchResponse(response);
         long totalHits = response.getHits().getTotalHits();
         XContentBuilder builder = XContentFactory.jsonBuilder();
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesTestCase.java
similarity index 96%
rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesTestCase.java
rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesTestCase.java
index c4a3d3b2ffc..530046b496e 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesTestCase.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesTestCase.java
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.search.aggregations.metrics.percentiles;
+package org.elasticsearch.search.aggregations.metrics;
 
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.ToXContent;
@@ -26,6 +26,8 @@ import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.Aggregation.CommonFields;
 import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.ParsedPercentiles;
+import org.elasticsearch.search.aggregations.metrics.Percentile;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import org.elasticsearch.test.InternalAggregationTestCase;
 
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java
similarity index 96%
rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java
rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java
index 7835bf75e72..b83acfcba80 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.search.aggregations.metrics.avg;
+package org.elasticsearch.search.aggregations.metrics;
 
 import org.apache.lucene.document.IntPoint;
 import org.apache.lucene.document.NumericDocValuesField;
@@ -35,6 +35,9 @@ import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;
+import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.AvgAggregator;
+import org.elasticsearch.search.aggregations.metrics.InternalAvg;
 
 import java.io.IOException;
 import java.util.Arrays;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java
index 98541d0ff58..e18bfd7fcc8 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java
@@ -35,7 +35,6 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter;
 import org.elasticsearch.search.aggregations.bucket.global.Global;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
-import org.elasticsearch.search.aggregations.metrics.avg.Avg;
 
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.index.query.QueryBuilders.termQuery;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java
index df90dc4f7c3..5e1c0a4ebc3 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java
@@ -19,8 +19,6 @@
 
 package org.elasticsearch.search.aggregations.metrics;
 
-import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
-
 public class AvgTests extends AbstractNumericMetricTestCase<AvgAggregationBuilder> {
 
     @Override
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java
index 3544b02e97a..a2789a9ef16 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java
@@ -34,9 +34,6 @@ import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;
-import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregator;
-import org.elasticsearch.search.aggregations.metrics.cardinality.InternalCardinality;
 import org.elasticsearch.search.aggregations.support.ValueType;
 
 import java.io.IOException;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java
index c770bef7df6..cf155b8690d 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java
@@ -31,7 +31,6 @@ import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.bucket.global.Global;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
-import org.elasticsearch.search.aggregations.metrics.cardinality.Cardinality;
 import org.elasticsearch.test.ESIntegTestCase;
 
 import java.util.Collection;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java
similarity index 90%
rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityTests.java
rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java
index 1b3a1858176..4f631bde8ac 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java
@@ -17,9 +17,10 @@
  * under the License.
  */
 
-package org.elasticsearch.search.aggregations.metrics.cardinality;
+package org.elasticsearch.search.aggregations.metrics;
 
 import org.elasticsearch.search.aggregations.BaseAggregationTestCase;
+import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder;
 
 public class CardinalityTests extends BaseAggregationTestCase<CardinalityAggregationBuilder> {
 
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java
index 144305647eb..e65d1269520 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java
@@ -32,9 +32,6 @@ import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;
-import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
-import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats;
 
 import java.io.IOException;
 import java.util.function.Consumer;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java
index 7de333e8127..3daafb8684e 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java
@@ -30,8 +30,7 @@ import org.elasticsearch.search.aggregations.bucket.global.Global;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
 import org.elasticsearch.search.aggregations.bucket.missing.Missing;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
-import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
-import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats.Bounds;
+import org.elasticsearch.search.aggregations.metrics.ExtendedStats.Bounds;
 import org.elasticsearch.search.aggregations.BucketOrder;
 
 import java.util.Collection;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java
index 3f78cc17aa9..5135ec46a10 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java
@@ -19,8 +19,6 @@
 
 package org.elasticsearch.search.aggregations.metrics;
 
-import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder;
-
 public class ExtendedStatsTests extends AbstractNumericMetricTestCase<ExtendedStatsAggregationBuilder> {
 
     @Override
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java
similarity index 96%
rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorTests.java
rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java
index 5227c62e6b4..b171e7436ee 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.search.aggregations.metrics.geobounds;
+package org.elasticsearch.search.aggregations.metrics;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.LatLonDocValuesField;
@@ -32,7 +32,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;
 import org.elasticsearch.test.geo.RandomGeoGenerator;
 
-import static org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBoundsTests.GEOHASH_TOLERANCE;
+import static org.elasticsearch.search.aggregations.metrics.InternalGeoBoundsTests.GEOHASH_TOLERANCE;
 import static org.hamcrest.Matchers.closeTo;
 
 public class GeoBoundsAggregatorTests extends AggregatorTestCase {
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java
index 1a97cb49164..483cd9f7068 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java
@@ -26,8 +26,6 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.bucket.global.Global;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
-import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBounds;
-import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregator;
 import org.elasticsearch.test.ESIntegTestCase;
 
 import java.util.List;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java
9f5bd13b5f6..0dd19b738ee 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregationBuilder; public class GeoBoundsTests extends BaseAggregationTestCase<GeoBoundsAggregationBuilder> { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java similarity index 97% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java index 5ba9b4b01e7..38650707412 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.geocentroid; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.document.Document; import org.apache.lucene.document.LatLonDocValuesField; @@ -29,6 +29,8 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.InternalGeoCentroid; import org.elasticsearch.test.geo.RandomGeoGenerator; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java index 32b036606d3..f06e5510aed 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid; import org.elasticsearch.search.aggregations.bucket.global.Global; -import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroid; import org.elasticsearch.test.ESIntegTestCase; import java.util.List; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidTests.java index 90067df6013..59f8ec1a5b8 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregationBuilder; 
public class GeoCentroidTests extends BaseAggregationTestCase<GeoCentroidAggregationBuilder> { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java similarity index 92% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregatorTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java index 3513beee668..52bd6a37e6f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.document.Document; import org.apache.lucene.document.SortedNumericDocValuesField; @@ -31,10 +31,10 @@ import org.apache.lucene.util.NumericUtils; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; +import org.elasticsearch.search.aggregations.metrics.Percentile; +import org.elasticsearch.search.aggregations.metrics.PercentileRanks; +import org.elasticsearch.search.aggregations.metrics.PercentileRanksAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; import org.hamcrest.Matchers; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index cf994052131..1321c8bca47 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -30,9 +30,6 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java similarity index 94% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorTests.java rename to 
server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java index 690c561b36e..b68b68dd544 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.NumericDocValuesField; @@ -34,8 +34,10 @@ import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; +import org.elasticsearch.search.aggregations.metrics.HDRPercentilesAggregator; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; +import org.elasticsearch.search.aggregations.metrics.PercentilesAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; import java.io.IOException; import java.util.function.Consumer; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index ae745e1f1ad..67eb4939ae5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -31,9 +31,6 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlusTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlusTests.java similarity index 94% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlusTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlusTests.java index e58899807ab..514af2a6766 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlusTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlusTests.java @@ -17,15 +17,16 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.cardinality; +package org.elasticsearch.search.aggregations.metrics; import com.carrotsearch.hppc.BitMixer; import com.carrotsearch.hppc.IntHashSet; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.search.aggregations.metrics.HyperLogLogPlusPlus; import org.elasticsearch.test.ESTestCase; -import static org.elasticsearch.search.aggregations.metrics.cardinality.HyperLogLogPlusPlus.MAX_PRECISION; -import static org.elasticsearch.search.aggregations.metrics.cardinality.HyperLogLogPlusPlus.MIN_PRECISION; +import static org.elasticsearch.search.aggregations.metrics.HyperLogLogPlusPlus.MAX_PRECISION; +import static org.elasticsearch.search.aggregations.metrics.HyperLogLogPlusPlus.MIN_PRECISION; import static org.hamcrest.Matchers.closeTo; public class HyperLogLogPlusPlusTests extends ESTestCase { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvgTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalAvgTests.java similarity index 96% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvgTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalAvgTests.java index 5adfb11f5bb..10ae10a9af1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvgTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalAvgTests.java @@ -17,12 +17,14 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.avg; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; +import org.elasticsearch.search.aggregations.metrics.InternalAvg; +import org.elasticsearch.search.aggregations.metrics.ParsedAvg; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinalityTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalCardinalityTests.java similarity index 95% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinalityTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalCardinalityTests.java index fc1095c857f..d20f3620f90 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinalityTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalCardinalityTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.cardinality; +package org.elasticsearch.search.aggregations.metrics; import com.carrotsearch.hppc.BitMixer; @@ -28,6 +28,9 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.aggregations.ParsedAggregation; +import org.elasticsearch.search.aggregations.metrics.HyperLogLogPlusPlus; +import org.elasticsearch.search.aggregations.metrics.InternalCardinality; +import org.elasticsearch.search.aggregations.metrics.ParsedCardinality; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; import org.junit.After; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java index eb6a2e40a01..3c5201bfa8a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java @@ -23,9 +23,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; -import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats.Bounds; -import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats; -import org.elasticsearch.search.aggregations.metrics.stats.extended.ParsedExtendedStats; +import org.elasticsearch.search.aggregations.metrics.ExtendedStats.Bounds; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBoundsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalGeoBoundsTests.java similarity index 96% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBoundsTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalGeoBoundsTests.java index 3d96d92aeb9..aa2e527b2e6 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBoundsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalGeoBoundsTests.java @@ -17,10 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.geobounds; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.aggregations.ParsedAggregation; +import org.elasticsearch.search.aggregations.metrics.InternalGeoBounds; +import org.elasticsearch.search.aggregations.metrics.ParsedGeoBounds; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroidTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalGeoCentroidTests.java similarity index 96% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroidTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalGeoCentroidTests.java index 9dc7896638c..73fc160bcf1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroidTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalGeoCentroidTests.java @@ -16,12 +16,14 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.geocentroid; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.geo.GeoEncodingUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.aggregations.ParsedAggregation; +import org.elasticsearch.search.aggregations.metrics.InternalGeoCentroid; +import org.elasticsearch.search.aggregations.metrics.ParsedGeoCentroid; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; import org.elasticsearch.test.geo.RandomGeoGenerator; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesRanksTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentilesRanksTests.java similarity index 91% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesRanksTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentilesRanksTests.java index ee0e3602f20..dfd9403c8cc 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesRanksTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentilesRanksTests.java @@ -17,13 +17,15 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.HdrHistogram.DoubleHistogram; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentilesRanksTestCase; -import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentiles; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.InternalPercentilesRanksTestCase; +import org.elasticsearch.search.aggregations.metrics.ParsedPercentiles; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.util.Arrays; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentilesTests.java similarity index 92% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentilesTests.java index 7f1362af041..99b8bd5575b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentilesTests.java @@ -17,14 +17,16 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; +package org.elasticsearch.search.aggregations.metrics; import org.HdrHistogram.DoubleHistogram; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentilesTestCase; -import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; +import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentiles; +import org.elasticsearch.search.aggregations.metrics.InternalPercentilesTestCase; +import org.elasticsearch.search.aggregations.metrics.ParsedPercentiles; +import org.elasticsearch.search.aggregations.metrics.Percentile; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.util.Arrays; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalMaxTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalMaxTests.java index ad8bc350fbd..10d649a0c0d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalMaxTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalMaxTests.java @@ -22,8 +22,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.ParsedAggregation; -import org.elasticsearch.search.aggregations.metrics.max.InternalMax; -import org.elasticsearch.search.aggregations.metrics.max.ParsedMax; import 
org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalMinTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalMinTests.java index bca0f3cf31a..dba794f9d0e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalMinTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalMinTests.java @@ -22,8 +22,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.ParsedAggregation; -import org.elasticsearch.search.aggregations.metrics.min.InternalMin; -import org.elasticsearch.search.aggregations.metrics.min.ParsedMin; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesRanksTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesRanksTestCase.java similarity index 96% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesRanksTestCase.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesRanksTestCase.java index a63fd42da7d..eba4d783723 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesRanksTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesRanksTestCase.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesTestCase.java similarity index 97% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesTestCase.java index 1024577a6b6..b145349544e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesTestCase.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java index 70ddacf5698..89f42355f20 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.scripted; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.Writeable.Reader; @@ -30,6 +30,8 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.Aggregation.CommonFields; import org.elasticsearch.search.aggregations.ParsedAggregation; +import org.elasticsearch.search.aggregations.metrics.InternalScriptedMetric; +import org.elasticsearch.search.aggregations.metrics.ParsedScriptedMetric; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsBucketTests.java index cbb097a7282..cb4b024f99d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsBucketTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.ParsedAggregation; -import org.elasticsearch.search.aggregations.metrics.stats.InternalStats; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.InternalStatsBucket; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.ParsedStatsBucket; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java index 203d584e66e..8198d6c2e81 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java @@ -26,8 +26,6 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; -import 
org.elasticsearch.search.aggregations.metrics.stats.InternalStats; -import org.elasticsearch.search.aggregations.metrics.stats.ParsedStats; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java index aa9d25af49e..4f44be7d508 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java @@ -22,8 +22,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; -import org.elasticsearch.search.aggregations.metrics.sum.InternalSum; -import org.elasticsearch.search.aggregations.metrics.sum.ParsedSum; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesRanksTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentilesRanksTests.java similarity index 91% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesRanksTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentilesRanksTests.java index 35c566c2e80..66e6891f934 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesRanksTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentilesRanksTests.java @@ -17,12 +17,15 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentilesRanksTestCase; -import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentiles; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.TDigestState; +import org.elasticsearch.search.aggregations.metrics.InternalPercentilesRanksTestCase; +import org.elasticsearch.search.aggregations.metrics.ParsedPercentiles; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.util.Arrays; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentilesTests.java similarity index 91% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentilesTests.java index 73c9b8a1608..25ee09ca5cb 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentilesTests.java @@ -17,12 +17,15 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentilesTestCase; -import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentiles; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles; +import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentiles; +import org.elasticsearch.search.aggregations.metrics.TDigestState; +import org.elasticsearch.search.aggregations.metrics.InternalPercentilesTestCase; +import org.elasticsearch.search.aggregations.metrics.ParsedPercentiles; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.util.Arrays; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTopHitsTests.java similarity index 91% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTopHitsTests.java index 3289c5a7f64..3e97ec94f6b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTopHitsTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.tophits; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.search.FieldComparator; @@ -26,14 +26,18 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.text.Text; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.ParsedAggregation; +import org.elasticsearch.search.aggregations.metrics.InternalTopHits; +import org.elasticsearch.search.aggregations.metrics.ParsedTopHits; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; @@ -104,12 +108,13 @@ public class InternalTopHitsTests extends InternalAggregationTestCase<InternalTopHits> { List<PipelineAggregator> pipelineAggregators = instance.pipelineAggregators(); Map<String, Object> metaData = instance.getMetaData(); @@ -268,7 +273,8 @@ public class InternalTopHitsTests extends InternalAggregationTestCase<InternalTopHits> { @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java index dfee4437fbe..5b279f1ea49 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java @@ -30,9 +30,6 @@ import org.apache.lucene.store.Directory; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.min.InternalMin; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregator; public class MinAggregatorTests extends AggregatorTestCase { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java index 7f2522c04bb..d92d212f4d2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java @@ -29,7 +29,6 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.min.Min; import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Collection; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java index eed4059ade7..699ad8117d0 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; - public class MinTests extends AbstractNumericMetricTestCase<MinAggregationBuilder> { @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java index a678f69f19b..6483dbbc6e3 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregationBuilder; public class PercentileRanksTests extends BaseAggregationTestCase<PercentileRanksAggregationBuilder> { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesMethodTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethodTests.java similarity index 95% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesMethodTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethodTests.java index 97d5cf1f9ee..70445821cef 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesMethodTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethodTests.java @@ -17,10 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.percentiles; +package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; import org.elasticsearch.test.ESTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java index ea0c9f39696..edc4b7954a3 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregationBuilder; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java index 65e42556461..56b8938b6e5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.metrics.scripted; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; @@ -35,6 +35,8 @@ import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.metrics.ScriptedMetric; +import org.elasticsearch.search.aggregations.metrics.ScriptedMetricAggregationBuilder; import org.junit.BeforeClass; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index c000b7fb228..2643b6c6166 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -37,7 +37,6 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetric; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -108,8 +107,14 @@ public class ScriptedMetricIT extends ESIntegTestCase { aggScript(vars, state -> state.put((String) XContentMapValues.extractValue("params.param1", vars), XContentMapValues.extractValue("params.param2", vars)))); - scripts.put("vars.multiplier = 3", vars -> - ((Map<String, Object>) vars.get("vars")).put("multiplier", 3)); + scripts.put("vars.multiplier = 3", vars -> { + ((Map<String, Object>) vars.get("vars")).put("multiplier", 3); + + Map<String, Object> state = (Map<String, Object>) vars.get("state"); + state.put("list", new ArrayList()); + + return state; + }); scripts.put("state.list.add(vars.multiplier)", vars -> aggScript(vars, state -> { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java index 453d830002a..a624eddea69 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder; import java.util.Collections; import java.util.HashMap; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java index c5c1420fb22..52a45f9c017 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java @@ -31,8 +31,6 @@ import 
org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.stats.InternalStats; -import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder; import java.io.IOException; import java.util.function.Consumer; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index e277902ace2..a97982cccac 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -31,7 +31,6 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.stats.Stats; import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Collection; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java index 76a8e9aa98a..e2db3ac2fb4 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder; - public class StatsTests extends AbstractNumericMetricTestCase<StatsAggregationBuilder> { @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java index edaf5ae03f9..eb57bc9a511 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java @@ -39,9 +39,6 @@ import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregator; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index b3a5df4dbfc..6967b7ffc3f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -30,7 +30,6 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import 
org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.hamcrest.core.IsNull; import java.util.ArrayList; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java index edc6d4edef0..204ee27df3c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; - public class SumTests extends AbstractNumericMetricTestCase<SumAggregationBuilder> { @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java similarity index 92% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregatorTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java index 6545fe9d3ff..363ba141983 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.document.Document; import org.apache.lucene.document.SortedNumericDocValuesField; @@ -31,10 +31,10 @@ import org.apache.lucene.util.NumericUtils; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; +import org.elasticsearch.search.aggregations.metrics.Percentile; +import org.elasticsearch.search.aggregations.metrics.PercentileRanks; +import org.elasticsearch.search.aggregations.metrics.PercentileRanksAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; import org.hamcrest.Matchers; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 3846168009d..8cbf9883fe5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -31,10 +31,6 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import 
org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java similarity index 95% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java index 85ab361a8b3..8a4f399cb25 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.NumericDocValuesField; @@ -34,8 +34,10 @@ import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles; +import org.elasticsearch.search.aggregations.metrics.PercentilesAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; +import org.elasticsearch.search.aggregations.metrics.TDigestPercentilesAggregator; import java.io.IOException; import java.util.function.Consumer; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index 89c7d12c746..73ce6c7ece7 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -31,10 +31,6 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; diff --git 
a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java index 3fe75b77e7f..c888dbf8d2e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.metrics.tophits; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.document.Document; @@ -49,6 +49,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.metrics.TopHits; import org.elasticsearch.search.sort.SortOrder; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 952eb22848e..03fa60c6d8e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -46,8 +46,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.nested.Nested; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; -import org.elasticsearch.search.aggregations.metrics.max.Max; -import org.elasticsearch.search.aggregations.metrics.tophits.TopHits; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.rescore.QueryRescorerBuilder; @@ -120,7 +118,8 @@ public class TopHitsIT extends ESIntegTestCase { assertAcked(prepareCreate("idx").addMapping("type", TERMS_AGGS_FIELD, "type=keyword")); assertAcked(prepareCreate("field-collapsing").addMapping("type", "group", "type=keyword")); createIndex("empty"); - assertAcked(prepareCreate("articles").addMapping("article", jsonBuilder().startObject().startObject("article").startObject("properties") + assertAcked(prepareCreate("articles").addMapping("article", + jsonBuilder().startObject().startObject("article").startObject("properties") .startObject(TERMS_AGGS_FIELD) .field("type", "keyword") .endObject() @@ -251,15 +250,20 @@ public class TopHitsIT extends ESIntegTestCase { ); builders.add( client().prepareIndex("articles", "article", "2") - .setSource(jsonBuilder().startObject().field("title", "title 2").field("body", "some different text").startArray("comments") + .setSource(jsonBuilder().startObject().field("title", "title 2").field("body", "some different text") + .startArray("comments") .startObject() .field("user", "b").field("date", 
3L).field("message", "some comment") .startArray("reviewers") .startObject().field("name", "user f").endObject() .endArray() .endObject() - .startObject().field("user", "c").field("date", 4L).field("message", "some other comment").endObject() - .endArray().endObject()) + .startObject() + .field("user", "c") + .field("date", 4L) + .field("message", "some other comment") + .endObject() + .endArray().endObject()) ); indexRandom(true, builders); @@ -314,14 +318,15 @@ public class TopHitsIT extends ESIntegTestCase { .prepareSearch("field-collapsing") .setSize(0) .setQuery(matchQuery("text", "x y z")) - .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group").subAggregation(topHits("hits"))) + .addAggregation(terms("terms") + .executionHint(randomExecutionHint()).field("group").subAggregation(topHits("hits"))) .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits(), equalTo(8L)); assertThat(response.getHits().getHits().length, equalTo(0)); - assertThat(response.getHits().getMaxScore(), equalTo(0f)); + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -356,7 +361,7 @@ public class TopHitsIT extends ESIntegTestCase { assertThat(response.getHits().getTotalHits(), equalTo(8L)); assertThat(response.getHits().getHits().length, equalTo(0)); - assertThat(response.getHits().getMaxScore(), equalTo(0f)); + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -584,7 +589,8 @@ public class TopHitsIT extends ESIntegTestCase { .explain(true) .storedField("text") .docValueField("field1") - .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) + .scriptField("script", + new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) .fetchSource("text", null) .version(true) ) @@ -761,7 +767,8 @@ public class TopHitsIT extends ESIntegTestCase { .subAggregation( nested("to-reviewers", "comments.reviewers").subAggregation( // Also need to sort on _doc because there are two reviewers with the same name - topHits("top-reviewers").sort("comments.reviewers.name", SortOrder.ASC).sort("_doc", SortOrder.DESC).size(7) + topHits("top-reviewers") + .sort("comments.reviewers.name", SortOrder.ASC).sort("_doc", SortOrder.DESC).size(7) ) ) .subAggregation(topHits("top-comments").sort("comments.date", SortOrder.DESC).size(4)) @@ -866,7 +873,9 @@ public class TopHitsIT extends ESIntegTestCase { nested("to-comments", "comments").subAggregation( topHits("top-comments").size(1).highlighter(new HighlightBuilder().field(hlField)).explain(true) .docValueField("comments.user") - .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())).fetchSource("comments.message", null) + .scriptField("script", + new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) + .fetchSource("comments.message", null) .version(true).sort("comments.date", SortOrder.ASC))).get(); assertHitCount(searchResponse, 2); Nested nested = searchResponse.getAggregations().get("to-comments"); @@ -883,7 +892,8 @@ public class TopHitsIT extends ESIntegTestCase { assertThat(highlightField.getFragments().length, equalTo(1)); assertThat(highlightField.getFragments()[0].string(), 
equalTo("some comment")); - // Can't explain nested hit with the main query, since both are in a different scopes, also the nested doc may not even have matched with the main query + // Can't explain nested hit with the main query, since both are in a different scopes, also the nested doc may not + // even have matched with the main query. // If top_hits would have a query option then we can explain that query Explanation explanation = searchHit.getExplanation(); assertFalse(explanation.isMatch()); @@ -913,7 +923,13 @@ public class TopHitsIT extends ESIntegTestCase { .subAggregation( nested("to-comments", "comments") .subAggregation(topHits("comments") - .highlighter(new HighlightBuilder().field(new HighlightBuilder.Field("comments.message").highlightQuery(matchQuery("comments.message", "text")))) + .highlighter( + new HighlightBuilder() + .field( + new HighlightBuilder.Field("comments.message") + .highlightQuery(matchQuery("comments.message", "text")) + ) + ) .sort("comments.id", SortOrder.ASC)) ) ) @@ -953,7 +969,8 @@ public class TopHitsIT extends ESIntegTestCase { .executionHint(randomExecutionHint()) .field(TERMS_AGGS_FIELD) .subAggregation( - topHits("hits").size(ArrayUtil.MAX_ARRAY_LENGTH - 1).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) + topHits("hits").size(ArrayUtil.MAX_ARRAY_LENGTH - 1) + .sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) ) ) .get(); @@ -1052,7 +1069,7 @@ public class TopHitsIT extends ESIntegTestCase { for (SearchHit hit : hits) { assertThat(hit.getSourceAsMap(), nullValue()); assertThat(hit.getId(), nullValue()); - assertThat(hit.getType(), nullValue()); + assertThat(hit.getType(), equalTo("type")); } } } @@ -1064,7 +1081,11 @@ public class TopHitsIT extends ESIntegTestCase { public void testDontCacheScripts() throws Exception { try { assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .setSettings( + Settings.builder() + .put("requests.cache.enable", true) + .put("number_of_shards", 1) + .put("number_of_replicas", 1)) .get()); indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java index 4d2331b86f2..006c0fedba5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilderTests; import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorTests.java 
b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java similarity index 96% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java index 294343c2455..f9118e30a6e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.valuecount; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.NumericDocValuesField; @@ -41,6 +41,9 @@ import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.metrics.ValueCount; +import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ValueCountAggregator; import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java index 8c5a8e059f7..357c5a94a7a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount; import org.elasticsearch.test.ESIntegTestCase; import java.util.Collection; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java index 9a3ed326044..0013a65ea18 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; public class ValueCountTests extends BaseAggregationTestCase<ValueCountAggregationBuilder> { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregatorTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java index 70b1b651723..3836f0cc2ae 100644 ---
a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.metrics.weighted_avg; +package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.NumericDocValuesField; @@ -36,6 +36,9 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.metrics.InternalWeightedAvg; +import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregator; import org.elasticsearch.search.aggregations.support.MultiValuesSourceFieldConfig; import org.joda.time.DateTimeZone; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricAggStateV6CompatTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricAggStateV6CompatTests.java deleted file mode 100644 index 4abf68a960b..00000000000 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricAggStateV6CompatTests.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.search.aggregations.metrics.scripted; - -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.MockScriptEngine; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptedMetricAggContexts; -import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.script.ScriptModule; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.aggregations.Aggregation.CommonFields; -import org.elasticsearch.search.aggregations.ParsedAggregation; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.test.InternalAggregationTestCase; - -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.function.Function; -import java.util.function.Predicate; - -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.sameInstance; - -/** - * This test verifies that the _aggs param is added correctly when the system property - * "es.aggregations.enable_scripted_metric_agg_param" is set to true. - */ -public class InternalScriptedMetricAggStateV6CompatTests extends InternalAggregationTestCase<InternalScriptedMetric> { - - private static final String REDUCE_SCRIPT_NAME = "reduceScript"; - - @Override - protected InternalScriptedMetric createTestInstance(String name, List<PipelineAggregator> pipelineAggregators, - Map<String, Object> metaData) { - Script reduceScript = new Script(ScriptType.INLINE, MockScriptEngine.NAME, REDUCE_SCRIPT_NAME, Collections.emptyMap()); - return new InternalScriptedMetric(name, "agg value", reduceScript, pipelineAggregators, metaData); - } - - /** - * Mock of the script service. The script that is run looks at the - * "_aggs" parameter to verify that it was put in place by InternalScriptedMetric. - */ - @Override - protected ScriptService mockScriptService() { - Function<Map<String, Object>, Object> script = params -> { - Object aggs = params.get("_aggs"); - Object states = params.get("states"); - assertThat(aggs, instanceOf(List.class)); - assertThat(aggs, sameInstance(states)); - return aggs; - }; - - @SuppressWarnings("unchecked") - MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, - Collections.singletonMap(REDUCE_SCRIPT_NAME, script)); - Map<String, ScriptEngine> engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); - return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); - } - - @Override - protected void assertReduced(InternalScriptedMetric reduced, List<InternalScriptedMetric> inputs) { - assertWarnings(ScriptedMetricAggContexts.AGG_PARAM_DEPRECATION_WARNING); - } - - @Override - protected Reader<InternalScriptedMetric> instanceReader() { - return InternalScriptedMetric::new; - } - - @Override - protected void assertFromXContent(InternalScriptedMetric aggregation, ParsedAggregation parsedAggregation) {} - - @Override - protected Predicate<String> excludePathsFromXContentInsertion() { - return path -> path.contains(CommonFields.VALUE.getPreferredName()); - } - - @Override - protected InternalScriptedMetric mutateInstance(InternalScriptedMetric instance) { - String name = instance.getName(); - Object value = instance.aggregation(); - Script reduceScript = instance.reduceScript; - List<PipelineAggregator> pipelineAggregators = instance.pipelineAggregators(); - Map<String, Object> metaData = instance.getMetaData(); - return new InternalScriptedMetric(name + randomAlphaOfLength(5), value, reduceScript, pipelineAggregators, - metaData); - } -} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorAggStateV6CompatTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorAggStateV6CompatTests.java deleted file mode 100644 index bf78cae711b..00000000000 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorAggStateV6CompatTests.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -package org.elasticsearch.search.aggregations.metrics.scripted; - -import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.store.Directory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.script.MockScriptEngine; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptedMetricAggContexts; -import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.script.ScriptModule; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.function.Function; - -import static java.util.Collections.singleton; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.sameInstance; - -/** - * This test verifies that the _agg param is added correctly when the system property - * "es.aggregations.enable_scripted_metric_agg_param" is set to true. - */ -public class ScriptedMetricAggregatorAggStateV6CompatTests extends AggregatorTestCase { - - private static final String AGG_NAME = "scriptedMetric"; - private static final Script INIT_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "initScript", Collections.emptyMap()); - private static final Script MAP_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "mapScript", Collections.emptyMap()); - private static final Script COMBINE_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScript", - Collections.emptyMap()); - - private static final Script INIT_SCRIPT_EXPLICIT_AGG = new Script(ScriptType.INLINE, MockScriptEngine.NAME, - "initScriptExplicitAgg", Collections.emptyMap()); - private static final Script MAP_SCRIPT_EXPLICIT_AGG = new Script(ScriptType.INLINE, MockScriptEngine.NAME, - "mapScriptExplicitAgg", Collections.emptyMap()); - private static final Script COMBINE_SCRIPT_EXPLICIT_AGG = new Script(ScriptType.INLINE, MockScriptEngine.NAME, - "combineScriptExplicitAgg", Collections.emptyMap()); - private static final String EXPLICIT_AGG_OBJECT = "Explicit agg object"; - - private static final Map<String, Function<Map<String, Object>, Object>> SCRIPTS = new HashMap<>(); - - @BeforeClass - @SuppressWarnings("unchecked") - public static void initMockScripts() { - // If _agg is provided implicitly, it should be the same objects as "state" from the context. - SCRIPTS.put("initScript", params -> { - Object agg = params.get("_agg"); - Object state = params.get("state"); - assertThat(agg, instanceOf(Map.class)); - assertThat(agg, sameInstance(state)); - return agg; - }); - SCRIPTS.put("mapScript", params -> { - Object agg = params.get("_agg"); - Object state = params.get("state"); - assertThat(agg, instanceOf(Map.class)); - assertThat(agg, sameInstance(state)); - return agg; - }); - SCRIPTS.put("combineScript", params -> { - Object agg = params.get("_agg"); - Object state = params.get("state"); - assertThat(agg, instanceOf(Map.class)); - assertThat(agg, sameInstance(state)); - return agg; - }); - - SCRIPTS.put("initScriptExplicitAgg", params -> { - Object agg = params.get("_agg"); - assertThat(agg, equalTo(EXPLICIT_AGG_OBJECT)); - return agg; - }); - SCRIPTS.put("mapScriptExplicitAgg", params -> { - Object agg = params.get("_agg"); - assertThat(agg, equalTo(EXPLICIT_AGG_OBJECT)); - return agg; - }); - SCRIPTS.put("combineScriptExplicitAgg", params -> { - Object agg = params.get("_agg"); - assertThat(agg, equalTo(EXPLICIT_AGG_OBJECT)); - return agg; - }); - } - - /** - * Test that the _agg param is implicitly added - */ - public void testWithImplicitAggParam() throws IOException { - try (Directory directory = newDirectory()) { - Integer numDocs = 10; - try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { - for (int i = 0; i < numDocs; i++) { - indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); - } - } - try (IndexReader indexReader = DirectoryReader.open(directory)) { - ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); - aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT).combineScript(COMBINE_SCRIPT); - search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); - } - } - - assertWarnings(ScriptedMetricAggContexts.AGG_PARAM_DEPRECATION_WARNING); - } - - /** - * Test that an explicitly added _agg param is honored - */ - public void testWithExplicitAggParam() throws IOException { - try (Directory directory = newDirectory()) { - Integer numDocs = 10; - try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { - for (int i = 0; i < numDocs; i++) { - indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); - } - } - - Map<String, Object> aggParams = new HashMap<>(); - aggParams.put("_agg", EXPLICIT_AGG_OBJECT); - - try (IndexReader indexReader = DirectoryReader.open(directory)) { - ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); - aggregationBuilder - .params(aggParams) - .initScript(INIT_SCRIPT_EXPLICIT_AGG) - .mapScript(MAP_SCRIPT_EXPLICIT_AGG) - .combineScript(COMBINE_SCRIPT_EXPLICIT_AGG); - search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); - } - } - - assertWarnings(ScriptedMetricAggContexts.AGG_PARAM_DEPRECATION_WARNING); - } - - /** - * We cannot use Mockito for mocking QueryShardContext in this case because - * script-related methods (e.g. QueryShardContext#getLazyExecutableScript) - * is final and cannot be mocked - */ - @Override - protected QueryShardContext queryShardContextMock(MapperService mapperService) { - MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, SCRIPTS); - Map<String, ScriptEngine> engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); - ScriptService scriptService = new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); - return new QueryShardContext(0, mapperService.getIndexSettings(), null, null, mapperService, null, scriptService, - xContentRegistry(), writableRegistry(), null, null, System::currentTimeMillis, null); - } -} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java index 4858582da80..8514b1a0c0d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java @@ -26,7 +26,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java index 9e85455d96d..bd92c73f997 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java @@ -30,7 +30,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.range.Range; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java index 9ea4f813dff..05de849854f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java @@ -30,7 +30,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.test.ESIntegTestCase;
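The two *AggStateV6Compat test classes deleted above covered the legacy _agg/_aggs script parameters of the scripted_metric aggregation; with that compatibility shim gone, scripts reach the accumulator only through the state and states script variables. A minimal sketch of that contract, assuming inline Painless bodies and a numeric "amount" field (the aggregation below is illustrative, not part of this patch):

import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder;

class ScriptedMetricStateExample {
    // Each shard-local phase reads and writes "state"; the reduce phase sees the
    // per-shard combine results as "states". No "_agg"/"_aggs" aliases are involved.
    static ScriptedMetricAggregationBuilder totalAmount() {
        return new ScriptedMetricAggregationBuilder("total_amount")
                .initScript(new Script("state.values = []"))
                .mapScript(new Script("state.values.add(doc['amount'].value)"))
                .combineScript(new Script("double sum = 0; for (v in state.values) { sum += v } return sum"))
                .reduceScript(new Script("double total = 0; for (s in states) { total += s } return total"));
    }
}

diff --git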
a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java index f5dc01f1914..08337ef969f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java @@ -41,10 +41,10 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggre import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.InternalAvg; +import org.elasticsearch.search.aggregations.metrics.Sum; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.cumulativesum.CumulativeSumPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java index b0f5eece900..aaa296fc317 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.derivative.Derivative; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java index 447d82084de..5944777b628 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java @@ -29,8 +29,8 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.metrics.stats.Stats; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import 
org.elasticsearch.search.aggregations.metrics.Stats; +import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.derivative.Derivative; import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index aa587f3b3c3..40c3bfb500e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -28,8 +28,8 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats.Bounds; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.ExtendedStats.Bounds; +import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucket; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java index 494628eb932..c3075da8271 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java index 51b9973b315..82629363f8d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java @@ -25,7 +25,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.Sum; import 
org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.search.aggregations.BucketOrder; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java index 2c1abcd953d..8f77c305229 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -27,8 +27,8 @@ import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.Percentile; +import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucket; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java index ce9394692de..9f7b33e805b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java @@ -20,10 +20,10 @@ package org.elasticsearch.search.aggregations.pipeline; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.test.ESTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java index c87b4320896..f5d409951e3 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java @@ -26,7 +26,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.Sum; import 
org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucket; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java index f3fea8f6dd7..a803b9fe3d4 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java @@ -26,7 +26,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketAggregatorTests.java index 5f804c7a8bd..dd8938bc878 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketAggregatorTests.java @@ -38,8 +38,8 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.InternalAvg; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucketTests.java index e3ebd9dc77a..c1d3ffeb0e5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucketTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregation.CommonFields; import org.elasticsearch.search.aggregations.ParsedAggregation; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; +import org.elasticsearch.search.aggregations.metrics.Percentile; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import 
org.elasticsearch.test.InternalAggregationTestCase; @@ -37,7 +37,7 @@ import java.util.List; import java.util.Map; import java.util.function.Predicate; -import static org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentilesTestCase.randomPercents; +import static org.elasticsearch.search.aggregations.metrics.InternalPercentilesTestCase.randomPercents; public class InternalPercentilesBucketTests extends InternalAggregationTestCase<InternalPercentilesBucket> { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucketTests.java index 5261c686174..03481ab7f65 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucketTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.metrics.InternalExtendedStatsTests; -import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats; +import org.elasticsearch.search.aggregations.metrics.InternalExtendedStats; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.util.Collections; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortIT.java index 2e2f2a1b0f1..df2d7e64f46 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.avg.Avg; +import org.elasticsearch.search.aggregations.metrics.Avg; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnUnitTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnUnitTests.java index db3f2d745e1..db333a8ed7a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnUnitTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnUnitTests.java @@ -40,7 +40,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggre import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder;
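// Note: the hunks above and below are all one mechanical refactor: the per-metric
// subpackages (metrics.avg, metrics.max, metrics.min, metrics.sum, metrics.percentiles,
// and so on) are collapsed into the single org.elasticsearch.search.aggregations.metrics
// package. Class names are unchanged, so only import statements move. A minimal sketch of
// caller code under the new layout (the "avg_price" aggregation is illustrative, not from
// this patch):
import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder;

class FlattenedMetricsImportExample {
    // Previously imported from org.elasticsearch.search.aggregations.metrics.avg;
    // the builder API itself is untouched by the package move.
    static AvgAggregationBuilder averagePrice() {
        return new AvgAggregationBuilder("avg_price").field("price");
    }
}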
import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java index 01af64d26de..d14f93b7a51 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.collect.EvictingQueue; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.metrics.avg.Avg; +import org.elasticsearch.search.aggregations.metrics.Avg; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregationHelperTests; import org.elasticsearch.search.aggregations.pipeline.SimpleValue; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java index 1915857302c..6f2bedbdd37 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations.support; import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.util.BytesRef; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.aggregations.support.values.ScriptBytesValues; @@ -59,7 +59,7 @@ public class ScriptValuesTests extends ESTestCase { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { } @Override diff --git a/server/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java index 92488a69d6d..d5ceec9d7c2 100644 --- a/server/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java @@ -60,6 +60,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; public class TransportTwoNodesSearchIT extends ESIntegTestCase { @@ -146,16 +147,16 @@ public class TransportTwoNodesSearchIT extends ESIntegTestCase { SearchHit hit = hits[i]; assertThat(hit.getExplanation(), notNullValue()); assertThat(hit.getExplanation().getDetails().length, equalTo(1)); - assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(2)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails().length, equalTo(2)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[0].getDescription(), - equalTo("docFreq")); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[0].getValue(), - equalTo(100.0f)); - 
assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[1].getDescription(), - equalTo("docCount")); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[1].getValue(), - equalTo(100.0f)); + assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(3)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails().length, equalTo(2)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getDescription(), + startsWith("n,")); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getValue(), + equalTo(100L)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getDescription(), + startsWith("N,")); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getValue(), + equalTo(100L)); assertThat("id[" + hit.getId() + "] -> " + hit.getExplanation().toString(), hit.getId(), equalTo(Integer.toString(100 - total - i - 1))); } total += hits.length; @@ -181,16 +182,16 @@ public class TransportTwoNodesSearchIT extends ESIntegTestCase { SearchHit hit = hits[i]; assertThat(hit.getExplanation(), notNullValue()); assertThat(hit.getExplanation().getDetails().length, equalTo(1)); - assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(2)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails().length, equalTo(2)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[0].getDescription(), - equalTo("docFreq")); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[0].getValue(), - equalTo(100.0f)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[1].getDescription(), - equalTo("docCount")); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[1].getValue(), - equalTo(100.0f)); + assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(3)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails().length, equalTo(2)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getDescription(), + startsWith("n,")); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getValue(), + equalTo(100L)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getDescription(), + startsWith("N,")); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getValue(), + equalTo(100L)); assertThat("id[" + hit.getId() + "]", hit.getId(), equalTo(Integer.toString(total + i))); } total += hits.length; diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java index 5cc4e2ddc68..7790e8d6576 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.test.TestSearchContext; import java.io.IOException; import java.util.Collections; +import java.util.Map; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -78,6 +79,29 @@ public class FetchSourceSubPhaseTests extends ESTestCase { assertEquals(Collections.singletonMap("field","value"), hitContext.hit().getSourceAsMap()); } + public void 
testNestedSource() throws IOException { + Map<String, Object> expectedNested = Collections.singletonMap("nested2", Collections.singletonMap("field", "value0")); + XContentBuilder source = XContentFactory.jsonBuilder().startObject() + .field("field", "value") + .field("field2", "value2") + .field("nested1", expectedNested) + .endObject(); + FetchSubPhase.HitContext hitContext = hitExecuteMultiple(source, true, null, null, + new SearchHit.NestedIdentity("nested1", 0, null)); + assertEquals(expectedNested, hitContext.hit().getSourceAsMap()); + hitContext = hitExecuteMultiple(source, true, new String[]{"invalid"}, null, + new SearchHit.NestedIdentity("nested1", 0, null)); + assertEquals(Collections.emptyMap(), hitContext.hit().getSourceAsMap()); + + hitContext = hitExecuteMultiple(source, true, null, null, + new SearchHit.NestedIdentity("nested1", 0, new SearchHit.NestedIdentity("nested2", 0, null))); + assertEquals(Collections.singletonMap("field", "value0"), hitContext.hit().getSourceAsMap()); + + hitContext = hitExecuteMultiple(source, true, new String[]{"invalid"}, null, + new SearchHit.NestedIdentity("nested1", 0, new SearchHit.NestedIdentity("nested2", 0, null))); + assertEquals(Collections.emptyMap(), hitContext.hit().getSourceAsMap()); + } + public void testSourceDisabled() throws IOException { FetchSubPhase.HitContext hitContext = hitExecute(null, true, null, null); assertNull(hitContext.hit().getSourceAsMap()); @@ -96,17 +120,29 @@ } private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude) { + return hitExecute(source, fetchSource, include, exclude, null); + } + + + private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude, + SearchHit.NestedIdentity nestedIdentity) { return hitExecuteMultiple(source, fetchSource, include == null ? Strings.EMPTY_ARRAY : new String[]{include}, - exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}); + exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}, nestedIdentity); } private FetchSubPhase.HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes) { + return hitExecuteMultiple(source, fetchSource, includes, excludes, null); + } + + private FetchSubPhase.HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes, + SearchHit.NestedIdentity nestedIdentity) { FetchSourceContext fetchSourceContext = new FetchSourceContext(fetchSource, includes, excludes); SearchContext searchContext = new FetchSourceSubPhaseTestSearchContext(fetchSourceContext, source == null ?
null : BytesReference.bytes(source)); FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); - hitContext.reset(new SearchHit(1, null, null, null), null, 1, null); + final SearchHit searchHit = new SearchHit(1, null, null, nestedIdentity, null); + hitContext.reset(searchHit, null, 1, null); FetchSourceSubPhase phase = new FetchSourceSubPhase(); phase.hitExecute(searchContext, hitContext); return hitContext; diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index aea0243a399..45b6340ba6f 100644 --- a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -810,6 +810,32 @@ public class SearchFieldsIT extends ESIntegTestCase { equalTo(new BytesRef(new byte[] {42, 100}))); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + builder = client().prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("*field"); + searchResponse = builder.execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(1)); + fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", + "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field", + "binary_field", "ip_field"))); + + assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValue(), equalTo((Object) 3L)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo((Object) 4L)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); + dateField = searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(); + assertThat(dateField.toInstant().toEpochMilli(), equalTo(date.toInstant().toEpochMilli())); + assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), + equalTo(new BytesRef(new byte[] {42, 100}))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + builder = client().prepareSearch().setQuery(matchAllQuery()) .addDocValueField("text_field", "use_field_mapping") .addDocValueField("keyword_field", "use_field_mapping") @@ -977,6 +1003,70 @@ public class SearchFieldsIT extends ESIntegTestCase { assertThat(fetchedDate, equalTo(date)); } + public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + 
.field("enabled", false) + .endObject() + .startObject("properties") + .startObject("text_field") + .field("type", "text") + .field("fielddata", true) + .endObject() + .startObject("date_field") + .field("type", "date") + .field("format", "yyyy-MM-dd") + .endObject() + .startObject("text_field_alias") + .field("type", "alias") + .field("path", "text_field") + .endObject() + .startObject("date_field_alias") + .field("type", "alias") + .field("path", "date_field") + .endObject() + .endObject() + .endObject() + .endObject(); + assertAcked(prepareCreate("test").addMapping("type", mapping)); + ensureGreen("test"); + + ZonedDateTime date = ZonedDateTime.of(1990, 12, 29, 0, 0, 0, 0, ZoneOffset.UTC); + DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd", Locale.ROOT); + + index("test", "type", "1", "text_field", "foo", "date_field", formatter.format(date)); + refresh("test"); + + SearchRequestBuilder builder = client().prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("*alias", "use_field_mapping") + .addDocValueField("date_field"); + SearchResponse searchResponse = builder.execute().actionGet(); + + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 1); + SearchHit hit = searchResponse.getHits().getAt(0); + + Map fields = hit.getFields(); + assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); + + DocumentField textFieldAlias = fields.get("text_field_alias"); + assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); + assertThat(textFieldAlias.getValue(), equalTo("foo")); + + DocumentField dateFieldAlias = fields.get("date_field_alias"); + assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); + assertThat(dateFieldAlias.getValue(), + equalTo("1990-12-29")); + + DocumentField dateField = fields.get("date_field"); + assertThat(dateField.getName(), equalTo("date_field")); + + ZonedDateTime fetchedDate = dateField.getValue(); + assertThat(fetchedDate, equalTo(date)); + } + public void testStoredFieldsWithFieldAlias() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder() diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index a21893db392..b9b0aa5b201 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -37,18 +37,14 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder.FilterFunctionBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; import java.util.List; import java.util.Locale; @@ -74,9 +70,10 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; public class DecayFunctionScoreIT extends ESIntegTestCase { + @Override - protected Collection> 
nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created + protected boolean forbidPrivateIndexSettings() { + return false; } private final QueryBuilder baseQuery = constantScoreQuery(termQuery("test", "value")); @@ -613,7 +610,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { } public void testManyDocsLin() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = jsonBuilder().startObject().startObject("type").startObject("properties") .startObject("test").field("type", "text").endObject().startObject("date").field("type", "date") diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index 6657ad9823f..c9679ae2ea9 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -99,7 +99,7 @@ public class ExplainableScriptIT extends ESIntegTestCase { MyScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { super(params, lookup, leafContext); } - + @Override public Explanation explain(Explanation subQueryScore) throws IOException { Explanation scoreExp = Explanation.match(subQueryScore.getValue(), "_score: ", subQueryScore); @@ -139,10 +139,9 @@ public class ExplainableScriptIT extends ESIntegTestCase { int idCounter = 19; for (SearchHit hit : hits.getHits()) { assertThat(hit.getId(), equalTo(Integer.toString(idCounter))); - assertThat(hit.getExplanation().toString(), - containsString(Double.toString(idCounter) + " = This script returned " + Double.toString(idCounter))); - assertThat(hit.getExplanation().toString(), containsString("freq=1.0")); - assertThat(hit.getExplanation().toString(), containsString("termFreq=1.0")); + assertThat(hit.getExplanation().toString(), containsString(Double.toString(idCounter))); + assertThat(hit.getExplanation().toString(), containsString("1 = n")); + assertThat(hit.getExplanation().toString(), containsString("1 = N")); assertThat(hit.getExplanation().getDetails().length, equalTo(2)); idCounter--; } diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java index fc11554dfb3..7e96539084e 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.ArrayList; @@ -132,8 +133,8 @@ public class FunctionScoreIT extends ESIntegTestCase { } public void testMinScoreFunctionScoreBasic() throws IOException { - float score = randomFloat(); - float minScore = randomFloat(); + float score = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, ESTestCase::randomFloat); + float minScore = randomValueOtherThanMany((f) -> 
Float.compare(f, 0) < 0, ESTestCase::randomFloat); index(INDEX, TYPE, jsonBuilder().startObject() .field("num", 2) .field("random_score", score) // Pass the random score as a document field so that it can be extracted in the script @@ -167,8 +168,8 @@ public class FunctionScoreIT extends ESIntegTestCase { public void testMinScoreFunctionScoreManyDocsAndRandomMinScore() throws IOException, ExecutionException, InterruptedException { List docs = new ArrayList<>(); int numDocs = randomIntBetween(1, 100); - int scoreOffset = randomIntBetween(-2 * numDocs, 2 * numDocs); - int minScore = randomIntBetween(-2 * numDocs, 2 * numDocs); + int scoreOffset = randomIntBetween(0, 2 * numDocs); + int minScore = randomIntBetween(0, 2 * numDocs); for (int i = 0; i < numDocs; i++) { docs.add(client().prepareIndex(INDEX, TYPE, Integer.toString(i)).setSource("num", i + scoreOffset)); } diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java index 12a64d80a14..32b80089c20 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java @@ -26,15 +26,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.GeoValidationMethod; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; -import java.util.Arrays; -import java.util.Collection; - import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; @@ -45,13 +40,14 @@ import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; public class GeoBoundingBoxIT extends ESIntegTestCase { + @Override - protected Collection> nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created + protected boolean forbidPrivateIndexSettings() { + return false; } public void testSimpleBoundingBoxTest() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") @@ -123,7 +119,7 @@ public class GeoBoundingBoxIT extends ESIntegTestCase { } public void testLimit2BoundingBox() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") @@ -176,7 +172,7 @@ public class GeoBoundingBoxIT extends ESIntegTestCase { } public void testCompleteLonRange() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = 
VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java index 5966ea6a49d..d78e24a05cd 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java @@ -41,12 +41,10 @@ import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.range.InternalGeoDistance; import org.elasticsearch.search.aggregations.bucket.range.Range; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; import org.junit.Before; import java.io.IOException; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -68,7 +66,7 @@ public class GeoDistanceIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(CustomScriptPlugin.class, InternalSettingsPlugin.class); + return Collections.singletonList(CustomScriptPlugin.class); } public static class CustomScriptPlugin extends MockScriptPlugin { @@ -99,9 +97,14 @@ public class GeoDistanceIT extends ESIntegTestCase { } } + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + @Before public void setupTestIndex() throws IOException { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java index fa0531262bb..8d2f7cd6993 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java @@ -49,10 +49,8 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.Streams; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; import org.junit.BeforeClass; import org.locationtech.spatial4j.context.SpatialContext; @@ -65,8 +63,6 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; import java.util.Random; import java.util.zip.GZIPInputStream; @@ -87,8 +83,8 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; public class GeoFilterIT extends ESIntegTestCase { @Override - protected Collection> nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created + protected boolean forbidPrivateIndexSettings() { + return false; } 
     private static boolean intersectSupport;
@@ -366,7 +362,7 @@ public class GeoFilterIT extends ESIntegTestCase {
     public void testBulk() throws Exception {
         byte[] bulkAction = unZipData("/org/elasticsearch/search/geo/gzippedmap.gz");
 
-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
+        Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0,
                 Version.CURRENT);
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoPolygonIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoPolygonIT.java
index 2ff7d0c1383..741efa6c595 100644
--- a/server/src/test/java/org/elasticsearch/search/geo/GeoPolygonIT.java
+++ b/server/src/test/java/org/elasticsearch/search/geo/GeoPolygonIT.java
@@ -24,15 +24,11 @@ import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalSettingsPlugin;
 import org.elasticsearch.test.VersionUtils;
 
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
@@ -47,13 +43,13 @@ import static org.hamcrest.Matchers.equalTo;
 
 public class GeoPolygonIT extends ESIntegTestCase {
 
     @Override
-    protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created
+    protected boolean forbidPrivateIndexSettings() {
+        return false;
     }
 
     @Override
     protected void setupSuiteScopeCluster() throws Exception {
-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
+        Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0,
                 Version.CURRENT);
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
diff --git a/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
index bb480527d7a..51bc5cc4e24 100644
--- a/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
+++ b/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
@@ -25,8 +25,6 @@ import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedOrdinalsSamplerAggregator;
 import org.elasticsearch.search.aggregations.bucket.terms.GlobalOrdinalsStringTermsAggregator;
-import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregator;
-import org.elasticsearch.search.aggregations.metrics.max.MaxAggregator;
 import org.elasticsearch.search.profile.ProfileResult;
 import org.elasticsearch.search.profile.ProfileShardResult;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -180,7 +178,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
 
         ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0);
         assertThat(avgAggResult, notNullValue());
-        assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getSimpleName()));
+        assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
         assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
         assertThat(avgAggResult.getTime(), greaterThan(0L));
         Map<String, Long> avgBreakdown = termsAggResult.getTimeBreakdown();
@@ -250,7 +248,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
 
         ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0);
         assertThat(avgAggResult, notNullValue());
-        assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getSimpleName()));
+        assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
         assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
         assertThat(avgAggResult.getTime(), greaterThan(0L));
         Map<String, Long> avgBreakdown = termsAggResult.getTimeBreakdown();
@@ -303,7 +301,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
 
         ProfileResult maxAggResult = diversifyAggResult.getProfiledChildren().get(0);
         assertThat(maxAggResult, notNullValue());
-        assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getSimpleName()));
+        assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator"));
         assertThat(maxAggResult.getLuceneDescription(), equalTo("max"));
         assertThat(maxAggResult.getTime(), greaterThan(0L));
         Map<String, Long> termsBreakdown = maxAggResult.getTimeBreakdown();
@@ -381,7 +379,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
 
         ProfileResult avgAggResult = tagsAggResult.getProfiledChildren().get(0);
         assertThat(avgAggResult, notNullValue());
-        assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getSimpleName()));
+        assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
         assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
         assertThat(avgAggResult.getTime(), greaterThan(0L));
         Map<String, Long> avgBreakdown = tagsAggResult.getTimeBreakdown();
@@ -398,7 +396,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
 
         ProfileResult maxAggResult = tagsAggResult.getProfiledChildren().get(1);
         assertThat(maxAggResult, notNullValue());
-        assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getSimpleName()));
+        assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator"));
         assertThat(maxAggResult.getLuceneDescription(), equalTo("max"));
         assertThat(maxAggResult.getTime(), greaterThan(0L));
         Map<String, Long> maxBreakdown = tagsAggResult.getTimeBreakdown();
@@ -432,7 +430,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
 
         avgAggResult = stringsAggResult.getProfiledChildren().get(0);
         assertThat(avgAggResult, notNullValue());
-        assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getSimpleName()));
+        assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
         assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
         assertThat(avgAggResult.getTime(), greaterThan(0L));
         avgBreakdown = stringsAggResult.getTimeBreakdown();
@@ -449,7 +447,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
 
         maxAggResult = stringsAggResult.getProfiledChildren().get(1);
         assertThat(maxAggResult, notNullValue());
-        assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getSimpleName()));
+        assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator"));
         assertThat(maxAggResult.getLuceneDescription(), equalTo("max"));
         assertThat(maxAggResult.getTime(), greaterThan(0L));
         maxBreakdown = stringsAggResult.getTimeBreakdown();
@@ -483,7 +481,7 @@
 
         avgAggResult = tagsAggResult.getProfiledChildren().get(0);
         assertThat(avgAggResult, notNullValue());
-        assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getSimpleName()));
+        assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
         assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
         assertThat(avgAggResult.getTime(), greaterThan(0L));
         avgBreakdown = tagsAggResult.getTimeBreakdown();
@@ -500,7 +498,7 @@
 
         maxAggResult = tagsAggResult.getProfiledChildren().get(1);
         assertThat(maxAggResult, notNullValue());
-        assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getSimpleName()));
+        assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator"));
         assertThat(maxAggResult.getLuceneDescription(), equalTo("max"));
         assertThat(maxAggResult.getTime(), greaterThan(0L));
         maxBreakdown = tagsAggResult.getTimeBreakdown();
diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java
index 5e10292fa3e..fd924ce07ca 100644
--- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java
+++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java
@@ -33,6 +33,7 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.RandomApproximationQuery;
+import org.apache.lucene.search.ScoreMode;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.ScorerSupplier;
 import org.apache.lucene.search.Sort;
@@ -218,7 +219,7 @@ public class QueryProfilerTests extends ESTestCase {
         }
 
         @Override
-        public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
+        public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
             return new Weight(this) {
                 @Override
                 public void extractTerms(Set<Term> terms) {
@@ -267,7 +268,7 @@ public class QueryProfilerTests extends ESTestCase {
         w.close();
         IndexSearcher s = newSearcher(reader);
         s.setQueryCache(null);
-        Weight weight = s.createNormalizedWeight(new DummyQuery(), randomBoolean());
+        Weight weight = s.createWeight(s.rewrite(new DummyQuery()), randomFrom(ScoreMode.values()), 1f);
         // exception when getting the scorer
         expectThrows(UnsupportedOperationException.class, () -> weight.scorer(s.getIndexReader().leaves().get(0)));
         // no exception, means scorerSupplier is delegated
diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/RandomQueryGenerator.java b/server/src/test/java/org/elasticsearch/search/profile/query/RandomQueryGenerator.java
index 14fe8d58132..00b859394c6 100644
--- a/server/src/test/java/org/elasticsearch/search/profile/query/RandomQueryGenerator.java
+++ b/server/src/test/java/org/elasticsearch/search/profile/query/RandomQueryGenerator.java
@@ -61,8 +61,7 @@ public class RandomQueryGenerator {
             case 2:
                 return randomBoolQuery(stringFields, numericFields, numDocs, depth);
             case 3:
-                // disabled for now because of https://issues.apache.org/jira/browse/LUCENE-6781
-                //return randomBoostingQuery(stringFields, numericFields, numDocs, depth);
+                return randomBoostingQuery(stringFields, numericFields, numDocs, depth);
             case 4:
                 return randomConstantScoreQuery(stringFields, numericFields, numDocs, depth);
             case 5:
diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java
index 16365d829a8..7e9c0153b72 100644
--- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java
+++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java
@@ -102,7 +102,7 @@ public class QueryPhaseTests extends IndexShardTestCase {
 
         final boolean rescore = QueryPhase.execute(context, searcher, checkCancelled -> {});
         assertFalse(rescore);
-        assertEquals(searcher.count(query), context.queryResult().topDocs().totalHits);
+        assertEquals(searcher.count(query), context.queryResult().topDocs().topDocs.totalHits.value);
     }
 
     private void countTestCase(boolean withDeletions) throws Exception {
@@ -171,12 +171,12 @@ public class QueryPhaseTests extends IndexShardTestCase {
 
         context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
         QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
-        assertEquals(1, context.queryResult().topDocs().totalHits);
+        assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
 
         contextSearcher = new IndexSearcher(reader);
         context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery()));
         QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
-        assertEquals(0, context.queryResult().topDocs().totalHits);
+        assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value);
         reader.close();
         dir.close();
     }
@@ -204,14 +204,13 @@ public class QueryPhaseTests extends IndexShardTestCase {
         for (int i = 0; i < 10; i++) {
             context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i)))));
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
-            assertEquals(1, context.queryResult().topDocs().totalHits);
-            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
+            assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
         }
         reader.close();
         dir.close();
     }
 
-
     public void testMinScoreDisablesCountOptimization() throws Exception {
         Directory dir = newDirectory();
         final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
@@ -229,12 +228,12 @@ public class QueryPhaseTests extends IndexShardTestCase {
         context.setSize(0);
         context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap()));
         QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
-        assertEquals(1, context.queryResult().topDocs().totalHits);
+        assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
 
         contextSearcher = new IndexSearcher(reader);
         context.minimumScore(100);
         QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
-        assertEquals(0, context.queryResult().topDocs().totalHits);
+        assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value);
         reader.close();
         dir.close();
     }
@@ -281,25 +280,25 @@ public class QueryPhaseTests extends IndexShardTestCase {
         ScrollContext scrollContext = new ScrollContext();
         scrollContext.lastEmittedDoc = null;
         scrollContext.maxScore = Float.NaN;
-        scrollContext.totalHits = -1;
+        scrollContext.totalHits = null;
         context.scrollContext(scrollContext);
         context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap()));
         int size = randomIntBetween(2, 5);
         context.setSize(size);
 
         QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
-        assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs));
+        assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
         assertNull(context.queryResult().terminatedEarly());
         assertThat(context.terminateAfter(), equalTo(0));
-        assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs));
+        assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
 
         contextSearcher = getAssertingEarlyTerminationSearcher(reader, size);
         QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
-        assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs));
+        assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
         assertTrue(context.queryResult().terminatedEarly());
         assertThat(context.terminateAfter(), equalTo(size));
-        assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs));
-        assertThat(context.queryResult().topDocs().scoreDocs[0].doc, greaterThanOrEqualTo(size));
+        assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+        assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size));
         reader.close();
         dir.close();
     }
@@ -333,22 +332,22 @@ public class QueryPhaseTests extends IndexShardTestCase {
             context.setSize(1);
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
             assertTrue(context.queryResult().terminatedEarly());
-            assertThat(context.queryResult().topDocs().totalHits, equalTo(1L));
-            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
+            assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
 
             context.setSize(0);
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
             assertTrue(context.queryResult().terminatedEarly());
-            assertThat(context.queryResult().topDocs().totalHits, equalTo(1L));
-            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0));
+            assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
         }
 
         {
             context.setSize(1);
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
             assertTrue(context.queryResult().terminatedEarly());
-            assertThat(context.queryResult().topDocs().totalHits, equalTo(1L));
-            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
+            assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
         }
         {
             context.setSize(1);
@@ -359,15 +358,15 @@ public class QueryPhaseTests extends IndexShardTestCase {
             context.parsedQuery(new ParsedQuery(bq));
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
             assertTrue(context.queryResult().terminatedEarly());
-            assertThat(context.queryResult().topDocs().totalHits, equalTo(1L));
-            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
+            assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
 
             context.setSize(0);
             context.parsedQuery(new ParsedQuery(bq));
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
             assertTrue(context.queryResult().terminatedEarly());
-            assertThat(context.queryResult().topDocs().totalHits, equalTo(1L));
-            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0));
+            assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
         }
         {
             context.setSize(1);
@@ -375,8 +374,8 @@
             context.queryCollectors().put(TotalHitCountCollector.class, collector);
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
             assertTrue(context.queryResult().terminatedEarly());
-            assertThat(context.queryResult().topDocs().totalHits, equalTo(1L));
-            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
+            assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
             assertThat(collector.getTotalHits(), equalTo(1));
             context.queryCollectors().clear();
         }
@@ -386,8 +385,8 @@
             context.queryCollectors().put(TotalHitCountCollector.class, collector);
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
             assertTrue(context.queryResult().terminatedEarly());
-            assertThat(context.queryResult().topDocs().totalHits, equalTo(1L));
-            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0));
+            assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
             assertThat(collector.getTotalHits(), equalTo(1));
         }
 
@@ -424,19 +423,19 @@
         final IndexReader reader = DirectoryReader.open(dir);
         IndexSearcher contextSearcher = new IndexSearcher(reader);
         QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
-        assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs));
-        assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
-        assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class));
-        FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0];
+        assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+        assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+        assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+        FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0];
         assertThat(fieldDoc.fields[0], equalTo(1));
 
         {
             context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1)));
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
             assertNull(context.queryResult().terminatedEarly());
-            assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs - 1L));
-            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
-            assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class));
+            assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(numDocs - 1L));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
             assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
 
             context.parsedPostFilter(null);
@@ -444,9 +443,9 @@
             context.queryCollectors().put(TotalHitCountCollector.class, totalHitCountCollector);
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
             assertNull(context.queryResult().terminatedEarly());
-            assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs));
-            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
-            assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class));
+            assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
             assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
             assertThat(totalHitCountCollector.getTotalHits(), equalTo(numDocs));
             context.queryCollectors().clear();
@@ -457,14 +456,14 @@
             context.trackTotalHits(false);
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
             assertNull(context.queryResult().terminatedEarly());
-            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
-            assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
             assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
 
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
             assertNull(context.queryResult().terminatedEarly());
-            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
-            assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
             assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
         }
         reader.close();
@@ -503,27 +502,27 @@
         ScrollContext scrollContext = new ScrollContext();
         scrollContext.lastEmittedDoc = null;
         scrollContext.maxScore = Float.NaN;
-        scrollContext.totalHits = -1;
+        scrollContext.totalHits = null;
         context.scrollContext(scrollContext);
         context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap()));
         context.setSize(10);
         context.sort(searchSortAndFormat);
 
         QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
-        assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs));
+        assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
         assertNull(context.queryResult().terminatedEarly());
         assertThat(context.terminateAfter(), equalTo(0));
-        assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs));
-        int sizeMinus1 = context.queryResult().topDocs().scoreDocs.length - 1;
-        FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[sizeMinus1];
+        assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+        int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1;
+        FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1];
 
         contextSearcher = getAssertingEarlyTerminationSearcher(reader, 10);
         QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
         assertNull(context.queryResult().terminatedEarly());
-        assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs));
+        assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
         assertThat(context.terminateAfter(), equalTo(0));
-        assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs));
-        FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0];
+        assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+        FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0];
         for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) {
             @SuppressWarnings("unchecked")
             FieldComparator<Object> comparator = (FieldComparator<Object>) searchSortAndFormat.sort.getSort()[i].getComparator(1, i);
@@ -539,19 +538,19 @@
         dir.close();
     }
 
-    static IndexSearcher getAssertingEarlyTerminationSearcher(IndexReader reader, int size) {
+    private static IndexSearcher getAssertingEarlyTerminationSearcher(IndexReader reader, int size) {
         return new IndexSearcher(reader) {
             protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
-                final Collector in = new AssertingEalyTerminationFilterCollector(collector, size);
+                final Collector in = new AssertingEarlyTerminationFilterCollector(collector, size);
                 super.search(leaves, weight, in);
             }
         };
     }
 
-    private static class AssertingEalyTerminationFilterCollector extends FilterCollector {
+    private static class AssertingEarlyTerminationFilterCollector extends FilterCollector {
         private final int size;
 
-        AssertingEalyTerminationFilterCollector(Collector in, int size) {
+        AssertingEarlyTerminationFilterCollector(Collector in, int size) {
             super(in);
             this.size = size;
         }
diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryStringIT.java b/server/src/test/java/org/elasticsearch/search/query/QueryStringIT.java
index 5caab8c9dfe..f2d69fc377d 100644
--- a/server/src/test/java/org/elasticsearch/search/query/QueryStringIT.java
+++ b/server/src/test/java/org/elasticsearch/search/query/QueryStringIT.java
@@ -31,17 +31,13 @@ import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.query.Operator;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.index.query.QueryStringQueryBuilder;
-import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalSettingsPlugin;
 import org.junit.Before;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -58,10 +54,6 @@ import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 
 public class QueryStringIT extends ESIntegTestCase {
-    @Override
-    protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created
-    }
 
     @Before
     public void setup() throws Exception {
@@ -430,8 +422,8 @@ public class QueryStringIT extends ESIntegTestCase {
         indexRequests.add(client().prepareIndex("test", "_doc", "1").setSource("f3", "text", "f2", "one"));
         indexRandom(true, false, indexRequests);
 
-        // The wildcard field matches aliases for both a text and boolean field.
-        // By default, the boolean field should be ignored when building the query.
+        // The wildcard field matches aliases for both a text and geo_point field.
+        // By default, the geo_point field should be ignored when building the query.
         SearchResponse response = client().prepareSearch("test")
             .setQuery(queryStringQuery("text").field("f*_alias"))
             .execute().actionGet();
diff --git a/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
index 5176c327ac7..598f5625588 100644
--- a/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
+++ b/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
@@ -44,12 +44,11 @@ import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalSettingsPlugin;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -79,7 +78,7 @@ import static org.hamcrest.Matchers.equalTo;
 public class SimpleQueryStringIT extends ESIntegTestCase {
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(MockAnalysisPlugin.class, InternalSettingsPlugin.class); // uses index.version.created
+        return Collections.singletonList(MockAnalysisPlugin.class);
     }
 
     public void testSimpleQueryString() throws ExecutionException, InterruptedException {
diff --git a/server/src/test/java/org/elasticsearch/search/slice/DocValuesSliceQueryTests.java b/server/src/test/java/org/elasticsearch/search/slice/DocValuesSliceQueryTests.java
index 846c411881f..76807f4722a 100644
--- a/server/src/test/java/org/elasticsearch/search/slice/DocValuesSliceQueryTests.java
+++ b/server/src/test/java/org/elasticsearch/search/slice/DocValuesSliceQueryTests.java
@@ -27,11 +27,12 @@ import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.LeafCollector;
-import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.Scorable;
+import org.apache.lucene.search.ScoreMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.NumericUtils;
 import org.elasticsearch.common.UUIDs;
@@ -98,7 +99,7 @@ public class DocValuesSliceQueryTests extends ESTestCase {
                 public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
                     return new LeafCollector() {
                         @Override
-                        public void setScorer(Scorer scorer) throws IOException {
+                        public void setScorer(Scorable scorer) throws IOException {
                         }
 
                         @Override
@@ -112,8 +113,8 @@ public class DocValuesSliceQueryTests extends ESTestCase {
                 }
 
                 @Override
-                public boolean needsScores() {
-                    return false;
+                public ScoreMode scoreMode() {
+                    return ScoreMode.COMPLETE_NO_SCORES;
                 }
             });
         }
diff --git a/server/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java b/server/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java
index 3fa4ce41052..881dc6f9587 100644
--- a/server/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java
+++ b/server/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java
@@ -26,12 +26,12 @@ import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.LeafCollector;
-import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.QueryUtils;
-
+import org.apache.lucene.search.Scorable;
+import org.apache.lucene.search.ScoreMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.StringHelper;
@@ -92,7 +92,7 @@ public class TermsSliceQueryTests extends ESTestCase {
                 public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
                     return new LeafCollector() {
                         @Override
-                        public void setScorer(Scorer scorer) throws IOException {
+                        public void setScorer(Scorable scorer) throws IOException {
                         }
 
                        @Override
@@ -106,8 +106,8 @@ public class TermsSliceQueryTests extends ESTestCase {
                 }
 
                 @Override
-                public boolean needsScores() {
-                    return false;
+                public ScoreMode scoreMode() {
+                    return ScoreMode.COMPLETE_NO_SCORES;
                 }
             });
         }
diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java
index 965dcb3e8cc..6925e80ca27 100644
--- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java
+++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java
@@ -29,14 +29,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalSettingsPlugin;
 import org.elasticsearch.test.VersionUtils;
 
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collection;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
@@ -51,15 +47,15 @@ import static org.hamcrest.Matchers.closeTo;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 
-
 public class GeoDistanceIT extends ESIntegTestCase {
+
     @Override
-    protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(InternalSettingsPlugin.class);
+    protected boolean forbidPrivateIndexSettings() {
+        return false;
     }
 
     public void testDistanceSortingMVFields() throws Exception {
-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
+        Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0,
                 Version.CURRENT);
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
@@ -189,7 +185,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
     // Regression bug:
     // https://github.com/elastic/elasticsearch/issues/2851
     public void testDistanceSortingWithMissingGeoPoint() throws Exception {
-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
+        Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0,
                 Version.CURRENT);
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
@@ -234,7 +230,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
     }
 
     public void testDistanceSortingNestedFields() throws Exception {
-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
+        Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0,
                 Version.CURRENT);
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("company").startObject("properties")
@@ -383,7 +379,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
      * Issue 3073
      */
     public void testGeoDistanceFilter() throws IOException {
-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
+        Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0,
                 Version.CURRENT);
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         double lat = 40.720611;
diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java
index 200043a6668..a0a919b5a62 100644
--- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java
+++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java
@@ -28,16 +28,13 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.DistanceUnit;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.query.GeoValidationMethod;
-import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalSettingsPlugin;
 import org.elasticsearch.test.VersionUtils;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 
@@ -50,11 +47,12 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSort
 import static org.hamcrest.Matchers.closeTo;
 
 public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
+
     private static final String LOCATION_FIELD = "location";
 
     @Override
-    protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(InternalSettingsPlugin.class);
+    protected boolean forbidPrivateIndexSettings() {
+        return false;
     }
 
     public void testManyToManyGeoPoints() throws ExecutionException, InterruptedException, IOException {
@@ -70,7 +68,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
          * 1   2   3   4   5   6   7
          */
         Version version = randomBoolean() ? Version.CURRENT
-                : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT);
+                : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT);
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point"));
         XContentBuilder d1Builder = jsonBuilder();
@@ -136,7 +134,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
          * d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5
          */
         Version version = randomBoolean() ? Version.CURRENT
-                : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT);
+                : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT);
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point"));
         XContentBuilder d1Builder = jsonBuilder();
@@ -197,7 +195,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
          * 1   2   3   4   5   6
          */
         Version version = randomBoolean() ? Version.CURRENT
-                : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT);
+                : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT);
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point"));
         XContentBuilder d1Builder = jsonBuilder();
diff --git a/server/src/test/java/org/elasticsearch/search/source/MetadataFetchingIT.java b/server/src/test/java/org/elasticsearch/search/source/MetadataFetchingIT.java
index 460fd11fbd9..c64ae840923 100644
--- a/server/src/test/java/org/elasticsearch/search/source/MetadataFetchingIT.java
+++ b/server/src/test/java/org/elasticsearch/search/source/MetadataFetchingIT.java
@@ -18,12 +18,20 @@
  */
 package org.elasticsearch.search.source;
 
+import org.apache.lucene.search.join.ScoreMode;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.InnerHitBuilder;
+import org.elasticsearch.index.query.NestedQueryBuilder;
+import org.elasticsearch.index.query.TermQueryBuilder;
 import org.elasticsearch.search.SearchContextException;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 import org.elasticsearch.test.ESIntegTestCase;
 
+import java.util.Collections;
+
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.nullValue;
@@ -33,7 +41,7 @@ public class MetadataFetchingIT extends ESIntegTestCase {
         assertAcked(prepareCreate("test"));
         ensureGreen();
 
-        client().prepareIndex("test", "type1", "1").setSource("field", "value").execute().actionGet();
+        client().prepareIndex("test", "_doc", "1").setSource("field", "value").execute().actionGet();
         refresh();
 
         SearchResponse response = client()
@@ -42,7 +50,7 @@ public class MetadataFetchingIT extends ESIntegTestCase {
             .setFetchSource(false)
             .get();
         assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).getType(), nullValue());
+        assertThat(response.getHits().getAt(0).getType(), equalTo("_doc"));
         assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
 
         response = client()
@@ -50,15 +58,45 @@ public class MetadataFetchingIT extends ESIntegTestCase {
             .storedFields("_none_")
             .get();
         assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).getType(), nullValue());
+        assertThat(response.getHits().getAt(0).getType(), equalTo("_doc"));
         assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
     }
 
+    public void testInnerHits() {
+        assertAcked(prepareCreate("test").addMapping("_doc", "nested", "type=nested"));
+        ensureGreen();
+        client().prepareIndex("test", "_doc", "1")
+            .setSource("field", "value", "nested", Collections.singletonMap("title", "foo")).execute().actionGet();
+        refresh();
+
+        SearchResponse response = client()
+            .prepareSearch("test")
+            .storedFields("_none_")
+            .setFetchSource(false)
+            .setQuery(
+                new NestedQueryBuilder("nested", new TermQueryBuilder("nested.title", "foo"), ScoreMode.Total)
+                    .innerHit(new InnerHitBuilder()
+                        .setStoredFieldNames(Collections.singletonList("_none_"))
+                        .setFetchSourceContext(new FetchSourceContext(false)))
+            )
+            .get();
+        assertThat(response.getHits().totalHits, equalTo(1L));
+        assertThat(response.getHits().getAt(0).getId(), nullValue());
+        assertThat(response.getHits().getAt(0).getType(), equalTo("_doc"));
+        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+        SearchHits hits = response.getHits().getAt(0).getInnerHits().get("nested");
+        assertThat(hits.totalHits, equalTo(1L));
+        assertThat(hits.getAt(0).getId(), nullValue());
+        assertThat(hits.getAt(0).getType(), equalTo("_doc"));
+        assertThat(hits.getAt(0).getSourceAsString(), nullValue());
+    }
+
     public void testWithRouting() {
         assertAcked(prepareCreate("test"));
         ensureGreen();
 
-        client().prepareIndex("test", "type1", "1").setSource("field", "value").setRouting("toto").execute().actionGet();
+        client().prepareIndex("test", "_doc", "1").setSource("field", "value").setRouting("toto").execute().actionGet();
         refresh();
 
         SearchResponse response = client()
@@ -67,7 +105,7 @@ public class MetadataFetchingIT extends ESIntegTestCase {
             .setFetchSource(false)
             .get();
         assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).getType(), nullValue());
+        assertThat(response.getHits().getAt(0).getType(), equalTo("_doc"));
         assertThat(response.getHits().getAt(0).field("_routing"), nullValue());
         assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
 
@@ -76,7 +114,7 @@ public class MetadataFetchingIT extends ESIntegTestCase {
             .storedFields("_none_")
             .get();
         assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).getType(), nullValue());
+        assertThat(response.getHits().getAt(0).getType(), equalTo("_doc"));
         assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
     }
diff --git a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java
index 58b2b863963..ca21cbc86ca 100644
--- a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java
+++ b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java
@@ -31,12 +31,10 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.common.FieldMemoryStats;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.aggregations.AggregationBuilders;
@@ -528,7 +526,7 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase {
         Settings.Builder settingsBuilder = Settings.builder()
                 .put("analysis.analyzer.suggest_analyzer_synonyms.type", "custom")
                 .put("analysis.analyzer.suggest_analyzer_synonyms.tokenizer", "standard")
-                .putList("analysis.analyzer.suggest_analyzer_synonyms.filter", "standard", "lowercase", "my_synonyms")
+                .putList("analysis.analyzer.suggest_analyzer_synonyms.filter", "lowercase", "my_synonyms")
                .put("analysis.filter.my_synonyms.type", "synonym")
                 .putList("analysis.filter.my_synonyms.synonyms", "foo,renamed");
         completionMappingBuilder.searchAnalyzer("suggest_analyzer_synonyms").indexAnalyzer("suggest_analyzer_synonyms");
@@ -806,7 +804,7 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase {
     public void testThatSuggestStopFilterWorks() throws Exception {
         Settings.Builder settingsBuilder = Settings.builder()
                 .put("index.analysis.analyzer.stoptest.tokenizer", "standard")
-                .putList("index.analysis.analyzer.stoptest.filter", "standard", "suggest_stop_filter")
+                .putList("index.analysis.analyzer.stoptest.filter", "suggest_stop_filter")
                 .put("index.analysis.filter.suggest_stop_filter.type", "stop")
                 .put("index.analysis.filter.suggest_stop_filter.remove_trailing", false);
 
@@ -1111,35 +1109,6 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase {
         }
     }
 
-    // see issue #6399
-    public void testIndexingUnrelatedNullValue() throws Exception {
-        String mapping = Strings
-                .toString(jsonBuilder()
-                        .startObject()
-                        .startObject(TYPE)
-                        .startObject("properties")
-                        .startObject(FIELD)
-                        .field("type", "completion")
-                        .endObject()
-                        .endObject()
-                        .endObject()
-                        .endObject());
-
-        assertAcked(client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, mapping, XContentType.JSON).get());
-        ensureGreen();
-
-        client().prepareIndex(INDEX, TYPE, "1").setSource(FIELD, "strings make me happy", FIELD + "_1", "nulls make me sad")
-                .setRefreshPolicy(IMMEDIATE).get();
-
-        try {
-            client().prepareIndex(INDEX, TYPE, "2").setSource(FIELD, null, FIELD + "_1", "nulls make me sad").get();
-            fail("Expected MapperParsingException for null value");
-        } catch (MapperParsingException e) {
-            // make sure that the exception has the name of the field causing the error
-            assertTrue(e.getDetailedMessage().contains(FIELD));
-        }
-    }
-
     public void testMultiDocSuggestions() throws Exception {
         final CompletionMappingBuilder mapping = new CompletionMappingBuilder();
         createIndexAndMapping(mapping);
diff --git a/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java
index aaeaadd4c9f..995a2c10fe5 100644
--- a/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java
+++ b/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java
@@ -687,7 +687,7 @@ public class SuggestSearchIT extends ESIntegTestCase {
                 .put(indexSettings())
                 .put(IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey(), 4)
                 .put("index.analysis.analyzer.suggest.tokenizer", "standard")
-                .putList("index.analysis.analyzer.suggest.filter", "standard", "lowercase", "shingler")
+                .putList("index.analysis.analyzer.suggest.filter", "lowercase", "shingler")
                 .put("index.analysis.filter.shingler.type", "shingle")
                 .put("index.analysis.filter.shingler.min_shingle_size", 2)
                 .put("index.analysis.filter.shingler.max_shingle_size", 5)
@@ -748,7 +748,7 @@ public class SuggestSearchIT extends ESIntegTestCase {
                 .put(indexSettings())
                 .put(IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey(), 4)
                 .put("index.analysis.analyzer.suggest.tokenizer", "standard")
-                .putList("index.analysis.analyzer.suggest.filter", "standard", "lowercase", "shingler")
+                .putList("index.analysis.analyzer.suggest.filter", "lowercase", "shingler")
                 .put("index.analysis.filter.shingler.type", "shingle")
                 .put("index.analysis.filter.shingler.min_shingle_size", 2)
                 .put("index.analysis.filter.shingler.max_shingle_size", 5)
diff --git a/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java b/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java
index 925526323a5..ca95310cd50 100644
--- a/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java
@@ -21,7 +21,7 @@ package org.elasticsearch.search.suggest.phrase;
 
 import org.apache.lucene.search.spell.DirectSpellChecker;
 import org.apache.lucene.search.spell.JaroWinklerDistance;
-import org.apache.lucene.search.spell.LevensteinDistance;
+import org.apache.lucene.search.spell.LevenshteinDistance;
 import org.apache.lucene.search.spell.LuceneLevenshteinDistance;
 import org.apache.lucene.search.spell.NGramDistance;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -76,7 +76,7 @@ public class DirectCandidateGeneratorTests extends ESTestCase {
     public void testFromString() {
         assertThat(DirectCandidateGeneratorBuilder.resolveDistance("internal"), equalTo(DirectSpellChecker.INTERNAL_LEVENSHTEIN));
         assertThat(DirectCandidateGeneratorBuilder.resolveDistance("damerau_levenshtein"), instanceOf(LuceneLevenshteinDistance.class));
-        assertThat(DirectCandidateGeneratorBuilder.resolveDistance("levenshtein"), instanceOf(LevensteinDistance.class));
+        assertThat(DirectCandidateGeneratorBuilder.resolveDistance("levenshtein"), instanceOf(LevenshteinDistance.class));
         assertThat(DirectCandidateGeneratorBuilder.resolveDistance("jaro_winkler"), instanceOf(JaroWinklerDistance.class));
         assertThat(DirectCandidateGeneratorBuilder.resolveDistance("ngram"), instanceOf(NGramDistance.class));
 
diff --git a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
index 23c56688e00..c25cad61e07 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
@@ -27,6 +27,7 @@ import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.snapshots.mockstore.MockRepository;
 import org.elasticsearch.test.ESIntegTestCase;
+import org.junit.After;
 
 import java.io.IOException;
 import java.nio.file.FileVisitResult;
@@ -58,6 +59,11 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
         return Arrays.asList(MockRepository.Plugin.class);
     }
 
+    @After
+    public void assertConsistentHistoryInLuceneIndex() throws Exception {
+        internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex();
+    }
+
     public static long getFailureCount(String repository) {
         long failureCount = 0;
         for (RepositoriesService repositoriesService :
diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 1230d594b98..632a1ecbee1 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -122,6 +122,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; +import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.shard.IndexShardTests.getEngineFromShard; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -2048,7 +2049,9 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); // only one shard - assertAcked(prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1))); + final Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).build(); + assertAcked(prepareCreate("test").setSettings(indexSettings)); ensureGreen(); logger.info("--> indexing"); @@ -2094,7 +2097,13 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-2").get().getSnapshots().get(0); List shards = snapshotStatus.getShards(); for (SnapshotIndexShardStatus status : shards) { - assertThat(status.getStats().getProcessedFileCount(), equalTo(2)); // we flush before the snapshot such that we have to process the segments_N files plus the .del file + // we flush before the snapshot such that we have to process the segments_N files plus the .del file + if (INDEX_SOFT_DELETES_SETTING.get(indexSettings)) { + // soft-delete generates DV files. 
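+ // the number of these files varies with the segment count, so only a lower bound on the processed file count can be asserted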
+ assertThat(status.getStats().getProcessedFileCount(), greaterThan(2)); + } else { + assertThat(status.getStats().getProcessedFileCount(), equalTo(2)); + } } } } diff --git a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java index 3c099c32bde..bff5a2b122d 100644 --- a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java @@ -159,7 +159,7 @@ public class ConnectionManagerTests extends ESTestCase { assertFalse(connection.isClosed()); assertTrue(connectionManager.nodeConnected(node)); assertSame(connection, connectionManager.getConnection(node)); - assertEquals(1, connectionManager.connectedNodeCount()); + assertEquals(1, connectionManager.size()); assertEquals(1, nodeConnectedCount.get()); assertEquals(0, nodeDisconnectedCount.get()); @@ -169,7 +169,7 @@ public class ConnectionManagerTests extends ESTestCase { connection.close(); } assertTrue(connection.isClosed()); - assertEquals(0, connectionManager.connectedNodeCount()); + assertEquals(0, connectionManager.size()); assertEquals(1, nodeConnectedCount.get()); assertEquals(1, nodeDisconnectedCount.get()); } @@ -205,7 +205,7 @@ public class ConnectionManagerTests extends ESTestCase { assertTrue(connection.isClosed()); assertFalse(connectionManager.nodeConnected(node)); expectThrows(NodeNotConnectedException.class, () -> connectionManager.getConnection(node)); - assertEquals(0, connectionManager.connectedNodeCount()); + assertEquals(0, connectionManager.size()); assertEquals(0, nodeConnectedCount.get()); assertEquals(0, nodeDisconnectedCount.get()); } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java index 8cfec0a07f9..3f85d927e92 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -52,7 +52,7 @@ public class RemoteClusterClientTests extends ESTestCase { Settings localSettings = Settings.builder() .put(RemoteClusterService.ENABLE_REMOTE_CLUSTERS.getKey(), true) - .put("search.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build(); + .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build(); try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { service.start(); service.acceptIncomingRequests(); @@ -77,17 +77,19 @@ public class RemoteClusterClientTests extends ESTestCase { DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode(); Settings localSettings = Settings.builder() .put(RemoteClusterService.ENABLE_REMOTE_CLUSTERS.getKey(), true) - .put("search.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build(); + .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build(); try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { Semaphore semaphore = new Semaphore(1); service.start(); - service.addConnectionListener(new TransportConnectionListener() { - @Override - public void onNodeDisconnected(DiscoveryNode node) { - if 
(remoteNode.equals(node)) { - semaphore.release(); + service.getRemoteClusterService().getConnections().forEach(con -> { + con.getConnectionManager().addListener(new TransportConnectionListener() { + @Override + public void onNodeDisconnected(DiscoveryNode node) { + if (remoteNode.equals(node)) { + semaphore.release(); + } } - } + }); }); // this test is not perfect since we might reconnect concurrently but it will fail most of the time if we don't have // the right calls in place in the RemoteAwareClient @@ -95,7 +97,9 @@ public class RemoteClusterClientTests extends ESTestCase { for (int i = 0; i < 10; i++) { semaphore.acquire(); try { - service.disconnectFromNode(remoteNode); + service.getRemoteClusterService().getConnections().forEach(con -> { + con.getConnectionManager().disconnectFromNode(remoteNode); + }); semaphore.acquire(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); Client client = remoteClusterService.getRemoteClusterClient(threadPool, "test"); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 3d0388ccfad..88b01c66898 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.transport; +import java.util.HashMap; +import java.util.Map; import java.util.function.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; @@ -52,6 +54,7 @@ import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -145,7 +148,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { } } - public void testLocalProfileIsUsedForLocalCluster() throws Exception { + public void testRemoteProfileIsUsedForLocalCluster() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { @@ -159,7 +162,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -175,9 +178,12 @@ public class RemoteClusterConnectionTests extends ESTestCase { }); TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.BULK) .build(); - service.sendRequest(connection.getConnection(), ClusterSearchShardsAction.NAME, new ClusterSearchShardsRequest(), - options, futureHandler); - futureHandler.txGet(); + IllegalStateException ise = (IllegalStateException) 
expectThrows(SendRequestTransportException.class, () -> { + service.sendRequest(discoverableNode, + ClusterSearchShardsAction.NAME, new ClusterSearchShardsRequest(), options, futureHandler); + futureHandler.txGet(); + }).getCause(); + assertEquals(ise.getMessage(), "can't select channel size is 0 for types: [RECOVERY, BULK, STATE]"); } } } @@ -199,7 +205,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -255,7 +261,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -284,7 +290,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, seedNodes); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -311,7 +317,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -360,7 +366,8 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, + n -> n.equals(rejectedNode) == false)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); if (rejectedNode.equals(seedNode)) { assertFalse(service.nodeConnected(seedNode)); @@ -374,15 +381,19 @@ public class RemoteClusterConnectionTests extends ESTestCase { } } } - private void updateSeedNodes(RemoteClusterConnection connection, List> seedNodes) throws Exception { + updateSeedNodes(connection, seedNodes, null); + } + + private void updateSeedNodes(RemoteClusterConnection connection, List> seedNodes, String 
proxyAddress) + throws Exception { CountDownLatch latch = new CountDownLatch(1); AtomicReference exceptionAtomicReference = new AtomicReference<>(); ActionListener listener = ActionListener.wrap(x -> latch.countDown(), x -> { exceptionAtomicReference.set(x); latch.countDown(); }); - connection.updateSeedNodes(seedNodes, listener); + connection.updateSeedNodes(proxyAddress, seedNodes, listener); latch.await(); if (exceptionAtomicReference.get() != null) { throw exceptionAtomicReference.get(); @@ -399,7 +410,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { expectThrows(Exception.class, () -> updateSeedNodes(connection, Arrays.asList(() -> seedNode))); assertFalse(service.nodeConnected(seedNode)); assertTrue(connection.assertNoRunningConnections()); @@ -462,7 +473,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { connection.addConnectedNode(seedNode); for (DiscoveryNode node : knownNodes) { final Transport.Connection transportConnection = connection.getConnection(node); @@ -505,7 +516,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { CountDownLatch listenerCalled = new CountDownLatch(1); AtomicReference exceptionReference = new AtomicReference<>(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { ActionListener listener = ActionListener.wrap(x -> { listenerCalled.countDown(); fail("expected exception"); @@ -513,7 +524,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { exceptionReference.set(x); listenerCalled.countDown(); }); - connection.updateSeedNodes(Arrays.asList(() -> seedNode), listener); + connection.updateSeedNodes(null, Arrays.asList(() -> seedNode), listener); acceptedLatch.await(); connection.close(); // now close it, this should trigger an interrupt on the socket and we can move on assertTrue(connection.assertNoRunningConnections()); @@ -542,7 +553,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.acceptIncomingRequests(); List> nodes = Collections.singletonList(() -> seedNode); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - nodes, service, Integer.MAX_VALUE, n -> true)) { + nodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { if (randomBoolean()) { updateSeedNodes(connection, nodes); } @@ -582,7 +593,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.acceptIncomingRequests(); List> nodes = Collections.singletonList(() -> seedNode); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - nodes, service, Integer.MAX_VALUE, n -> true)) { + 
nodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { SearchRequest request = new SearchRequest("test-index"); Thread[] threads = new Thread[10]; for (int i = 0; i < threads.length; i++) { @@ -636,7 +647,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Collections.singletonList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Collections.singletonList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { SearchRequest request = new SearchRequest("test-index"); ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") @@ -746,7 +757,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; CyclicBarrier barrier = new CyclicBarrier(numThreads); @@ -783,7 +794,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { throw new AssertionError(x); } }); - connection.updateSeedNodes(seedNodes, listener); + connection.updateSeedNodes(null, seedNodes, listener); } latch.await(); } catch (Exception ex) { @@ -824,7 +835,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); @@ -871,7 +882,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { } }); try { - connection.updateSeedNodes(seedNodes, listener); + connection.updateSeedNodes(null, seedNodes, listener); } catch (Exception e) { // it's ok if we're shutting down assertThat(e.getMessage(), containsString("threadcontext is already closed")); @@ -913,7 +924,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.acceptIncomingRequests(); int maxNumConnections = randomIntBetween(1, 5); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, maxNumConnections, n -> true)) { + seedNodes, service, service.connectionManager(), maxNumConnections, n -> true)) { // test no nodes connected RemoteConnectionInfo remoteConnectionInfo = assertSerialization(connection.getConnectionInfo()); assertNotNull(remoteConnectionInfo); @@ -1060,7 +1071,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { assertFalse(service.nodeConnected(seedNode)); assertFalse(service.nodeConnected(discoverableNode)); 
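// nothing is connected yet, so no connect attempts should be in flight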
assertTrue(connection.assertNoRunningConnections()); @@ -1109,7 +1120,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { if (randomBoolean()) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); } @@ -1157,7 +1168,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { final int numGetThreads = randomIntBetween(4, 10); final Thread[] getThreads = new Thread[numGetThreads]; final int numModifyingThreads = randomIntBetween(4, 10); @@ -1247,7 +1258,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList( () -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList( () -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -1327,7 +1338,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Collections.singletonList(() -> connectedNode), service, Integer.MAX_VALUE, n -> true)) { + Collections.singletonList(() -> connectedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { connection.addConnectedNode(connectedNode); for (int i = 0; i < 10; i++) { //always a direct connection as the remote node is already connected @@ -1335,9 +1346,9 @@ public class RemoteClusterConnectionTests extends ESTestCase { assertSame(seedConnection, remoteConnection); } for (int i = 0; i < 10; i++) { - //always a direct connection as the remote node is already connected + // we don't use the transport service connection manager so we will get a proxy connection for the local node Transport.Connection remoteConnection = connection.getConnection(service.getLocalNode()); - assertThat(remoteConnection, not(instanceOf(RemoteClusterConnection.ProxyConnection.class))); + assertThat(remoteConnection, instanceOf(RemoteClusterConnection.ProxyConnection.class)); assertThat(remoteConnection.getNode(), equalTo(service.getLocalNode())); } for (int i = 0; i < 10; i++) { @@ -1369,7 +1380,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { return seedNode; }; try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(seedSupplier), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, Arrays.asList(seedSupplier)); // Closing connections leads to RemoteClusterConnection.ConnectHandler.collectRemoteNodes // being 
called again so we try to resolve the same seed node's host twice @@ -1380,4 +1391,97 @@ } } } + + public void testProxyMode() throws Exception { + List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService seedTransport = startTransport("node_0", knownNodes, Version.CURRENT); + MockTransportService discoverableTransport = startTransport("node_1", knownNodes, Version.CURRENT)) { + knownNodes.add(seedTransport.getLocalDiscoNode()); + knownNodes.add(discoverableTransport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + final String proxyAddress = "1.1.1.1:99"; + Map<String, DiscoveryNode> nodes = new HashMap<>(); + nodes.put("node_0", seedTransport.getLocalDiscoNode()); + nodes.put("node_1", discoverableTransport.getLocalDiscoNode()); + Transport mockTcpTransport = getProxyTransport(threadPool, Collections.singletonMap(proxyAddress, nodes)); + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, mockTcpTransport, Version.CURRENT, + threadPool, null, Collections.emptySet())) { + service.start(); + service.acceptIncomingRequests(); + Supplier<DiscoveryNode> seedSupplier = () -> + RemoteClusterAware.buildSeedNode("some-remote-cluster", "node_0:" + randomIntBetween(1, 10000), true); + try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", + Arrays.asList(seedSupplier), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, proxyAddress)) { + updateSeedNodes(connection, Arrays.asList(seedSupplier), proxyAddress); + assertEquals(2, connection.getNumNodesConnected()); + assertNotNull(connection.getConnection(discoverableTransport.getLocalDiscoNode())); + assertNotNull(connection.getConnection(seedTransport.getLocalDiscoNode())); + assertEquals(proxyAddress, connection.getConnection(seedTransport.getLocalDiscoNode()) + .getNode().getAddress().toString()); + assertEquals(proxyAddress, connection.getConnection(discoverableTransport.getLocalDiscoNode()) + .getNode().getAddress().toString()); + service.getConnectionManager().disconnectFromNode(knownNodes.get(0)); + // ensure we reconnect + assertBusy(() -> { + assertEquals(2, connection.getNumNodesConnected()); + }); + discoverableTransport.close(); + seedTransport.close(); + } + } + } + } + + public static Transport getProxyTransport(ThreadPool threadPool, Map<String, Map<String, DiscoveryNode>> nodeMap) { + if (nodeMap.isEmpty()) { + throw new IllegalArgumentException("nodeMap must be non-empty"); + } + + StubbableTransport stubbableTransport = new StubbableTransport(MockTransportService.newMockTransport(Settings.EMPTY, Version + .CURRENT, threadPool)); + stubbableTransport.setDefaultConnectBehavior((t, node, profile) -> { + Map<String, DiscoveryNode> proxyMapping = nodeMap.get(node.getAddress().toString()); + if (proxyMapping == null) { + throw new IllegalStateException("no proxy mapping for node: " + node); + } + DiscoveryNode proxyNode = proxyMapping.get(node.getName()); + if (proxyNode == null) { + // this is a seed node - it is only known by its hostname + assertEquals("seed node must not have a port in the hostname: " + node.getHostName(), + -1, node.getHostName().lastIndexOf(':')); + assertTrue("missing hostname: " + node, proxyMapping.containsKey(node.getHostName())); + // route by seed hostname + proxyNode = proxyMapping.get(node.getHostName()); + } + Transport.Connection connection = t.openConnection(proxyNode, profile); + return new Transport.Connection() { + @Override + public DiscoveryNode getNode() { + return node; + } + + @Override + public void
sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws IOException, TransportException { + connection.sendRequest(requestId, action, request, options); + } + + @Override + public void addCloseListener(ActionListener<Void> listener) { + connection.addCloseListener(listener); + } + + @Override + public boolean isClosed() { + return connection.isClosed(); + } + + @Override + public void close() { + connection.close(); + } + }; + }); + return stubbableTransport; + } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index c94b1cbdef5..9732edb4227 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.AbstractScopedSettings; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -55,6 +56,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.function.Predicate; +import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; @@ -96,17 +98,17 @@ public class RemoteClusterServiceTests extends ESTestCase { public void testRemoteClusterSeedSetting() { // simple validation Settings settings = Settings.builder() - .put("search.remote.foo.seeds", "192.168.0.1:8080") - .put("search.remote.bar.seed", "[::1]:9090").build(); + .put("cluster.remote.foo.seeds", "192.168.0.1:8080") + .put("cluster.remote.bar.seed", "[::1]:9090").build(); RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings).forEach(setting -> setting.get(settings)); Settings brokenSettings = Settings.builder() - .put("search.remote.foo.seeds", "192.168.0.1").build(); + .put("cluster.remote.foo.seeds", "192.168.0.1").build(); expectThrows(IllegalArgumentException.class, () -> RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings).forEach(setting -> setting.get(brokenSettings))); Settings brokenPortSettings = Settings.builder() - .put("search.remote.foo.seeds", "192.168.0.1:123456789123456789").build(); + .put("cluster.remote.foo.seeds", "192.168.0.1:123456789123456789").build(); Exception e = expectThrows( IllegalArgumentException.class, () -> RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings) @@ -115,25 +117,38 @@ assertEquals("failed to parse port", e.getMessage()); } - public void testBuiltRemoteClustersSeeds() throws Exception { - Map<String, List<Supplier<DiscoveryNode>>> map = RemoteClusterService.buildRemoteClustersSeeds( - Settings.builder().put("search.remote.foo.seeds", "192.168.0.1:8080").put("search.remote.bar.seeds", "[::1]:9090").build()); - assertEquals(2, map.size()); + public void testBuildRemoteClustersDynamicConfig() throws Exception { + Map<String, Tuple<String, List<Supplier<DiscoveryNode>>>> map = RemoteClusterService.buildRemoteClustersDynamicConfig( + Settings.builder().put("cluster.remote.foo.seeds",
"192.168.0.1:8080") + .put("cluster.remote.bar.seeds", "[::1]:9090") + .put("cluster.remote.boom.seeds", "boom-node1.internal:1000") + .put("cluster.remote.boom.proxy", "foo.bar.com:1234").build()); + assertEquals(3, map.size()); assertTrue(map.containsKey("foo")); assertTrue(map.containsKey("bar")); - assertEquals(1, map.get("foo").size()); - assertEquals(1, map.get("bar").size()); - - DiscoveryNode foo = map.get("foo").get(0).get(); + assertTrue(map.containsKey("boom")); + assertEquals(1, map.get("foo").v2().size()); + assertEquals(1, map.get("bar").v2().size()); + assertEquals(1, map.get("boom").v2().size()); + DiscoveryNode foo = map.get("foo").v2().get(0).get(); + assertEquals("", map.get("foo").v1()); assertEquals(foo.getAddress(), new TransportAddress(new InetSocketAddress(InetAddress.getByName("192.168.0.1"), 8080))); assertEquals(foo.getId(), "foo#192.168.0.1:8080"); assertEquals(foo.getVersion(), Version.CURRENT.minimumCompatibilityVersion()); - DiscoveryNode bar = map.get("bar").get(0).get(); + DiscoveryNode bar = map.get("bar").v2().get(0).get(); assertEquals(bar.getAddress(), new TransportAddress(new InetSocketAddress(InetAddress.getByName("[::1]"), 9090))); assertEquals(bar.getId(), "bar#[::1]:9090"); + assertEquals("", map.get("bar").v1()); assertEquals(bar.getVersion(), Version.CURRENT.minimumCompatibilityVersion()); + + DiscoveryNode boom = map.get("boom").v2().get(0).get(); + assertEquals(boom.getAddress(), new TransportAddress(TransportAddress.META_ADDRESS, 0)); + assertEquals("boom-node1.internal", boom.getHostName()); + assertEquals(boom.getId(), "boom#boom-node1.internal:1000"); + assertEquals("foo.bar.com:1234", map.get("boom").v1()); + assertEquals(boom.getVersion(), Version.CURRENT.minimumCompatibilityVersion()); } @@ -152,8 +167,8 @@ public class RemoteClusterServiceTests extends ESTestCase { transportService.start(); transportService.acceptIncomingRequests(); Settings.Builder builder = Settings.builder(); - builder.putList("search.remote.cluster_1.seeds", seedNode.getAddress().toString()); - builder.putList("search.remote.cluster_2.seeds", otherSeedNode.getAddress().toString()); + builder.putList("cluster.remote.cluster_1.seeds", seedNode.getAddress().toString()); + builder.putList("cluster.remote.cluster_2.seeds", otherSeedNode.getAddress().toString()); try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) { assertFalse(service.isCrossClusterSearchEnabled()); service.initializeRemoteClusters(); @@ -198,23 +213,23 @@ public class RemoteClusterServiceTests extends ESTestCase { transportService.start(); transportService.acceptIncomingRequests(); Settings.Builder builder = Settings.builder(); - builder.putList("search.remote.cluster_1.seeds", seedNode.getAddress().toString()); - builder.putList("search.remote.cluster_2.seeds", otherSeedNode.getAddress().toString()); + builder.putList("cluster.remote.cluster_1.seeds", seedNode.getAddress().toString()); + builder.putList("cluster.remote.cluster_2.seeds", otherSeedNode.getAddress().toString()); try (RemoteClusterService service = new RemoteClusterService(Settings.EMPTY, transportService)) { assertFalse(service.isCrossClusterSearchEnabled()); service.initializeRemoteClusters(); assertFalse(service.isCrossClusterSearchEnabled()); - service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().toString())); + service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().toString()), null); 
assertTrue(service.isCrossClusterSearchEnabled()); assertTrue(service.isRemoteClusterRegistered("cluster_1")); - service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().toString())); + service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().toString()), null); assertTrue(service.isCrossClusterSearchEnabled()); assertTrue(service.isRemoteClusterRegistered("cluster_1")); assertTrue(service.isRemoteClusterRegistered("cluster_2")); - service.updateRemoteCluster("cluster_2", Collections.emptyList()); + service.updateRemoteCluster("cluster_2", Collections.emptyList(), null); assertFalse(service.isRemoteClusterRegistered("cluster_2")); IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, - () -> service.updateRemoteCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, Collections.emptyList())); + () -> service.updateRemoteCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, Collections.emptyList(), null)); assertEquals("remote clusters must not have the empty string as its key", iae.getMessage()); } } @@ -223,7 +238,7 @@ public class RemoteClusterServiceTests extends ESTestCase { public void testRemoteNodeAttribute() throws IOException, InterruptedException { final Settings settings = - Settings.builder().put("search.remote.node.attr", "gateway").build(); + Settings.builder().put("cluster.remote.node.attr", "gateway").build(); final List knownNodes = new CopyOnWriteArrayList<>(); final Settings gateway = Settings.builder().put("node.attr.gateway", true).build(); try (MockTransportService c1N1 = @@ -253,9 +268,9 @@ public class RemoteClusterServiceTests extends ESTestCase { transportService.acceptIncomingRequests(); final Settings.Builder builder = Settings.builder(); builder.putList( - "search.remote.cluster_1.seeds", c1N1Node.getAddress().toString()); + "cluster.remote.cluster_1.seeds", c1N1Node.getAddress().toString()); builder.putList( - "search.remote.cluster_2.seeds", c2N1Node.getAddress().toString()); + "cluster.remote.cluster_2.seeds", c2N1Node.getAddress().toString()); try (RemoteClusterService service = new RemoteClusterService(settings, transportService)) { assertFalse(service.isCrossClusterSearchEnabled()); @@ -265,14 +280,14 @@ public class RemoteClusterServiceTests extends ESTestCase { final CountDownLatch firstLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_1", - Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), + Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), null, connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", - Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), + Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), null, connectionListener(secondLatch)); secondLatch.await(); @@ -283,6 +298,7 @@ public class RemoteClusterServiceTests extends ESTestCase { assertTrue(service.isRemoteClusterRegistered("cluster_2")); assertFalse(service.isRemoteNodeConnected("cluster_2", c2N1Node)); assertTrue(service.isRemoteNodeConnected("cluster_2", c2N2Node)); + assertEquals(0, transportService.getConnectionManager().size()); } } } @@ -319,8 +335,8 @@ public class RemoteClusterServiceTests extends ESTestCase { transportService.start(); transportService.acceptIncomingRequests(); final Settings.Builder builder = Settings.builder(); - 
builder.putList("search.remote.cluster_1.seeds", c1N1Node.getAddress().toString()); - builder.putList("search.remote.cluster_2.seeds", c2N1Node.getAddress().toString()); + builder.putList("cluster.remote.cluster_1.seeds", c1N1Node.getAddress().toString()); + builder.putList("cluster.remote.cluster_2.seeds", c2N1Node.getAddress().toString()); try (RemoteClusterService service = new RemoteClusterService(settings, transportService)) { assertFalse(service.isCrossClusterSearchEnabled()); service.initializeRemoteClusters(); @@ -329,14 +345,14 @@ public class RemoteClusterServiceTests extends ESTestCase { final CountDownLatch firstLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_1", - Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), + Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), null, connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", - Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), + Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), null, connectionListener(secondLatch)); secondLatch.await(); @@ -347,6 +363,7 @@ public class RemoteClusterServiceTests extends ESTestCase { assertTrue(service.isRemoteClusterRegistered("cluster_2")); assertFalse(service.isRemoteNodeConnected("cluster_2", c2N1Node)); assertTrue(service.isRemoteNodeConnected("cluster_2", c2N2Node)); + assertEquals(0, transportService.getConnectionManager().size()); } } } @@ -389,9 +406,9 @@ public class RemoteClusterServiceTests extends ESTestCase { transportService.acceptIncomingRequests(); final Settings.Builder builder = Settings.builder(); builder.putList( - "search.remote.cluster_1.seeds", c1N1Node.getAddress().toString()); + "cluster.remote.cluster_1.seeds", c1N1Node.getAddress().toString()); builder.putList( - "search.remote.cluster_2.seeds", c2N1Node.getAddress().toString()); + "cluster.remote.cluster_2.seeds", c2N1Node.getAddress().toString()); try (RemoteClusterService service = new RemoteClusterService(settings, transportService)) { assertFalse(service.isCrossClusterSearchEnabled()); @@ -401,14 +418,14 @@ public class RemoteClusterServiceTests extends ESTestCase { final CountDownLatch firstLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_1", - Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), + Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), null, connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", - Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), + Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), null, connectionListener(secondLatch)); secondLatch.await(); CountDownLatch latch = new CountDownLatch(1); @@ -523,7 +540,7 @@ public class RemoteClusterServiceTests extends ESTestCase { DiscoveryNode remoteSeedNode = remoteSeedTransport.getLocalDiscoNode(); knownNodes.add(remoteSeedNode); nodes[i] = remoteSeedNode; - builder.put("search.remote.remote" + i + ".seeds", remoteSeedNode.getAddress().toString()); + builder.put("cluster.remote.remote" + i + ".seeds", remoteSeedNode.getAddress().toString()); remoteIndicesByCluster.put("remote" + i, new OriginalIndices(new String[]{"index"}, 
IndicesOptions.lenientExpandOpen())); } Settings settings = builder.build(); @@ -579,14 +596,16 @@ public class RemoteClusterServiceTests extends ESTestCase { } CountDownLatch disconnectedLatch = new CountDownLatch(numDisconnectedClusters); - service.addConnectionListener(new TransportConnectionListener() { - @Override - public void onNodeDisconnected(DiscoveryNode node) { - if (disconnectedNodes.remove(node)) { - disconnectedLatch.countDown(); + for (RemoteClusterConnection connection : remoteClusterService.getConnections()) { + connection.getConnectionManager().addListener(new TransportConnectionListener() { + @Override + public void onNodeDisconnected(DiscoveryNode node) { + if (disconnectedNodes.remove(node)) { + disconnectedLatch.countDown(); + } } - } - }); + }); + } for (DiscoveryNode disconnectedNode : disconnectedNodes) { service.addFailToSendNoConnectRule(disconnectedNode.getAddress()); @@ -664,6 +683,7 @@ public class RemoteClusterServiceTests extends ESTestCase { assertTrue(shardsResponse != ClusterSearchShardsResponse.EMPTY); } } + assertEquals(0, service.getConnectionManager().size()); } } } finally { @@ -676,13 +696,13 @@ public class RemoteClusterServiceTests extends ESTestCase { public void testRemoteClusterSkipIfDisconnectedSetting() { { Settings settings = Settings.builder() - .put("search.remote.foo.skip_unavailable", true) - .put("search.remote.bar.skip_unavailable", false).build(); + .put("cluster.remote.foo.skip_unavailable", true) + .put("cluster.remote.bar.skip_unavailable", false).build(); RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getAllConcreteSettings(settings).forEach(setting -> setting.get(settings)); } { Settings brokenSettings = Settings.builder() - .put("search.remote.foo.skip_unavailable", "broken").build(); + .put("cluster.remote.foo.skip_unavailable", "broken").build(); expectThrows(IllegalArgumentException.class, () -> RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getAllConcreteSettings(brokenSettings) .forEach(setting -> setting.get(brokenSettings))); @@ -692,22 +712,22 @@ public class RemoteClusterServiceTests extends ESTestCase { new HashSet<>(Arrays.asList(RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE))); { - Settings settings = Settings.builder().put("search.remote.foo.skip_unavailable", randomBoolean()).build(); + Settings settings = Settings.builder().put("cluster.remote.foo.skip_unavailable", randomBoolean()).build(); IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> service.validate(settings, true)); - assertEquals("Missing required setting [search.remote.foo.seeds] for setting [search.remote.foo.skip_unavailable]", + assertEquals("missing required setting [cluster.remote.foo.seeds] for setting [cluster.remote.foo.skip_unavailable]", iae.getMessage()); } { try (MockTransportService remoteSeedTransport = startTransport("seed", new CopyOnWriteArrayList<>(), Version.CURRENT)) { String seed = remoteSeedTransport.getLocalDiscoNode().getAddress().toString(); - service.validate(Settings.builder().put("search.remote.foo.skip_unavailable", randomBoolean()) - .put("search.remote.foo.seeds", seed).build(), true); - service.validate(Settings.builder().put("search.remote.foo.seeds", seed).build(), true); + service.validate(Settings.builder().put("cluster.remote.foo.skip_unavailable", randomBoolean()) + .put("cluster.remote.foo.seeds", seed).build(), true); + service.validate(Settings.builder().put("cluster.remote.foo.seeds", seed).build(), true); - 
AbstractScopedSettings service2 = new ClusterSettings(Settings.builder().put("search.remote.foo.seeds", seed).build(), + AbstractScopedSettings service2 = new ClusterSettings(Settings.builder().put("cluster.remote.foo.seeds", seed).build(), new HashSet<>(Arrays.asList(RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE))); - service2.validate(Settings.builder().put("search.remote.foo.skip_unavailable", randomBoolean()).build(), false); + service2.validate(Settings.builder().put("cluster.remote.foo.skip_unavailable", randomBoolean()).build(), false); } } } @@ -769,7 +789,7 @@ public class RemoteClusterServiceTests extends ESTestCase { public void testGetNodePredicateNodeAttrs() { TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0); Set roles = new HashSet<>(EnumSet.allOf(DiscoveryNode.Role.class)); - Settings settings = Settings.builder().put("search.remote.node.attr", "gateway").build(); + Settings settings = Settings.builder().put("cluster.remote.node.attr", "gateway").build(); Predicate nodePredicate = RemoteClusterService.getNodePredicate(settings); { DiscoveryNode nonGatewayNode = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "false"), @@ -792,7 +812,7 @@ public class RemoteClusterServiceTests extends ESTestCase { public void testGetNodePredicatesCombination() { TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0); - Settings settings = Settings.builder().put("search.remote.node.attr", "gateway").build(); + Settings settings = Settings.builder().put("cluster.remote.node.attr", "gateway").build(); Predicate nodePredicate = RemoteClusterService.getNodePredicate(settings); Set allRoles = new HashSet<>(EnumSet.allOf(DiscoveryNode.Role.class)); Set dedicatedMasterRoles = new HashSet<>(EnumSet.of(DiscoveryNode.Role.MASTER)); @@ -816,10 +836,77 @@ public class RemoteClusterServiceTests extends ESTestCase { allRoles, Version.CURRENT); assertTrue(nodePredicate.test(node)); } - { - DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "true"), - allRoles, Version.V_5_3_0); - assertFalse(nodePredicate.test(node)); + } + + public void testRemoteClusterWithProxy() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService cluster_1_node0 = startTransport("cluster_1_node0", knownNodes, Version.CURRENT); + MockTransportService cluster_1_node_1 = startTransport("cluster_1_node1", knownNodes, Version.CURRENT); + MockTransportService cluster_2_node0 = startTransport("cluster_2_node0", Collections.emptyList(), Version.CURRENT)) { + knownNodes.add(cluster_1_node0.getLocalDiscoNode()); + knownNodes.add(cluster_1_node_1.getLocalDiscoNode()); + String cluster1Proxy = "1.1.1.1:99"; + String cluster2Proxy = "2.2.2.2:99"; + Map nodesCluster1 = new HashMap<>(); + nodesCluster1.put("cluster_1_node0", cluster_1_node0.getLocalDiscoNode()); + nodesCluster1.put("cluster_1_node1", cluster_1_node_1.getLocalDiscoNode()); + Map> mapping = new HashMap<>(); + mapping.put(cluster1Proxy, nodesCluster1); + mapping.put(cluster2Proxy, Collections.singletonMap("cluster_2_node0", cluster_2_node0.getLocalDiscoNode())); + + Collections.shuffle(knownNodes, random()); + Transport proxyTransport = RemoteClusterConnectionTests.getProxyTransport(threadPool, mapping); + try (MockTransportService transportService = MockTransportService.createNewService(Settings.EMPTY, proxyTransport, + Version.CURRENT, threadPool, null, 
Collections.emptySet());) { + transportService.start(); + transportService.acceptIncomingRequests(); + Settings.Builder builder = Settings.builder(); + builder.putList("cluster.remote.cluster_1.seeds", "cluster_1_node0:8080"); + builder.put("cluster.remote.cluster_1.proxy", cluster1Proxy); + try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) { + assertFalse(service.isCrossClusterSearchEnabled()); + service.initializeRemoteClusters(); + assertTrue(service.isCrossClusterSearchEnabled()); + updateRemoteCluster(service, "cluster_1", Collections.singletonList("cluster_1_node1:8081"), cluster1Proxy); + assertTrue(service.isCrossClusterSearchEnabled()); + assertTrue(service.isRemoteClusterRegistered("cluster_1")); + assertFalse(service.isRemoteClusterRegistered("cluster_2")); + updateRemoteCluster(service, "cluster_2", Collections.singletonList("cluster_2_node0:9300"), cluster2Proxy); + assertTrue(service.isCrossClusterSearchEnabled()); + assertTrue(service.isRemoteClusterRegistered("cluster_1")); + assertTrue(service.isRemoteClusterRegistered("cluster_2")); + List infos = service.getRemoteConnectionInfos().collect(Collectors.toList()); + for (RemoteConnectionInfo info : infos) { + switch (info.clusterAlias) { + case "cluster_1": + assertEquals(2, info.numNodesConnected); + break; + case "cluster_2": + assertEquals(1, info.numNodesConnected); + break; + default: + fail("unknown cluster: " + info.clusterAlias); + } + } + service.updateRemoteCluster("cluster_2", Collections.emptyList(), randomBoolean() ? cluster2Proxy : null); + assertFalse(service.isRemoteClusterRegistered("cluster_2")); + } + } + } + } + + private void updateRemoteCluster(RemoteClusterService service, String clusterAlias, List addresses, String proxyAddress) + throws Exception { + CountDownLatch latch = new CountDownLatch(1); + AtomicReference exceptionAtomicReference = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap(x -> latch.countDown(), x -> { + exceptionAtomicReference.set(x); + latch.countDown(); + }); + service.updateRemoteCluster(clusterAlias, addresses, proxyAddress, listener); + latch.await(); + if (exceptionAtomicReference.get() != null) { + throw exceptionAtomicReference.get(); } } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java new file mode 100644 index 00000000000..cfffc383946 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java @@ -0,0 +1,146 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.transport.RemoteClusterAware.REMOTE_CLUSTERS_PROXY; +import static org.elasticsearch.transport.RemoteClusterAware.REMOTE_CLUSTERS_SEEDS; +import static org.elasticsearch.transport.RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY; +import static org.elasticsearch.transport.RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS; +import static org.elasticsearch.transport.RemoteClusterService.ENABLE_REMOTE_CLUSTERS; +import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE; +import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER; +import static org.elasticsearch.transport.RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING; +import static org.elasticsearch.transport.RemoteClusterService.REMOTE_NODE_ATTRIBUTE; +import static org.elasticsearch.transport.RemoteClusterService.SEARCH_ENABLE_REMOTE_CLUSTERS; +import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE; +import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER; +import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING; +import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_NODE_ATTRIBUTE; +import static org.hamcrest.Matchers.emptyCollectionOf; +import static org.hamcrest.Matchers.equalTo; + +public class RemoteClusterSettingsTests extends ESTestCase { + + public void testConnectionsPerClusterFallback() { + final int value = randomIntBetween(1, 8); + final Settings settings = Settings.builder().put(SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER.getKey(), value).build(); + assertThat(REMOTE_CONNECTIONS_PER_CLUSTER.get(settings), equalTo(value)); + assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER}); + } + + public void testConnectionsPerClusterDefault() { + assertThat(REMOTE_CONNECTIONS_PER_CLUSTER.get(Settings.EMPTY), equalTo(3)); + } + + public void testInitialConnectTimeoutFallback() { + final String value = randomTimeValue(30, 300, "s"); + final Settings settings = Settings.builder().put(SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.getKey(), value).build(); + assertThat( + REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), + equalTo(TimeValue.parseTimeValue(value, SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.getKey()))); + assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING}); + } + + public void testInitialConnectTimeoutDefault() { + assertThat(REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(Settings.EMPTY), equalTo(new TimeValue(30, TimeUnit.SECONDS))); + } + + public void testRemoteNodeAttributeFallback() { + final String attribute = randomAlphaOfLength(8); + final Settings settings = Settings.builder().put(SEARCH_REMOTE_NODE_ATTRIBUTE.getKey(), attribute).build(); + assertThat(REMOTE_NODE_ATTRIBUTE.get(settings), equalTo(attribute)); + assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_NODE_ATTRIBUTE}); + } + + public void testRemoteNodeAttributeDefault() { + assertThat(REMOTE_NODE_ATTRIBUTE.get(Settings.EMPTY), 
equalTo("")); + } + + public void testEnableRemoteClustersFallback() { + final boolean enable = randomBoolean(); + final Settings settings = Settings.builder().put(SEARCH_ENABLE_REMOTE_CLUSTERS.getKey(), enable).build(); + assertThat(ENABLE_REMOTE_CLUSTERS.get(settings), equalTo(enable)); + assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_ENABLE_REMOTE_CLUSTERS}); + } + + public void testEnableRemoteClustersDefault() { + assertTrue(ENABLE_REMOTE_CLUSTERS.get(Settings.EMPTY)); + } + + public void testSkipUnavailableFallback() { + final String alias = randomAlphaOfLength(8); + final boolean skip = randomBoolean(); + final Settings settings = + Settings.builder().put(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).getKey(), skip).build(); + assertThat(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(settings), equalTo(skip)); + assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias)}); + } + + public void testSkipUnavailableDefault() { + final String alias = randomAlphaOfLength(8); + assertFalse(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(Settings.EMPTY)); + } + + public void testSeedsFallback() { + final String alias = randomAlphaOfLength(8); + final int numberOfSeeds = randomIntBetween(1, 8); + final List seeds = new ArrayList<>(numberOfSeeds); + for (int i = 0; i < numberOfSeeds; i++) { + seeds.add("localhost:" + Integer.toString(9200 + i)); + } + final Settings settings = + Settings.builder() + .put(SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias).getKey(), String.join(",", seeds)).build(); + assertThat(REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias).get(settings), equalTo(seeds)); + assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias)}); + } + + public void testSeedsDefault() { + final String alias = randomAlphaOfLength(8); + assertThat(REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias).get(Settings.EMPTY), emptyCollectionOf(String.class)); + } + + public void testProxyFallback() { + final String alias = randomAlphaOfLength(8); + final String proxy = randomAlphaOfLength(8); + final int port = randomIntBetween(9200, 9300); + final String value = proxy + ":" + port; + final Settings settings = + Settings.builder() + .put(SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias).getKey(), value).build(); + assertThat(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias).get(settings), equalTo(value)); + assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias)}); + } + + public void testProxyDefault() { + final String alias = randomAlphaOfLength(8); + assertThat(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias).get(Settings.EMPTY), equalTo("")); + } + +} \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index a3d2e1bbc57..0bf12ba82c8 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -156,19 +156,26 @@ public class TcpTransportTests extends ESTestCase { TcpTransport.ensureVersionCompatibility(VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), 
Version.CURRENT), Version.CURRENT, randomBoolean()); - TcpTransport.ensureVersionCompatibility(Version.fromString("5.0.0"), Version.fromString("6.0.0"), true); + TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), Version.fromString("7.0.0"), true); IllegalStateException ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("5.0.0"), Version.fromString("6.0.0"), false)); - assertEquals("Received message from unsupported version: [5.0.0] minimal compatible version is: [5.6.0]", ise.getMessage()); + TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), Version.fromString("7.0.0"), false)); + assertEquals("Received message from unsupported version: [6.0.0] minimal compatible version is: [6.5.0]", ise.getMessage()); + // For handshake we are compatible with N-2 + TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), Version.fromString("7.0.0"), true); ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("6.0.0"), true)); - assertEquals("Received handshake message from unsupported version: [2.3.0] minimal compatible version is: [5.6.0]", + TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), Version.fromString("7.0.0"), false)); + assertEquals("Received message from unsupported version: [5.6.0] minimal compatible version is: [6.5.0]", ise.getMessage()); ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("6.0.0"), false)); - assertEquals("Received message from unsupported version: [2.3.0] minimal compatible version is: [5.6.0]", + TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("7.0.0"), true)); + assertEquals("Received handshake message from unsupported version: [2.3.0] minimal compatible version is: [6.5.0]", + ise.getMessage()); + + ise = expectThrows(IllegalStateException.class, () -> + TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("7.0.0"), false)); + assertEquals("Received message from unsupported version: [2.3.0] minimal compatible version is: [6.5.0]", ise.getMessage()); } @@ -188,7 +195,7 @@ public class TcpTransportTests extends ESTestCase { } @Override - protected FakeChannel initiateChannel(InetSocketAddress address, ActionListener<Void> connectListener) throws IOException { + protected FakeChannel initiateChannel(DiscoveryNode node, ActionListener<Void> connectListener) throws IOException { return new FakeChannel(messageCaptor); } diff --git a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java new file mode 100644 index 00000000000..42a61008820 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.apache.logging.log4j.Level; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; +import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.CompositeBytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.io.IOException; + +import static org.mockito.Mockito.mock; + +@TestLogging(value = "org.elasticsearch.transport.TransportLogger:trace") +public class TransportLoggerTests extends ESTestCase { + + private MockLogAppender appender; + + public void setUp() throws Exception { + super.setUp(); + appender = new MockLogAppender(); + Loggers.addAppender(Loggers.getLogger(TransportLogger.class), appender); + appender.start(); + } + + public void tearDown() throws Exception { + Loggers.removeAppender(Loggers.getLogger(TransportLogger.class), appender); + appender.stop(); + super.tearDown(); + } + + public void testLoggingHandler() throws IOException { + TransportLogger transportLogger = new TransportLogger(Settings.EMPTY); + + final String writePattern = + ".*\\[length: \\d+" + + ", request id: \\d+" + + ", type: request" + + ", version: .*" + + ", action: cluster:monitor/stats]" + + " WRITE: \\d+B"; + final MockLogAppender.LoggingExpectation writeExpectation = + new MockLogAppender.PatternSeenEventExcpectation( + "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern); + + final String readPattern = + ".*\\[length: \\d+" + + ", request id: \\d+" + + ", type: request" + + ", version: .*" + + ", action: cluster:monitor/stats]" + + " READ: \\d+B"; + + final MockLogAppender.LoggingExpectation readExpectation = + new MockLogAppender.PatternSeenEventExcpectation( + "cluster monitor request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern); + + appender.addExpectation(writeExpectation); + appender.addExpectation(readExpectation); + BytesReference bytesReference = buildRequest(); + transportLogger.logInboundMessage(mock(TcpChannel.class), bytesReference.slice(6, bytesReference.length() - 6)); + transportLogger.logOutboundMessage(mock(TcpChannel.class), bytesReference); + appender.assertAllExpectationsMatched(); + } + + private BytesReference buildRequest() throws IOException { + try (BytesStreamOutput messageOutput = new BytesStreamOutput()) { + messageOutput.setVersion(Version.CURRENT); + try (ThreadContext context = new ThreadContext(Settings.EMPTY)) { + context.writeTo(messageOutput); + } + messageOutput.writeStringArray(new String[0]); + messageOutput.writeString(ClusterStatsAction.NAME); + new ClusterStatsRequest().writeTo(messageOutput); + BytesReference 
messageBody = messageOutput.bytes(); + final BytesReference header = buildHeader(randomInt(30), messageBody.length()); + return new CompositeBytesReference(header, messageBody); + } + } + + private BytesReference buildHeader(long requestId, int length) throws IOException { + try (BytesStreamOutput headerOutput = new BytesStreamOutput(TcpHeader.HEADER_SIZE)) { + headerOutput.setVersion(Version.CURRENT); + TcpHeader.writeHeader(headerOutput, requestId, TransportStatus.setRequest((byte) 0), Version.CURRENT, length); + final BytesReference bytes = headerOutput.bytes(); + assert bytes.length() == TcpHeader.HEADER_SIZE : "header size mismatch expected: " + TcpHeader.HEADER_SIZE + " but was: " + + bytes.length(); + return bytes; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/server/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java index caf4f725fa4..588118db4ae 100644 --- a/server/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/server/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; @@ -785,4 +786,26 @@ public class SimpleVersioningIT extends ESIntegTestCase { .getVersion(), equalTo(-1L)); } + + public void testSpecialVersioning() { + internalCluster().ensureAtLeastNumDataNodes(2); + createIndex("test", Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build()); + IndexResponse doc1 = client().prepareIndex("test", "type", "1").setSource("field", "value1") + .setVersion(0).setVersionType(VersionType.EXTERNAL).execute().actionGet(); + assertThat(doc1.getVersion(), equalTo(0L)); + IndexResponse doc2 = client().prepareIndex("test", "type", "1").setSource("field", "value2") + .setVersion(Versions.MATCH_ANY).setVersionType(VersionType.INTERNAL).execute().actionGet(); + assertThat(doc2.getVersion(), equalTo(1L)); + client().prepareDelete("test", "type", "1").get(); //v2 + IndexResponse doc3 = client().prepareIndex("test", "type", "1").setSource("field", "value3") + .setVersion(Versions.MATCH_DELETED).setVersionType(VersionType.INTERNAL).execute().actionGet(); + assertThat(doc3.getVersion(), equalTo(3L)); + IndexResponse doc4 = client().prepareIndex("test", "type", "1").setSource("field", "value4") + .setVersion(4L).setVersionType(VersionType.EXTERNAL_GTE).execute().actionGet(); + assertThat(doc4.getVersion(), equalTo(4L)); + // Make sure that these versions are replicated correctly + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get(); + ensureGreen("test"); + } } diff --git a/server/src/test/resources/org/elasticsearch/search/query/all-query-index.json b/server/src/test/resources/org/elasticsearch/search/query/all-query-index.json index abdc1192822..9ab8995813e 100644 --- a/server/src/test/resources/org/elasticsearch/search/query/all-query-index.json +++ b/server/src/test/resources/org/elasticsearch/search/query/all-query-index.json @@ -46,10 +46,6 @@ "format": "yyyy/MM/dd||epoch_millis" }, "f_bool": {"type": "boolean"}, - "f_bool_alias": { - "type": 
"alias", - "path": "f_bool" - }, "f_byte": {"type": "byte"}, "f_short": {"type": "short"}, "f_int": {"type": "integer"}, @@ -60,6 +56,10 @@ "f_binary": {"type": "binary"}, "f_suggest": {"type": "completion"}, "f_geop": {"type": "geo_point"}, + "f_geop_alias": { + "type": "alias", + "path": "f_geop" + }, "f_geos": {"type": "geo_shape"} } } diff --git a/settings.gradle b/settings.gradle index bdd866e622b..dedf3520bbb 100644 --- a/settings.gradle +++ b/settings.gradle @@ -78,12 +78,18 @@ addSubProjects('', new File(rootProject.projectDir, 'plugins')) addSubProjects('', new File(rootProject.projectDir, 'qa')) addSubProjects('', new File(rootProject.projectDir, 'x-pack')) -boolean isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse') +List startTasks = gradle.startParameter.taskNames +boolean isEclipse = + System.getProperty("eclipse.launcher") != null || // Detects gradle launched from the Eclipse IDE + System.getProperty("eclipse.application") != null || // Detects gradle launched from the Eclipse compiler server + startTasks.contains("eclipse") || // Detects gradle launched from the command line to do Eclipse stuff + startTasks.contains("cleanEclipse"); if (isEclipse) { // eclipse cannot handle an intermediate dependency between main and test, so we must create separate projects // for server-src and server-tests projects << 'server-tests' projects << 'libs:core-tests' + projects << 'libs:dissect-tests' projects << 'libs:nio-tests' projects << 'libs:x-content-tests' projects << 'libs:secure-sm-tests' @@ -103,6 +109,10 @@ if (isEclipse) { project(":libs:core").buildFileName = 'eclipse-build.gradle' project(":libs:core-tests").projectDir = new File(rootProject.projectDir, 'libs/core/src/test') project(":libs:core-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:dissect").projectDir = new File(rootProject.projectDir, 'libs/dissect/src/main') + project(":libs:dissect").buildFileName = 'eclipse-build.gradle' + project(":libs:dissect-tests").projectDir = new File(rootProject.projectDir, 'libs/dissect/src/test') + project(":libs:dissect-tests").buildFileName = 'eclipse-build.gradle' project(":libs:nio").projectDir = new File(rootProject.projectDir, 'libs/nio/src/main') project(":libs:nio").buildFileName = 'eclipse-build.gradle' project(":libs:nio-tests").projectDir = new File(rootProject.projectDir, 'libs/nio/src/test') diff --git a/test/framework/build.gradle b/test/framework/build.gradle index ab513a1b0bb..8179e3d096a 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks; - dependencies { compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" compile "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}" @@ -41,9 +38,7 @@ compileTestJava.options.compilerArgs << '-Xlint:-rawtypes' // the main files are actually test files, so use the appropriate forbidden api sigs forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/es-all-signatures.txt'), - PrecommitTasks.getResource('/forbidden/es-test-signatures.txt')] + replaceSignatureFiles 'jdk-signatures', 'es-all-signatures', 'es-test-signatures' } // TODO: should we have licenses for our test deps? 
diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 35dac2e99e0..c50e7cf066b 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -177,8 +177,11 @@ public class BootstrapForTesting { private static void addClassCodebase(Map<String, URL> codebases, String name, String classname) { try { Class<?> clazz = BootstrapForTesting.class.getClassLoader().loadClass(classname); - if (codebases.put(name, clazz.getProtectionDomain().getCodeSource().getLocation()) != null) { - throw new IllegalStateException("Already added " + name + " codebase for testing"); + URL location = clazz.getProtectionDomain().getCodeSource().getLocation(); + if (location.toString().endsWith(".jar") == false) { + if (codebases.put(name, location) != null) { + throw new IllegalStateException("Already added " + name + " codebase for testing"); + } } } catch (ClassNotFoundException e) { // no class, fall through to not add. this can happen for any tests that do not include diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java index 2291c3d39e2..ee270ee6e48 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java @@ -39,10 +39,6 @@ public class TestShardRouting { return newShardRouting(new ShardId(index, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), currentNodeId, primary, state); } - public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary, RecoverySource recoverySource, ShardRoutingState state) { - return new ShardRouting(shardId, currentNodeId, null, primary, state, recoverySource, buildUnassignedInfo(state), buildAllocationId(state), -1); - } - public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary, ShardRoutingState state) { return new ShardRouting(shardId, currentNodeId, null, primary, state, buildRecoveryTarget(primary, state), buildUnassignedInfo(state), buildAllocationId(state), -1); } @@ -88,8 +84,8 @@ public class TestShardRouting { case UNASSIGNED: case INITIALIZING: if (primary) { - return ESTestCase.randomFrom(RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE, - RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE); + return ESTestCase.randomFrom(RecoverySource.EmptyStoreRecoverySource.INSTANCE, + RecoverySource.ExistingStoreRecoverySource.INSTANCE); } else { return RecoverySource.PeerRecoverySource.INSTANCE; } @@ -130,8 +126,8 @@ public class TestShardRouting { } public static RecoverySource randomRecoverySource() { - return ESTestCase.randomFrom(RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE, - RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE, + return ESTestCase.randomFrom(RecoverySource.EmptyStoreRecoverySource.INSTANCE, + RecoverySource.ExistingStoreRecoverySource.INSTANCE, RecoverySource.PeerRecoverySource.INSTANCE, RecoverySource.LocalShardsRecoverySource.INSTANCE, new RecoverySource.SnapshotRecoverySource( diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java
b/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java new file mode 100644 index 00000000000..5b57c015895 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.regex.Pattern; +import java.util.regex.Matcher; + +import static org.hamcrest.Matchers.containsString; + +/** + * Tests that extend this class verify that the node name appears in the first + * few log lines on startup. Note that this won't pass for clusters that don't + * have the node name defined in elasticsearch.yml and that start with + * DEBUG or TRACE level logging. Those nodes log a few lines before they + * resolve the node name. + */ +public abstract class NodeNameInLogsIntegTestCase extends ESRestTestCase { + /** + * Number of lines in the log file to check for the node name. We don't + * just check the entire log file because it could be quite long and + * exceptions don't include the node name. + */ + private static final int LINES_TO_CHECK = 10; + + /** + * Open the log file. This is delegated to subclasses because the test + * framework doesn't have permission to read from the log file but + * subclasses can grant themselves that permission. + */ + protected abstract BufferedReader openReader(Path logFile) throws IOException; + + public void testNodeNameIsOnAllLinesOfLog() throws IOException { + BufferedReader logReader = openReader(getLogFile()); + try { + String line = logReader.readLine(); + assertNotNull("no logs at all?!", line); + Matcher m = Pattern.compile("\\] \\[([^\\]]+)\\] ").matcher(line); + if (false == m.find()) { + fail("Didn't see the node name in [" + line + "]"); + } + String nodeName = m.group(1); + + assertNotEquals("unknown", nodeName); + + int lineNumber = 1; + while (true) { + if (lineNumber >= LINES_TO_CHECK) { + break; + } + line = logReader.readLine(); + if (line == null) { + break; // eof + } + lineNumber++; + assertThat(line, containsString("] [" + nodeName + "] ")); + } + } finally { + logReader.close(); + } + } + + @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") + private Path getLogFile() { + String logFileString = System.getProperty("tests.logfile"); + if (null == logFileString) { + fail("tests.logfile must be set to run this test. It is automatically " + "set by gradle. 
If you must set it yourself then it should be the absolute path to the " + + "log file."); + } + return Paths.get(logFileString); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/index/analysis/MyFilterTokenFilterFactory.java b/test/framework/src/main/java/org/elasticsearch/index/analysis/MyFilterTokenFilterFactory.java index 921a09e98e6..157adf9e55c 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/analysis/MyFilterTokenFilterFactory.java +++ b/test/framework/src/main/java/org/elasticsearch/index/analysis/MyFilterTokenFilterFactory.java @@ -20,11 +20,10 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.StopFilter; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.core.StopAnalyzer; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; public class MyFilterTokenFilterFactory extends AbstractTokenFilterFactory { @@ -34,6 +33,6 @@ public class MyFilterTokenFilterFactory extends AbstractTokenFilterFactory { @Override public TokenStream create(TokenStream tokenStream) { - return new StopFilter(tokenStream, StopAnalyzer.ENGLISH_STOP_WORDS_SET); + return new StopFilter(tokenStream, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 2a84a8f4246..283a7b13753 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -19,14 +19,18 @@ package org.elasticsearch.index.engine; +import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.codecs.Codec; +import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.Term; @@ -34,37 +38,45 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.Sort; +import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MapperTestUtils; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; @@ -80,17 +92,30 @@ import org.junit.Before; import java.io.IOException; import java.nio.charset.Charset; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; +import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.ToLongBiFunction; +import java.util.stream.Collectors; import static java.util.Collections.emptyList; +import static java.util.Collections.shuffle; +import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; +import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; public abstract class EngineTestCase extends ESTestCase { @@ -99,6 +124,7 @@ public abstract class EngineTestCase extends ESTestCase { protected static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY); protected ThreadPool threadPool; + protected TranslogHandler translogHandler; protected Store store; protected Store storeReplica; @@ -128,6 +154,20 @@ public abstract class EngineTestCase extends ESTestCase { } } + protected Settings indexSettings() { + // TODO randomize more settings + return Settings.builder() + .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(), + between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) + 
.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), + randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)) + .build(); + } + @Override @Before public void setUp() throws Exception { @@ -142,19 +182,14 @@ public abstract class EngineTestCase extends ESTestCase { } else { codecName = "default"; } - defaultSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() - .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us - .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(), - between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) - .build()); // TODO randomize more settings + defaultSettings = IndexSettingsModule.newIndexSettings("test", indexSettings()); threadPool = new TestThreadPool(getClass().getName()); store = createStore(); storeReplica = createStore(); Lucene.cleanLuceneIndex(store.directory()); Lucene.cleanLuceneIndex(storeReplica.directory()); primaryTranslogDir = createTempDir("translog-primary"); + translogHandler = createTranslogHandler(defaultSettings); engine = createEngine(store, primaryTranslogDir); LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); @@ -179,8 +214,8 @@ public abstract class EngineTestCase extends ESTestCase { config.getWarmer(), config.getStore(), config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), - config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(), - config.getCircuitBreakerService(), globalCheckpointSupplier, config.getPrimaryTermSupplier()); + config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), + config.getCircuitBreakerService(), globalCheckpointSupplier, config.getPrimaryTermSupplier(), tombstoneDocSupplier()); } public EngineConfig copy(EngineConfig config, Analyzer analyzer) { @@ -188,8 +223,19 @@ public abstract class EngineTestCase extends ESTestCase { config.getWarmer(), config.getStore(), config.getMergePolicy(), analyzer, config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), - config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(), - config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier(), config.getPrimaryTermSupplier()); + config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), + config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier(), config.getPrimaryTermSupplier(), + config.getTombstoneDocSupplier()); + } + + public EngineConfig copy(EngineConfig config, MergePolicy mergePolicy) { + return new EngineConfig(config.getShardId(), config.getAllocationId(), config.getThreadPool(), config.getIndexSettings(), + config.getWarmer(), config.getStore(), mergePolicy, config.getAnalyzer(), config.getSimilarity(), + new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), + 
config.getTranslogConfig(), config.getFlushMergesAfter(), + config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), + config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier(), config.getPrimaryTermSupplier(), + config.getTombstoneDocSupplier()); } @Override @@ -198,9 +244,11 @@ public abstract class EngineTestCase extends ESTestCase { super.tearDown(); if (engine != null && engine.isClosed.get() == false) { engine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, createMapperService("test")); } if (replicaEngine != null && replicaEngine.isClosed.get() == false) { replicaEngine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(replicaEngine, createMapperService("test")); } IOUtils.close( replicaEngine, storeReplica, @@ -228,8 +276,18 @@ public abstract class EngineTestCase extends ESTestCase { return testParsedDocument(id, routing, testDocumentWithTextField(), new BytesArray("{ \"value\" : \"test\" }"), null); } + public static ParsedDocument createParsedDoc(String id, String routing, boolean recoverySource) { + return testParsedDocument(id, routing, testDocumentWithTextField(), new BytesArray("{ \"value\" : \"test\" }"), null, + recoverySource); + } + protected static ParsedDocument testParsedDocument( String id, String routing, ParseContext.Document document, BytesReference source, Mapping mappingUpdate) { + return testParsedDocument(id, routing, document, source, mappingUpdate, false); + } + protected static ParsedDocument testParsedDocument( + String id, String routing, ParseContext.Document document, BytesReference source, Mapping mappingUpdate, + boolean recoverySource) { Field uidField = new Field("_id", Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE); Field versionField = new NumericDocValuesField("_version", 0); SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); @@ -239,11 +297,57 @@ public abstract class EngineTestCase extends ESTestCase { document.add(seqID.seqNoDocValue); document.add(seqID.primaryTerm); BytesRef ref = source.toBytesRef(); - document.add(new StoredField(SourceFieldMapper.NAME, ref.bytes, ref.offset, ref.length)); + if (recoverySource) { + document.add(new StoredField(SourceFieldMapper.RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length)); + document.add(new NumericDocValuesField(SourceFieldMapper.RECOVERY_SOURCE_NAME, 1)); + } else { + document.add(new StoredField(SourceFieldMapper.NAME, ref.bytes, ref.offset, ref.length)); + } return new ParsedDocument(versionField, seqID, id, "test", routing, Arrays.asList(document), source, XContentType.JSON, mappingUpdate); } + /** + * Creates a tombstone document that only includes uid, seq#, term and version fields. 
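+ * With soft deletes enabled, such a tombstone is indexed in place of the deleted document (or for a no-op) so that the operation itself remains visible to consumers of the index history, for example operation-based recovery.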
+ */ + public static EngineConfig.TombstoneDocSupplier tombstoneDocSupplier(){ + return new EngineConfig.TombstoneDocSupplier() { + @Override + public ParsedDocument newDeleteTombstoneDoc(String type, String id) { + final ParseContext.Document doc = new ParseContext.Document(); + Field uidField = new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE); + doc.add(uidField); + Field versionField = new NumericDocValuesField(VersionFieldMapper.NAME, 0); + doc.add(versionField); + SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); + doc.add(seqID.seqNo); + doc.add(seqID.seqNoDocValue); + doc.add(seqID.primaryTerm); + seqID.tombstoneField.setLongValue(1); + doc.add(seqID.tombstoneField); + return new ParsedDocument(versionField, seqID, id, type, null, + Collections.singletonList(doc), new BytesArray("{}"), XContentType.JSON, null); + } + + @Override + public ParsedDocument newNoopTombstoneDoc(String reason) { + final ParseContext.Document doc = new ParseContext.Document(); + SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); + doc.add(seqID.seqNo); + doc.add(seqID.seqNoDocValue); + doc.add(seqID.primaryTerm); + seqID.tombstoneField.setLongValue(1); + doc.add(seqID.tombstoneField); + Field versionField = new NumericDocValuesField(VersionFieldMapper.NAME, 0); + doc.add(versionField); + BytesRef byteRef = new BytesRef(reason); + doc.add(new StoredField(SourceFieldMapper.NAME, byteRef.bytes, byteRef.offset, byteRef.length)); + return new ParsedDocument(versionField, seqID, null, null, null, + Collections.singletonList(doc), null, XContentType.JSON, null); + } + }; + } + protected Store createStore() throws IOException { return createStore(newDirectory()); } @@ -253,13 +357,7 @@ public abstract class EngineTestCase extends ESTestCase { } protected Store createStore(final IndexSettings indexSettings, final Directory directory) throws IOException { - final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { - @Override - public Directory newDirectory() throws IOException { - return directory; - } - }; - return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); + return new Store(shardId, indexSettings, directory, new DummyShardLock(shardId)); } protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOException { @@ -274,6 +372,10 @@ public abstract class EngineTestCase extends ESTestCase { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTermSupplier); } + protected TranslogHandler createTranslogHandler(IndexSettings indexSettings) { + return new TranslogHandler(xContentRegistry(), indexSettings); + } + protected InternalEngine createEngine(Store store, Path translogPath) throws IOException { return createEngine(defaultSettings, store, translogPath, newMergePolicy(), null); } @@ -375,7 +477,7 @@ public abstract class EngineTestCase extends ESTestCase { } InternalEngine internalEngine = createInternalEngine(indexWriterFactory, localCheckpointTrackerSupplier, seqNoForOperation, config); - internalEngine.recoverFromTranslog(); + internalEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); return internalEngine; } @@ -450,18 +552,16 @@ public abstract class EngineTestCase extends ESTestCase { // we don't need to notify anybody in this test } }; - final TranslogHandler handler = new TranslogHandler(xContentRegistry(), IndexSettingsModule.newIndexSettings(shardId.getIndexName(), - indexSettings.getSettings())); final 
List<ReferenceManager.RefreshListener> refreshListenerList = refreshListener == null ? emptyList() : Collections.singletonList(refreshListener); EngineConfig config = new EngineConfig(shardId, allocationId.getId(), threadPool, indexSettings, null, store, mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - TimeValue.timeValueMinutes(5), refreshListenerList, Collections.emptyList(), indexSort, handler, + TimeValue.timeValueMinutes(5), refreshListenerList, Collections.emptyList(), indexSort, new NoneCircuitBreakerService(), globalCheckpointSupplier == null ? new ReplicationTracker(shardId, allocationId.getId(), indexSettings, SequenceNumbers.NO_OPS_PERFORMED, update -> {}) : - globalCheckpointSupplier, primaryTerm::get); + globalCheckpointSupplier, primaryTerm::get, tombstoneDocSupplier()); return config; } @@ -474,7 +574,7 @@ public abstract class EngineTestCase extends ESTestCase { return new BytesArray(string.getBytes(Charset.defaultCharset())); } - protected Term newUid(String id) { + protected static Term newUid(String id) { return new Term("_id", Uid.encodeId(id)); } @@ -499,6 +599,279 @@ public abstract class EngineTestCase extends ESTestCase { protected Engine.Delete replicaDeleteForDoc(String id, long version, long seqNo, long startTime) { return new Engine.Delete("test", id, newUid(id), seqNo, 1, version, null, Engine.Operation.Origin.REPLICA, startTime); } + protected static void assertVisibleCount(InternalEngine engine, int numDocs) throws IOException { + assertVisibleCount(engine, numDocs, true); + } + + protected static void assertVisibleCount(InternalEngine engine, int numDocs, boolean refresh) throws IOException { + if (refresh) { + engine.refresh("test"); + } + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + final TotalHitCountCollector collector = new TotalHitCountCollector(); + searcher.searcher().search(new MatchAllDocsQuery(), collector); + assertThat(collector.getTotalHits(), equalTo(numDocs)); + } + } + + public static List<Engine.Operation> generateSingleDocHistory(boolean forReplica, VersionType versionType, + long primaryTerm, int minOpCount, int maxOpCount, String docId) { + final int numOfOps = randomIntBetween(minOpCount, maxOpCount); + final List<Engine.Operation> ops = new ArrayList<>(); + final Term id = newUid(docId); + final int startWithSeqNo = 0; + final String valuePrefix = (forReplica ? "r_" : "p_") + docId + "_"; + final boolean incrementTermWhenIntroducingSeqNo = randomBoolean(); + for (int i = 0; i < numOfOps; i++) { + final Engine.Operation op; + final long version; + switch (versionType) { + case INTERNAL: + version = forReplica ? i : Versions.MATCH_ANY; + break; + case EXTERNAL: + version = i; + break; + case EXTERNAL_GTE: + version = randomBoolean() ? Math.max(i - 1, 0) : i; + break; + case FORCE: + version = randomNonNegativeLong(); + break; + default: + throw new UnsupportedOperationException("unknown version type: " + versionType); + } + if (randomBoolean()) { + op = new Engine.Index(id, testParsedDocument(docId, null, testDocumentWithTextField(valuePrefix + i), B_1, null), + forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbers.UNASSIGNED_SEQ_NO, + forReplica && i >= startWithSeqNo && incrementTermWhenIntroducingSeqNo ? primaryTerm + 1 : primaryTerm, + version, + forReplica ? null : versionType, + forReplica ?
REPLICA : PRIMARY, + System.currentTimeMillis(), -1, false + ); + } else { + op = new Engine.Delete("test", docId, id, + forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbers.UNASSIGNED_SEQ_NO, + forReplica && i >= startWithSeqNo && incrementTermWhenIntroducingSeqNo ? primaryTerm + 1 : primaryTerm, + version, + forReplica ? null : versionType, + forReplica ? REPLICA : PRIMARY, + System.currentTimeMillis()); + } + ops.add(op); + } + return ops; + } + + public static void assertOpsOnReplica( + final List<Engine.Operation> ops, + final InternalEngine replicaEngine, + boolean shuffleOps, + final Logger logger) throws IOException { + final Engine.Operation lastOp = ops.get(ops.size() - 1); + final String lastFieldValue; + if (lastOp instanceof Engine.Index) { + Engine.Index index = (Engine.Index) lastOp; + lastFieldValue = index.docs().get(0).get("value"); + } else { + // delete + lastFieldValue = null; + } + if (shuffleOps) { + int firstOpWithSeqNo = 0; + while (firstOpWithSeqNo < ops.size() && ops.get(firstOpWithSeqNo).seqNo() < 0) { + firstOpWithSeqNo++; + } + // shuffle ops but make sure legacy ops are first + shuffle(ops.subList(0, firstOpWithSeqNo), random()); + shuffle(ops.subList(firstOpWithSeqNo, ops.size()), random()); + } + boolean firstOp = true; + for (Engine.Operation op : ops) { + logger.info("performing [{}], v [{}], seq# [{}], term [{}]", + op.operationType().name().charAt(0), op.version(), op.seqNo(), op.primaryTerm()); + if (op instanceof Engine.Index) { + Engine.IndexResult result = replicaEngine.index((Engine.Index) op); + // Replicas don't really care about the creation status of documents: this allows us to + // ignore the case where a document was found in the live version maps in a deleted state + // and to return false for the created flag, in favor of code simplicity. This check just + // signals a regression so a decision can be made if the change is intentional. + assertThat(result.isCreated(), equalTo(firstOp)); + assertThat(result.getVersion(), equalTo(op.version())); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); + + } else { + Engine.DeleteResult result = replicaEngine.delete((Engine.Delete) op); + // Replicas don't really care about the found status of documents: this allows us to + // ignore the case where a document was found in the live version maps in a deleted state + // and to return true for the found flag, in favor of code simplicity. This check just + // signals a regression so a decision can be made if the change is intentional. + assertThat(result.isFound(), equalTo(firstOp == false)); + assertThat(result.getVersion(), equalTo(op.version())); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); + } + if (randomBoolean()) { + replicaEngine.refresh("test"); + } + if (randomBoolean()) { + replicaEngine.flush(); + replicaEngine.refresh("test"); + } + firstOp = false; + } + + assertVisibleCount(replicaEngine, lastFieldValue == null ?
0 : 1); + if (lastFieldValue != null) { + try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) { + final TotalHitCountCollector collector = new TotalHitCountCollector(); + searcher.searcher().search(new TermQuery(new Term("value", lastFieldValue)), collector); + assertThat(collector.getTotalHits(), equalTo(1)); + } + } + } + + protected void concurrentlyApplyOps(List<Engine.Operation> ops, InternalEngine engine) throws InterruptedException { + Thread[] thread = new Thread[randomIntBetween(3, 5)]; + CountDownLatch startGun = new CountDownLatch(thread.length); + AtomicInteger offset = new AtomicInteger(-1); + for (int i = 0; i < thread.length; i++) { + thread[i] = new Thread(() -> { + startGun.countDown(); + try { + startGun.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + int docOffset; + while ((docOffset = offset.incrementAndGet()) < ops.size()) { + try { + final Engine.Operation op = ops.get(docOffset); + if (op instanceof Engine.Index) { + engine.index((Engine.Index) op); + } else if (op instanceof Engine.Delete) { + engine.delete((Engine.Delete) op); + } else { + engine.noOp((Engine.NoOp) op); + } + if ((docOffset + 1) % 4 == 0) { + engine.refresh("test"); + } + if (rarely()) { + engine.flush(); + } + } catch (IOException e) { + throw new AssertionError(e); + } + } + }); + thread[i].start(); + } + for (int i = 0; i < thread.length; i++) { + thread[i].join(); + } + } + + /** + * Gets all doc IDs from the given engine. + */ + public static Set<String> getDocIds(Engine engine, boolean refresh) throws IOException { + if (refresh) { + engine.refresh("test_get_doc_ids"); + } + try (Engine.Searcher searcher = engine.acquireSearcher("test_get_doc_ids")) { + Set<String> ids = new HashSet<>(); + for (LeafReaderContext leafContext : searcher.reader().leaves()) { + LeafReader reader = leafContext.reader(); + Bits liveDocs = reader.getLiveDocs(); + for (int i = 0; i < reader.maxDoc(); i++) { + if (liveDocs == null || liveDocs.get(i)) { + Document uuid = reader.document(i, Collections.singleton(IdFieldMapper.NAME)); + BytesRef binaryID = uuid.getBinaryValue(IdFieldMapper.NAME); + ids.add(Uid.decodeId(Arrays.copyOfRange(binaryID.bytes, binaryID.offset, binaryID.offset + binaryID.length))); + } + } + } + return ids; + } + } + + /** + * Reads all engine operations that have been processed by the engine from the Lucene index. + * The returned operations are sorted and de-duplicated, thus each sequence number will have at most one operation. + */ + public static List<Translog.Operation> readAllOperationsInLucene(Engine engine, MapperService mapper) throws IOException { + final List<Translog.Operation> operations = new ArrayList<>(); + long maxSeqNo = Math.max(0, ((InternalEngine) engine).getLocalCheckpointTracker().getMaxSeqNo()); + try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapper, 0, maxSeqNo, false)) { + Translog.Operation op; + while ((op = snapshot.next()) != null) { + operations.add(op); + } + } + return operations; + } + + /** + * Asserts the provided engine has a consistent document history between translog and Lucene index.
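+ * Every translog operation that falls inside the soft-delete retention window must also be present in Lucene, with a matching primary term, operation type and source.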
+ */ + public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine engine, MapperService mapper) throws IOException { + if (mapper.documentMapper() == null || engine.config().getIndexSettings().isSoftDeleteEnabled() == false) { + return; + } + final long maxSeqNo = ((InternalEngine) engine).getLocalCheckpointTracker().getMaxSeqNo(); + if (maxSeqNo < 0) { + return; // nothing to check + } + final Map<Long, Translog.Operation> translogOps = new HashMap<>(); + try (Translog.Snapshot snapshot = EngineTestCase.getTranslog(engine).newSnapshot()) { + Translog.Operation op; + while ((op = snapshot.next()) != null) { + translogOps.put(op.seqNo(), op); + } + } + final Map<Long, Translog.Operation> luceneOps = readAllOperationsInLucene(engine, mapper).stream() + .collect(Collectors.toMap(Translog.Operation::seqNo, Function.identity())); + final long globalCheckpoint = EngineTestCase.getTranslog(engine).getLastSyncedGlobalCheckpoint(); + final long retainedOps = engine.config().getIndexSettings().getSoftDeleteRetentionOperations(); + final long seqNoForRecovery; + try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) { + seqNoForRecovery = Long.parseLong(safeCommit.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1; + } + final long minSeqNoToRetain = Math.min(seqNoForRecovery, globalCheckpoint + 1 - retainedOps); + for (Translog.Operation translogOp : translogOps.values()) { + final Translog.Operation luceneOp = luceneOps.get(translogOp.seqNo()); + if (luceneOp == null) { + if (minSeqNoToRetain <= translogOp.seqNo() && translogOp.seqNo() <= maxSeqNo) { + fail("Operation not found seq# [" + translogOp.seqNo() + "], global checkpoint [" + globalCheckpoint + "], " + + "retention policy [" + retainedOps + "], maxSeqNo [" + maxSeqNo + "], translog op [" + translogOp + "]"); + } else { + continue; + } + } + assertThat(luceneOp, notNullValue()); + assertThat(luceneOp.toString(), luceneOp.primaryTerm(), equalTo(translogOp.primaryTerm())); + assertThat(luceneOp.opType(), equalTo(translogOp.opType())); + if (luceneOp.opType() == Translog.Operation.Type.INDEX) { + assertThat(luceneOp.getSource().source, equalTo(translogOp.getSource().source)); + } + } + } + + protected MapperService createMapperService(String type) throws IOException { + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)) + .putMapping(type, "{\"properties\": {}}") + .build(); + MapperService mapperService = MapperTestUtils.newMapperService(new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), + createTempDir(), Settings.EMPTY, "test"); + mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_UPDATE); + return mapperService; + } /** * Exposes a translog associated with the given engine for testing purposes.
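A minimal sketch of the retention-window arithmetic that assertConsistentHistoryBetweenTranslogAndLuceneIndex above relies on; the helper name is illustrative, and the rule is just the min(...) expression from the assertion:

public final class RetentionWindowSketch {
    // An operation must still be recoverable from Lucene when its seq# is at or above
    // min(localCheckpointOfSafeCommit + 1, globalCheckpoint + 1 - retainedOps).
    static long minSeqNoToRetain(long localCheckpointOfSafeCommit, long globalCheckpoint, long retainedOps) {
        final long seqNoForRecovery = localCheckpointOfSafeCommit + 1;
        return Math.min(seqNoForRecovery, globalCheckpoint + 1 - retainedOps);
    }

    public static void main(String[] args) {
        // Safe commit at local checkpoint 90, global checkpoint 100, 20 retained ops:
        // everything from seq# 81 upward must still be present in Lucene.
        System.out.println(minSeqNoToRetain(90, 100, 20)); // prints 81
    }
}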
diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index 9999a3b3748..12785841ef2 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -46,7 +46,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static org.elasticsearch.index.mapper.SourceToParse.source; -public class TranslogHandler implements EngineConfig.TranslogRecoveryRunner { +public class TranslogHandler implements Engine.TranslogRecoveryRunner { private final MapperService mapperService; public Mapping mappingUpdate = null; diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 77bc644909a..8717d7ba146 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -37,6 +37,7 @@ import org.elasticsearch.action.resync.ResyncReplicationRequest; import org.elasticsearch.action.resync.ResyncReplicationResponse; import org.elasticsearch.action.resync.TransportResyncReplicationAction; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationRequest; @@ -59,6 +60,7 @@ import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; @@ -98,10 +100,14 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase protected final Index index = new Index("test", "uuid"); private final ShardId shardId = new ShardId(index, 0); - private final Map indexMapping = Collections.singletonMap("type", "{ \"type\": {} }"); + protected final Map indexMapping = Collections.singletonMap("type", "{ \"type\": {} }"); protected ReplicationGroup createGroup(int replicas) throws IOException { - IndexMetaData metaData = buildIndexMetaData(replicas); + return createGroup(replicas, Settings.EMPTY); + } + + protected ReplicationGroup createGroup(int replicas, Settings settings) throws IOException { + IndexMetaData metaData = buildIndexMetaData(replicas, settings, indexMapping); return new ReplicationGroup(metaData); } @@ -110,9 +116,17 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } protected IndexMetaData buildIndexMetaData(int replicas, Map mappings) throws IOException { + return buildIndexMetaData(replicas, Settings.EMPTY, mappings); + } + + protected IndexMetaData buildIndexMetaData(int replicas, Settings indexSettings, Map mappings) throws IOException { Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) 
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), + randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)) + .put(indexSettings) .build(); IndexMetaData.Builder metaData = IndexMetaData.builder(index.getName()) .settings(settings) @@ -145,7 +159,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } }); - ReplicationGroup(final IndexMetaData indexMetaData) throws IOException { + protected ReplicationGroup(final IndexMetaData indexMetaData) throws IOException { final ShardRouting primaryRouting = this.createShardRouting("s0", true); primary = newShard(primaryRouting, indexMetaData, null, getEngineFactory(primaryRouting), () -> {}); replicas = new CopyOnWriteArrayList<>(); @@ -158,7 +172,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase private ShardRouting createShardRouting(String nodeId, boolean primary) { return TestShardRouting.newShardRouting(shardId, nodeId, primary, ShardRoutingState.INITIALIZING, - primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); + primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); } protected EngineFactory getEngineFactory(ShardRouting routing) { @@ -193,14 +207,23 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } public BulkItemResponse index(IndexRequest indexRequest) throws Exception { + return executeWriteRequest(indexRequest, indexRequest.getRefreshPolicy()); + } + + public BulkItemResponse delete(DeleteRequest deleteRequest) throws Exception { + return executeWriteRequest(deleteRequest, deleteRequest.getRefreshPolicy()); + } + + private BulkItemResponse executeWriteRequest( + DocWriteRequest<?> writeRequest, WriteRequest.RefreshPolicy refreshPolicy) throws Exception { PlainActionFuture<BulkItemResponse> listener = new PlainActionFuture<>(); final ActionListener<BulkShardResponse> wrapBulkListener = ActionListener.wrap( - bulkShardResponse -> listener.onResponse(bulkShardResponse.getResponses()[0]), - listener::onFailure); + bulkShardResponse -> listener.onResponse(bulkShardResponse.getResponses()[0]), + listener::onFailure); BulkItemRequest[] items = new BulkItemRequest[1]; - items[0] = new BulkItemRequest(0, indexRequest); - BulkShardRequest request = new BulkShardRequest(shardId, indexRequest.getRefreshPolicy(), items); - new IndexingAction(request, wrapBulkListener, this).execute(); + items[0] = new BulkItemRequest(0, writeRequest); + BulkShardRequest request = new BulkShardRequest(shardId, refreshPolicy, items); + new WriteReplicationAction(request, wrapBulkListener, this).execute(); return listener.get(); } @@ -438,7 +461,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } } - abstract class ReplicationAction<Request extends ReplicationRequest<Request>, + protected abstract class ReplicationAction<Request extends ReplicationRequest<Request>, ReplicaRequest extends ReplicationRequest<ReplicaRequest>, Response extends ReplicationResponse> { private final Request request; @@ -446,7 +469,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase private final ReplicationGroup replicationGroup; private final String opType; - ReplicationAction(Request request, ActionListener listener, ReplicationGroup group, String opType) { + protected
ReplicationAction(Request request, ActionListener listener, ReplicationGroup group, String opType) { this.request = request; this.listener = listener; this.replicationGroup = group; @@ -572,11 +595,11 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } } - class PrimaryResult implements ReplicationOperation.PrimaryResult { + protected class PrimaryResult implements ReplicationOperation.PrimaryResult { final ReplicaRequest replicaRequest; final Response finalResponse; - PrimaryResult(ReplicaRequest replicaRequest, Response finalResponse) { + public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponse) { this.replicaRequest = replicaRequest; this.finalResponse = finalResponse; } @@ -598,9 +621,9 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } - class IndexingAction extends ReplicationAction { + class WriteReplicationAction extends ReplicationAction { - IndexingAction(BulkShardRequest request, ActionListener listener, ReplicationGroup replicationGroup) { + WriteReplicationAction(BulkShardRequest request, ActionListener listener, ReplicationGroup replicationGroup) { super(request, listener, replicationGroup, "indexing"); } @@ -728,7 +751,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase @Override protected void performOnReplica(ResyncReplicationRequest request, IndexShard replica) throws Exception { - executeResyncOnReplica(replica, request); + executeResyncOnReplica(replica, request, getPrimaryShard().getPendingPrimaryTerm(), getPrimaryShard().getGlobalCheckpoint()); } } @@ -741,8 +764,15 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase return result; } - private void executeResyncOnReplica(IndexShard replica, ResyncReplicationRequest request) throws Exception { - final Translog.Location location = TransportResyncReplicationAction.performOnReplica(request, replica); + private void executeResyncOnReplica(IndexShard replica, ResyncReplicationRequest request, + long operationPrimaryTerm, long globalCheckpointOnPrimary) throws Exception { + final Translog.Location location; + final PlainActionFuture acquirePermitFuture = new PlainActionFuture<>(); + replica.acquireReplicaOperationPermit( + operationPrimaryTerm, globalCheckpointOnPrimary, acquirePermitFuture, ThreadPool.Names.SAME, request); + try (Releasable ignored = acquirePermitFuture.actionGet()) { + location = TransportResyncReplicationAction.performOnReplica(request, replica); + } TransportWriteActionTestHelper.performPostWriteActions(replica, request, location, logger); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index d2a84589669..ca2156144b3 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -18,13 +18,8 @@ */ package org.elasticsearch.index.shard; -import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexNotFoundException; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.Bits; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.index.IndexRequest; @@ 
-37,6 +32,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.uid.Versions; @@ -57,15 +53,12 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngineFactory; -import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; -import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -76,7 +69,6 @@ import org.elasticsearch.indices.recovery.RecoverySourceHandler; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryTarget; import org.elasticsearch.indices.recovery.StartRecoveryRequest; -import org.elasticsearch.node.Node; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.Repository; import org.elasticsearch.snapshots.Snapshot; @@ -163,54 +155,82 @@ public abstract class IndexShardTestCase extends ESTestCase { return Settings.EMPTY; } - protected Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException { return createStore(shardPath.getShardId(), indexSettings, newFSDirectory(shardPath.resolveIndex())); } protected Store createStore(ShardId shardId, IndexSettings indexSettings, Directory directory) throws IOException { - final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { - @Override - public Directory newDirectory() throws IOException { - return directory; - } - }; - return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); - + return new Store(shardId, indexSettings, directory, new DummyShardLock(shardId)); } /** - * creates a new initializing shard. The shard will have its own unique data path. + * Creates a new initializing shard. The shard will have its own unique data path. * - * @param primary indicates whether to a primary shard (ready to recover from an empty store) or a replica - * (ready to recover from another shard) + * @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica (ready to recover from + * another shard) */ protected IndexShard newShard(boolean primary) throws IOException { - ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId("index", "_na_", 0), randomAlphaOfLength(10), primary, - ShardRoutingState.INITIALIZING, - primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); - return newShard(shardRouting); + return newShard(primary, Settings.EMPTY); } /** - * creates a new initializing shard.
The shard will have its own unique data path. + * Creates a new initializing shard. The shard will have its own unique data path. * - * @param shardRouting the {@link ShardRouting} to use for this shard - * @param listeners an optional set of listeners to add to the shard + * @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica (ready to recover from + * another shard) + */ + protected IndexShard newShard(final boolean primary, final Settings settings) throws IOException { + return newShard(primary, settings, new InternalEngineFactory()); + } + + /** + * Creates a new initializing shard. The shard will have its own unique data path. + * + * @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica (ready to recover from + * another shard) + * @param settings the settings to use for this shard + * @param engineFactory the engine factory to use for this shard + */ + protected IndexShard newShard(boolean primary, Settings settings, EngineFactory engineFactory) throws IOException { + final RecoverySource recoverySource = + primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE; + final ShardRouting shardRouting = + TestShardRouting.newShardRouting( + new ShardId("index", "_na_", 0), randomAlphaOfLength(10), primary, ShardRoutingState.INITIALIZING, recoverySource); + return newShard(shardRouting, settings, engineFactory); + } + + protected IndexShard newShard(ShardRouting shardRouting, final IndexingOperationListener... listeners) throws IOException { + return newShard(shardRouting, Settings.EMPTY, new InternalEngineFactory(), listeners); + } + + /** + * Creates a new initializing shard. The shard will have its own unique data path. + * + * @param shardRouting the {@link ShardRouting} to use for this shard + * @param settings the settings to use for this shard + * @param engineFactory the engine factory to use for this shard + * @param listeners an optional set of listeners to add to the shard */ protected IndexShard newShard( final ShardRouting shardRouting, + final Settings settings, + final EngineFactory engineFactory, final IndexingOperationListener... listeners) throws IOException { assert shardRouting.initializing() : shardRouting; - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .build(); + Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), + randomBoolean() ?
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)) + .put(settings) + .build(); IndexMetaData.Builder metaData = IndexMetaData.builder(shardRouting.getIndexName()) - .settings(settings) + .settings(indexSettings) .primaryTerm(0, primaryTerm) .putMapping("_doc", "{ \"properties\": {} }"); - return newShard(shardRouting, metaData.build(), listeners); + return newShard(shardRouting, metaData.build(), engineFactory, listeners); } /** @@ -224,8 +244,8 @@ public abstract class IndexShardTestCase extends ESTestCase { protected IndexShard newShard(ShardId shardId, boolean primary, IndexingOperationListener... listeners) throws IOException { ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, randomAlphaOfLength(5), primary, ShardRoutingState.INITIALIZING, - primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); - return newShard(shardRouting, listeners); + primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); + return newShard(shardRouting, Settings.EMPTY, new InternalEngineFactory(), listeners); } /** @@ -252,7 +272,7 @@ public abstract class IndexShardTestCase extends ESTestCase { protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, IndexMetaData indexMetaData, @Nullable IndexSearcherWrapper searcherWrapper, Runnable globalCheckpointSyncer) throws IOException { ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, nodeId, primary, ShardRoutingState.INITIALIZING, - primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); + primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); return newShard(shardRouting, indexMetaData, searcherWrapper, new InternalEngineFactory(), globalCheckpointSyncer); } @@ -265,9 +285,10 @@ public abstract class IndexShardTestCase extends ESTestCase { * @param indexMetaData indexMetaData for the shard, including any mapping * @param listeners an optional set of listeners to add to the shard */ - protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, IndexingOperationListener... listeners) + protected IndexShard newShard( + ShardRouting routing, IndexMetaData indexMetaData, EngineFactory engineFactory, IndexingOperationListener... listeners) throws IOException { - return newShard(routing, indexMetaData, null, new InternalEngineFactory(), () -> {}, listeners); + return newShard(routing, indexMetaData, null, engineFactory, () -> {}, listeners); } /** @@ -298,23 +319,25 @@ public abstract class IndexShardTestCase extends ESTestCase { * @param routing shard routing to use * @param shardPath path to use for shard data * @param indexMetaData indexMetaData for the shard, including any mapping - * @param store an optional custom store to use. If null a default file based store will be created + * @param storeProvider an optional custom store provider to use. 
If null a default file based store will be created. * @param indexSearcherWrapper an optional wrapper to be used during searchers * @param globalCheckpointSyncer callback for syncing global checkpoints * @param indexEventListener index event listener * @param listeners an optional set of listeners to add to the shard */ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData, - @Nullable Store store, @Nullable IndexSearcherWrapper indexSearcherWrapper, + @Nullable CheckedFunction<IndexSettings, Store, IOException> storeProvider, + @Nullable IndexSearcherWrapper indexSearcherWrapper, @Nullable EngineFactory engineFactory, Runnable globalCheckpointSyncer, IndexEventListener indexEventListener, IndexingOperationListener... listeners) throws IOException { final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings); final IndexShard indexShard; - if (store == null) { - store = createStore(indexSettings, shardPath); + if (storeProvider == null) { + storeProvider = is -> createStore(is, shardPath); } + final Store store = storeProvider.apply(indexSettings); boolean success = false; try { IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null); @@ -348,7 +371,7 @@ public abstract class IndexShardTestCase extends ESTestCase { protected IndexShard reinitShard(IndexShard current, IndexingOperationListener... listeners) throws IOException { final ShardRouting shardRouting = current.routingEntry(); return reinitShard(current, ShardRoutingHelper.initWithSameId(shardRouting, - shardRouting.primary() ? RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE + shardRouting.primary() ? RecoverySource.ExistingStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE ), listeners); } @@ -372,19 +395,50 @@ } /** - * creates a new empyu shard and starts it. The shard will be either a replica or a primary. + * Creates a new empty shard and starts it. The shard will randomly be a replica or a primary. */ protected IndexShard newStartedShard() throws IOException { return newStartedShard(randomBoolean()); } /** - * creates a new empty shard and starts it. + * Creates a new empty shard and starts it. + * + * @param settings the settings to use for this shard + */ + protected IndexShard newStartedShard(Settings settings) throws IOException { + return newStartedShard(randomBoolean(), settings, new InternalEngineFactory()); + } + + /** + * Creates a new empty shard and starts it. * * @param primary controls whether the shard will be a primary or a replica. */ - protected IndexShard newStartedShard(boolean primary) throws IOException { - IndexShard shard = newShard(primary); + protected IndexShard newStartedShard(final boolean primary) throws IOException { + return newStartedShard(primary, Settings.EMPTY, new InternalEngineFactory()); + } + + /** + * Creates a new empty shard with the specified settings and engine factory and starts it. + * + * @param primary controls whether the shard will be a primary or a replica.
+ * @param settings the settings to use for this shard + * @param engineFactory the engine factory to use for this shard + */ + protected IndexShard newStartedShard( + final boolean primary, final Settings settings, final EngineFactory engineFactory) throws IOException { + return newStartedShard(p -> newShard(p, settings, engineFactory), primary); + } + + /** + * Creates a new empty shard and starts it. + * + * @param shardFunction shard factory function + * @param primary controls whether the shard will be a primary or a replica. + */ + protected IndexShard newStartedShard(CheckedFunction<Boolean, IndexShard, IOException> shardFunction, + boolean primary) throws IOException { + IndexShard shard = shardFunction.apply(primary); if (primary) { recoverShardFromStore(shard); } else { @@ -401,6 +455,7 @@ for (IndexShard shard : shards) { if (shard != null) { try { + assertConsistentHistoryBetweenTranslogAndLucene(shard); shard.close("test", false); } finally { IOUtils.close(shard.store()); @@ -509,11 +564,10 @@ final StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), targetAllocationId, pNode, rNode, snapshot, replica.routingEntry().primary(), 0, startingSeqNo); final RecoverySourceHandler recovery = new RecoverySourceHandler( - primary, - recoveryTarget, - request, - (int) ByteSizeUnit.MB.toBytes(1), - Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), pNode.getName()).build()); + primary, + recoveryTarget, + request, + (int) ByteSizeUnit.MB.toBytes(1)); primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(), inSyncIds, routingTable, Collections.emptySet()); recovery.recoverToTarget(); @@ -582,22 +636,7 @@ } protected Set<String> getShardDocUIDs(final IndexShard shard) throws IOException { - shard.refresh("get_uids"); - try (Engine.Searcher searcher = shard.acquireSearcher("test")) { - Set<String> ids = new HashSet<>(); - for (LeafReaderContext leafContext : searcher.reader().leaves()) { - LeafReader reader = leafContext.reader(); - Bits liveDocs = reader.getLiveDocs(); - for (int i = 0; i < reader.maxDoc(); i++) { - if (liveDocs == null || liveDocs.get(i)) { - Document uuid = reader.document(i, Collections.singleton(IdFieldMapper.NAME)); - BytesRef binaryID = uuid.getBinaryValue(IdFieldMapper.NAME); - ids.add(Uid.decodeId(Arrays.copyOfRange(binaryID.bytes, binaryID.offset, binaryID.offset + binaryID.length))); - } - } - } - return ids; - } + return EngineTestCase.getDocIds(shard.getEngine(), true); } protected void assertDocCount(IndexShard shard, int docDount) throws IOException { @@ -610,6 +649,12 @@ assertThat(shardDocUIDs, hasSize(ids.length)); } + public static void assertConsistentHistoryBetweenTranslogAndLucene(IndexShard shard) throws IOException { + final Engine engine = shard.getEngineOrNull(); + if (engine != null) { + EngineTestCase.assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, shard.mapperService()); + } + } protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id) throws IOException { return indexDoc(shard, type, id, "{}"); @@ -653,11 +698,14 @@ } protected Engine.DeleteResult deleteDoc(IndexShard shard, String type, String id) throws IOException { + final Engine.DeleteResult
result; if (shard.routingEntry().primary()) { - return shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, type, id, VersionType.INTERNAL); + result = shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, type, id, VersionType.INTERNAL); + shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(), shard.getEngine().getLocalCheckpoint()); } else { - return shard.applyDeleteOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0L, type, id); + result = shard.applyDeleteOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0L, type, id); } + return result; } protected void flushShard(IndexShard shard) { @@ -678,7 +726,7 @@ public abstract class IndexShardTestCase extends ESTestCase { final IndexId indexId = new IndexId(shardId.getIndex().getName(), shardId.getIndex().getUUID()); final DiscoveryNode node = getFakeDiscoNode(shard.routingEntry().currentNodeId()); final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource(snapshot, version, index); - final ShardRouting shardRouting = newShardRouting(shardId, node.getId(), true, recoverySource, ShardRoutingState.INITIALIZING); + final ShardRouting shardRouting = newShardRouting(shardId, node.getId(), true, ShardRoutingState.INITIALIZING, recoverySource); shard.markAsRecovering("from snapshot", new RecoveryState(shardRouting, node, null)); repository.restoreShard(shard, snapshot.getSnapshotId(), version, indexId, shard.shardId(), shard.recoveryState()); diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index 5298c3995ce..2164fe32a39 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -29,7 +29,6 @@ import org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenizer; import org.elasticsearch.index.analysis.ShingleTokenFilterFactory; -import org.elasticsearch.index.analysis.StandardTokenFilterFactory; import org.elasticsearch.index.analysis.StandardTokenizerFactory; import org.elasticsearch.index.analysis.StopTokenFilterFactory; import org.elasticsearch.index.analysis.SynonymGraphTokenFilterFactory; @@ -167,7 +166,6 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase { .put("soraninormalization", MovedToAnalysisCommon.class) .put("soranistem", MovedToAnalysisCommon.class) .put("spanishlightstem", MovedToAnalysisCommon.class) - .put("standard", StandardTokenFilterFactory.class) .put("stemmeroverride", MovedToAnalysisCommon.class) .put("stop", StopTokenFilterFactory.class) .put("swedishlightstem", MovedToAnalysisCommon.class) @@ -267,8 +265,9 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase { */ protected Map> getPreConfiguredTokenFilters() { Map> filters = new HashMap<>(); - filters.put("standard", null); filters.put("lowercase", null); + // for old indices + filters.put("standard", Void.class); return filters; } diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/TestProcessor.java b/test/framework/src/main/java/org/elasticsearch/ingest/TestProcessor.java index 4e4c5a24c0c..a1feb3e1f73 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/TestProcessor.java +++ 
b/test/framework/src/main/java/org/elasticsearch/ingest/TestProcessor.java @@ -45,9 +45,10 @@ public class TestProcessor implements Processor { } @Override - public void execute(IngestDocument ingestDocument) throws Exception { + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { invokedCounter.incrementAndGet(); ingestDocumentConsumer.accept(ingestDocument); + return ingestDocument; } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index 79694f8050a..67d91e97e16 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -65,18 +65,36 @@ import java.util.function.Function; * */ public class MockNode extends Node { + private final Collection> classpathPlugins; - public MockNode(Settings settings, Collection> classpathPlugins) { - this(settings, classpathPlugins, null); + public MockNode(final Settings settings, final Collection> classpathPlugins) { + this(settings, classpathPlugins, true); } - public MockNode(Settings settings, Collection> classpathPlugins, Path configPath) { - this(InternalSettingsPreparer.prepareEnvironment(settings, Collections.emptyMap(), configPath), classpathPlugins); + public MockNode( + final Settings settings, + final Collection> classpathPlugins, + final boolean forbidPrivateIndexSettings) { + this(settings, classpathPlugins, null, forbidPrivateIndexSettings); } - public MockNode(Environment environment, Collection> classpathPlugins) { - super(environment, classpathPlugins); + public MockNode( + final Settings settings, + final Collection> classpathPlugins, + final Path configPath, + final boolean forbidPrivateIndexSettings) { + this( + InternalSettingsPreparer.prepareEnvironment(settings, Collections.emptyMap(), configPath), + classpathPlugins, + forbidPrivateIndexSettings); + } + + private MockNode( + final Environment environment, + final Collection> classpathPlugins, + final boolean forbidPrivateIndexSettings) { + super(environment, classpathPlugins, forbidPrivateIndexSettings); this.classpathPlugins = classpathPlugins; } @@ -156,5 +174,9 @@ public class MockNode extends Node { return new MockHttpTransport(); } } -} + @Override + protected void registerDerivedNodeNameWithLogger(String nodeName) { + // Nothing to do because test uses the thread name + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index 0d340a91d4c..be77846b2ba 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -20,7 +20,7 @@ package org.elasticsearch.script; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.index.similarity.ScriptedSimilarity.Doc; import org.elasticsearch.index.similarity.ScriptedSimilarity.Field; import org.elasticsearch.index.similarity.ScriptedSimilarity.Query; @@ -96,6 +96,14 @@ public class MockScriptEngine implements ScriptEngine { } }; return context.factoryClazz.cast(factory); + } else if (context.instanceClazz.equals(IngestConditionalScript.class)) { + IngestConditionalScript.Factory factory = parameters -> new IngestConditionalScript(parameters) { + @Override + public boolean execute(Map 
ctx) { + return (boolean) script.apply(ctx); + } + }; + return context.factoryClazz.cast(factory); } else if (context.instanceClazz.equals(UpdateScript.class)) { UpdateScript.Factory factory = parameters -> new UpdateScript(parameters) { @Override @@ -242,16 +250,18 @@ public class MockScriptEngine implements ScriptEngine { return new MockMovingFunctionScript(); } - public ScriptedMetricAggContexts.InitScript createMetricAggInitScript(Map params, Object state) { + public ScriptedMetricAggContexts.InitScript createMetricAggInitScript(Map params, Map state) { return new MockMetricAggInitScript(params, state, script != null ? script : ctx -> 42d); } - public ScriptedMetricAggContexts.MapScript.LeafFactory createMetricAggMapScript(Map params, Object state, + public ScriptedMetricAggContexts.MapScript.LeafFactory createMetricAggMapScript(Map params, + Map state, SearchLookup lookup) { return new MockMetricAggMapScript(params, state, lookup, script != null ? script : ctx -> 42d); } - public ScriptedMetricAggContexts.CombineScript createMetricAggCombineScript(Map params, Object state) { + public ScriptedMetricAggContexts.CombineScript createMetricAggCombineScript(Map params, + Map state) { return new MockMetricAggCombineScript(params, state, script != null ? script : ctx -> 42d); } @@ -324,7 +334,7 @@ public class MockScriptEngine implements ScriptEngine { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { ctx.put("_score", new ScoreAccessor(scorer)); } @@ -383,7 +393,7 @@ public class MockScriptEngine implements ScriptEngine { } @Override - public double execute(double weight, Query query, Field field, Term term, Doc doc) throws IOException { + public double execute(double weight, Query query, Field field, Term term, Doc doc) { Map map = new HashMap<>(); map.put("weight", weight); map.put("query", query); @@ -403,7 +413,7 @@ public class MockScriptEngine implements ScriptEngine { } @Override - public double execute(Query query, Field field, Term term) throws IOException { + public double execute(Query query, Field field, Term term) { Map map = new HashMap<>(); map.put("query", query); map.put("field", field); @@ -415,7 +425,7 @@ public class MockScriptEngine implements ScriptEngine { public static class MockMetricAggInitScript extends ScriptedMetricAggContexts.InitScript { private final Function, Object> script; - MockMetricAggInitScript(Map params, Object state, + MockMetricAggInitScript(Map params, Map state, Function, Object> script) { super(params, state); this.script = script; @@ -436,11 +446,11 @@ public class MockScriptEngine implements ScriptEngine { public static class MockMetricAggMapScript implements ScriptedMetricAggContexts.MapScript.LeafFactory { private final Map params; - private final Object state; + private final Map state; private final SearchLookup lookup; private final Function, Object> script; - MockMetricAggMapScript(Map params, Object state, SearchLookup lookup, + MockMetricAggMapScript(Map params, Map state, SearchLookup lookup, Function, Object> script) { this.params = params; this.state = state; @@ -473,7 +483,7 @@ public class MockScriptEngine implements ScriptEngine { public static class MockMetricAggCombineScript extends ScriptedMetricAggContexts.CombineScript { private final Function, Object> script; - MockMetricAggCombineScript(Map params, Object state, + MockMetricAggCombineScript(Map params, Map state, Function, Object> script) { super(params, state); this.script = script; @@ -543,7 +553,7 @@ public class 
MockScriptEngine implements ScriptEngine { @Override public ScoreScript newInstance(LeafReaderContext ctx) throws IOException { - Scorer[] scorerHolder = new Scorer[1]; + Scorable[] scorerHolder = new Scorable[1]; return new ScoreScript(params, lookup, ctx) { @Override public double execute() { @@ -556,7 +566,7 @@ public class MockScriptEngine implements ScriptEngine { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { scorerHolder[0] = scorer; } }; diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 3002711bdbd..17202839a65 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -37,6 +38,7 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.index.Index; @@ -137,6 +139,7 @@ public abstract class AggregatorTestCase extends ESTestCase { when(mapperService.getIndexSettings()).thenReturn(indexSettings); when(mapperService.hasNested()).thenReturn(false); DocumentMapper mapper = mock(DocumentMapper.class); + when(mapper.typeText()).thenReturn(new Text(TYPE_NAME)); when(mapper.type()).thenReturn(TYPE_NAME); when(mapperService.documentMapper()).thenReturn(mapper); when(searchContext.mapperService()).thenReturn(mapperService); @@ -365,7 +368,7 @@ public abstract class AggregatorTestCase extends ESTestCase { List aggs = new ArrayList<> (); Query rewritten = searcher.rewrite(query); - Weight weight = searcher.createWeight(rewritten, true, 1f); + Weight weight = searcher.createWeight(rewritten, ScoreMode.COMPLETE, 1f); MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(maxBucket); C root = createAggregator(query, builder, searcher, bucketConsumer, fieldTypes); diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/InternalSingleBucketAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/InternalSingleBucketAggregationTestCase.java index 56a4bc983ca..e32734b887b 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/InternalSingleBucketAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/InternalSingleBucketAggregationTestCase.java @@ -25,8 +25,8 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation; import org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregation; -import org.elasticsearch.search.aggregations.metrics.max.InternalMax; 
-import org.elasticsearch.search.aggregations.metrics.min.InternalMin; +import org.elasticsearch.search.aggregations.metrics.InternalMax; +import org.elasticsearch.search.aggregations.metrics.InternalMin; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 58593cbe2fd..60f93f8ea30 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -340,8 +340,8 @@ public abstract class AbstractBuilderTestCase extends ESTestCase { clientInvocationHandler); ScriptModule scriptModule = createScriptModule(pluginsService.filterPlugins(ScriptPlugin.class)); List> additionalSettings = pluginsService.getPluginSettings(); - additionalSettings.add(InternalSettingsPlugin.VERSION_CREATED); - SettingsModule settingsModule = new SettingsModule(nodeSettings, additionalSettings, pluginsService.getPluginSettingsFilter()); + SettingsModule settingsModule = + new SettingsModule(nodeSettings, additionalSettings, pluginsService.getPluginSettingsFilter(), Collections.emptySet()); searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class)); IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class)); List entries = new ArrayList<>(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java index 6ec32f6654f..5aeb30bfdbd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.test; import org.elasticsearch.common.Strings; @@ -34,9 +35,17 @@ public abstract class AbstractSerializingTestCase 0) { @@ -2254,6 +2254,10 @@ public abstract class ESIntegTestCase extends ESTestCase { } } + protected boolean forbidPrivateIndexSettings() { + return true; + } + /** * Returns an instance of {@link RestClient} pointing to the current test cluster. * Creates a new client if the method is invoked for the first time in the context of the current test scope. 
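The forbidPrivateIndexSettings() hook added to ESIntegTestCase in the hunk above defaults to true, so integration tests now reject index creation that carries a private index setting. A test that legitimately needs such a setting can opt out by overriding the hook; a hypothetical example (the class name and setting are illustrative):

    public class LegacyVersionCreatedIT extends ESIntegTestCase {
        @Override
        protected boolean forbidPrivateIndexSettings() {
            return false; // allow private settings such as index.version.created in this test class only
        }
    }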
@@ -2328,37 +2332,48 @@ public abstract class ESIntegTestCase extends ESTestCase { protected void assertSeqNos() throws Exception { assertBusy(() -> { - IndicesStatsResponse stats = client().admin().indices().prepareStats().clear().get(); - for (IndexStats indexStats : stats.getIndices().values()) { - for (IndexShardStats indexShardStats : indexStats.getIndexShards().values()) { - Optional maybePrimary = Stream.of(indexShardStats.getShards()) - .filter(s -> s.getShardRouting().active() && s.getShardRouting().primary()) - .findFirst(); - if (maybePrimary.isPresent() == false) { + final ClusterState state = clusterService().state(); + for (ObjectObjectCursor indexRoutingTable : state.routingTable().indicesRouting()) { + for (IntObjectCursor indexShardRoutingTable : indexRoutingTable.value.shards()) { + ShardRouting primaryShardRouting = indexShardRoutingTable.value.primaryShard(); + if (primaryShardRouting == null || primaryShardRouting.assignedToNode() == false) { continue; } - ShardStats primary = maybePrimary.get(); - final SeqNoStats primarySeqNoStats = primary.getSeqNoStats(); - final ShardRouting primaryShardRouting = primary.getShardRouting(); + DiscoveryNode primaryNode = state.nodes().get(primaryShardRouting.currentNodeId()); + IndexShard primaryShard = internalCluster().getInstance(IndicesService.class, primaryNode.getName()) + .indexServiceSafe(primaryShardRouting.index()).getShard(primaryShardRouting.id()); + final SeqNoStats primarySeqNoStats; + final ObjectLongMap syncGlobalCheckpoints; + try { + primarySeqNoStats = primaryShard.seqNoStats(); + syncGlobalCheckpoints = primaryShard.getInSyncGlobalCheckpoints(); + } catch (AlreadyClosedException ex) { + continue; // shard is closed - just ignore + } assertThat(primaryShardRouting + " should have set the global checkpoint", - primarySeqNoStats.getGlobalCheckpoint(), not(equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO))); - final DiscoveryNode node = clusterService().state().nodes().get(primaryShardRouting.currentNodeId()); - final IndicesService indicesService = - internalCluster().getInstance(IndicesService.class, node.getName()); - final IndexShard indexShard = indicesService.getShardOrNull(primaryShardRouting.shardId()); - final ObjectLongMap globalCheckpoints = indexShard.getInSyncGlobalCheckpoints(); - for (ShardStats shardStats : indexShardStats) { - final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); - assertThat(shardStats.getShardRouting() + " local checkpoint mismatch", - seqNoStats.getLocalCheckpoint(), equalTo(primarySeqNoStats.getLocalCheckpoint())); - assertThat(shardStats.getShardRouting() + " global checkpoint mismatch", - seqNoStats.getGlobalCheckpoint(), equalTo(primarySeqNoStats.getGlobalCheckpoint())); - assertThat(shardStats.getShardRouting() + " max seq no mismatch", - seqNoStats.getMaxSeqNo(), equalTo(primarySeqNoStats.getMaxSeqNo())); + primarySeqNoStats.getGlobalCheckpoint(), not(equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO))); + for (ShardRouting replicaShardRouting : indexShardRoutingTable.value.replicaShards()) { + if (replicaShardRouting.assignedToNode() == false) { + continue; + } + DiscoveryNode replicaNode = state.nodes().get(replicaShardRouting.currentNodeId()); + IndexShard replicaShard = internalCluster().getInstance(IndicesService.class, replicaNode.getName()) + .indexServiceSafe(replicaShardRouting.index()).getShard(replicaShardRouting.id()); + final SeqNoStats seqNoStats; + try { + seqNoStats = replicaShard.seqNoStats(); + } catch (AlreadyClosedException e) { + continue; // shard is closed - just 
ignore + } + assertThat(replicaShardRouting + " local checkpoint mismatch", + seqNoStats.getLocalCheckpoint(), equalTo(primarySeqNoStats.getLocalCheckpoint())); + assertThat(replicaShardRouting + " global checkpoint mismatch", + seqNoStats.getGlobalCheckpoint(), equalTo(primarySeqNoStats.getGlobalCheckpoint())); + assertThat(replicaShardRouting + " max seq no mismatch", + seqNoStats.getMaxSeqNo(), equalTo(primarySeqNoStats.getMaxSeqNo())); // the local knowledge on the primary of the global checkpoint equals the global checkpoint on the shard - assertThat( - seqNoStats.getGlobalCheckpoint(), - equalTo(globalCheckpoints.get(shardStats.getShardRouting().allocationId().getId()))); + assertThat(replicaShardRouting + " global checkpoint syncs mismatch", seqNoStats.getGlobalCheckpoint(), + equalTo(syncGlobalCheckpoints.get(replicaShardRouting.allocationId().getId()))); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 9633f56dea9..d73520f91b3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -41,6 +41,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.node.MockNode; @@ -87,6 +88,14 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { .setOrder(0) .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).get(); + client().admin().indices() + .preparePutTemplate("random-soft-deletes-template") + .setPatterns(Collections.singletonList("*")) + .setOrder(0) + .setSettings(Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), + randomBoolean() ? 
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)) + ).get(); } private static void stopNode() throws IOException { @@ -202,7 +211,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { if (addMockHttpTransport()) { plugins.add(MockHttpTransport.TestPlugin.class); } - Node build = new MockNode(settings, plugins); + Node build = new MockNode(settings, plugins, forbidPrivateIndexSettings()); try { build.start(); } catch (NodeValidationException e) { @@ -332,4 +341,9 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { protected NamedXContentRegistry xContentRegistry() { return getInstanceFromNode(NamedXContentRegistry.class); } + + protected boolean forbidPrivateIndexSettings() { + return true; + } + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index d8cd22d92db..66b795731b1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -934,7 +934,7 @@ public abstract class ESTestCase extends LuceneTestCase { .put(settings) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()).build(); - return new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); + return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {}); } /** Return consistent index settings for the provided index version. */ diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java index 15e44853a97..1149c7b0941 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java @@ -85,38 +85,38 @@ import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms; import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongTerms; import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.ParsedAvg; -import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.cardinality.ParsedCardinality; -import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.geobounds.ParsedGeoBounds; -import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.geocentroid.ParsedGeoCentroid; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.max.ParsedMax; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.min.ParsedMin; -import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentiles; -import 
org.elasticsearch.search.aggregations.metrics.percentiles.hdr.ParsedHDRPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.ParsedHDRPercentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.ParsedTDigestPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.ParsedTDigestPercentiles; -import org.elasticsearch.search.aggregations.metrics.scripted.ParsedScriptedMetric; -import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.stats.ParsedStats; -import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.stats.extended.ParsedExtendedStats; -import org.elasticsearch.search.aggregations.metrics.sum.ParsedSum; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.tophits.ParsedTopHits; -import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.valuecount.ParsedValueCount; -import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ParsedAvg; +import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ParsedCardinality; +import org.elasticsearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ParsedGeoBounds; +import org.elasticsearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ParsedGeoCentroid; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ParsedMax; +import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ParsedMin; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; +import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentiles; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles; +import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentiles; +import org.elasticsearch.search.aggregations.metrics.ParsedScriptedMetric; +import org.elasticsearch.search.aggregations.metrics.ScriptedMetricAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ParsedStats; +import org.elasticsearch.search.aggregations.metrics.StatsAggregationBuilder; +import 
org.elasticsearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ParsedExtendedStats; +import org.elasticsearch.search.aggregations.metrics.ParsedSum; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ParsedTopHits; +import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ParsedValueCount; +import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java b/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java index be8c824f0f7..1f4a35a29c2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java @@ -32,8 +32,6 @@ import java.util.concurrent.TimeUnit; public final class InternalSettingsPlugin extends Plugin { - public static final Setting VERSION_CREATED = - Setting.intSetting("index.version.created", 0, Property.IndexScope, Property.NodeScope); public static final Setting PROVIDED_NAME_SETTING = Setting.simpleString("index.provided_name",Property.IndexScope, Property.NodeScope); public static final Setting MERGE_ENABLED = @@ -47,7 +45,6 @@ public final class InternalSettingsPlugin extends Plugin { @Override public List> getSettings() { return Arrays.asList( - VERSION_CREATED, MERGE_ENABLED, INDEX_CREATION_DATE_SETTING, PROVIDED_NAME_SETTING, diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 306f79e5e16..3c46acd0fbe 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -26,7 +26,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; @@ -64,6 +63,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -205,6 +205,8 @@ public final class InternalTestCluster extends TestCluster { private final Collection> mockPlugins; + private final boolean forbidPrivateIndexSettings; + /** * All nodes started by the cluster will have their name set to nodePrefix followed by a positive number */ @@ -214,13 +216,53 @@ public final class InternalTestCluster extends TestCluster 
{
     private ServiceDisruptionScheme activeDisruptionScheme;
     private Function<Client, Client> clientWrapper;
-    public InternalTestCluster(long clusterSeed, Path baseDir,
-                               boolean randomlyAddDedicatedMasters,
-                               boolean autoManageMinMasterNodes, int minNumDataNodes, int maxNumDataNodes, String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes,
-                               String nodePrefix, Collection<Class<? extends Plugin>> mockPlugins, Function<Client, Client> clientWrapper) {
+    public InternalTestCluster(
+            final long clusterSeed,
+            final Path baseDir,
+            final boolean randomlyAddDedicatedMasters,
+            final boolean autoManageMinMasterNodes,
+            final int minNumDataNodes,
+            final int maxNumDataNodes,
+            final String clusterName,
+            final NodeConfigurationSource nodeConfigurationSource,
+            final int numClientNodes,
+            final String nodePrefix,
+            final Collection<Class<? extends Plugin>> mockPlugins,
+            final Function<Client, Client> clientWrapper) {
+        this(
+                clusterSeed,
+                baseDir,
+                randomlyAddDedicatedMasters,
+                autoManageMinMasterNodes,
+                minNumDataNodes,
+                maxNumDataNodes,
+                clusterName,
+                nodeConfigurationSource,
+                numClientNodes,
+                nodePrefix,
+                mockPlugins,
+                clientWrapper,
+                true);
+    }
+
+    public InternalTestCluster(
+            final long clusterSeed,
+            final Path baseDir,
+            final boolean randomlyAddDedicatedMasters,
+            final boolean autoManageMinMasterNodes,
+            final int minNumDataNodes,
+            final int maxNumDataNodes,
+            final String clusterName,
+            final NodeConfigurationSource nodeConfigurationSource,
+            final int numClientNodes,
+            final String nodePrefix,
+            final Collection<Class<? extends Plugin>> mockPlugins,
+            final Function<Client, Client> clientWrapper,
+            final boolean forbidPrivateIndexSettings) {
         super(clusterSeed);
         this.autoManageMinMasterNodes = autoManageMinMasterNodes;
         this.clientWrapper = clientWrapper;
+        this.forbidPrivateIndexSettings = forbidPrivateIndexSettings;
         this.baseDir = baseDir;
         this.clusterName = clusterName;
         if (minNumDataNodes < 0 || maxNumDataNodes < 0) {
@@ -583,7 +625,11 @@ public final class InternalTestCluster extends TestCluster {
                 // we clone this here since in the case of a node restart we might need it again
                 secureSettings = ((MockSecureSettings) secureSettings).clone();
             }
-            MockNode node = new MockNode(finalSettings.build(), plugins, nodeConfigurationSource.nodeConfigPath(nodeId));
+            MockNode node = new MockNode(
+                    finalSettings.build(),
+                    plugins,
+                    nodeConfigurationSource.nodeConfigPath(nodeId),
+                    forbidPrivateIndexSettings);
             try {
                 IOUtils.close(secureSettings);
             } catch (IOException e) {
@@ -1163,6 +1209,26 @@ public final class InternalTestCluster extends TestCluster {
         });
     }
+    /**
+     * Asserts that the document history in the Lucene index is consistent with the translog on every index shard of the cluster.
+     * This assertion can be expensive, so we prefer not to run it after every test, but only in tests where it is interesting.
+     */
+    public void assertConsistentHistoryBetweenTranslogAndLuceneIndex() throws IOException {
+        final Collection<NodeAndClient> nodesAndClients = nodes.values();
+        for (NodeAndClient nodeAndClient : nodesAndClients) {
+            IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
+            for (IndexService indexService : indexServices) {
+                for (IndexShard indexShard : indexService) {
+                    try {
+                        IndexShardTestCase.assertConsistentHistoryBetweenTranslogAndLucene(indexShard);
+                    } catch (AlreadyClosedException ignored) {
+                        // shard is closed
+                    }
+                }
+            }
+        }
+    }
+
     private void randomlyResetClients() throws IOException {
         // only reset the clients on nightly tests, it causes heavy load...
        if (RandomizedTest.isNightly() && rarely(random)) {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java
index a53ba046d32..895bd7ec77a 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java
@@ -85,7 +85,7 @@ public class MockLogAppender extends AbstractAppender {
         @Override
         public void match(LogEvent event) {
-            if (event.getLevel().equals(level) && event.getLoggerName().equals(logger)) {
+            if (event.getLevel().equals(level) && event.getLoggerName().equals(logger) && innerMatch(event)) {
                 if (Regex.isSimpleMatchPattern(message)) {
                     if (Regex.simpleMatch(message, event.getMessage().getFormattedMessage())) {
                         saw = true;
@@ -97,6 +97,11 @@ public class MockLogAppender extends AbstractAppender {
                 }
             }
         }
+
+        public boolean innerMatch(final LogEvent event) {
+            return true;
+        }
+
     }
 
     public static class UnseenEventExpectation extends AbstractEventExpectation {
@@ -123,6 +128,32 @@ public class MockLogAppender extends AbstractAppender {
         }
     }
 
+    public static class ExceptionSeenEventExpectation extends SeenEventExpectation {
+
+        private final Class<? extends Exception> clazz;
+        private final String exceptionMessage;
+
+        public ExceptionSeenEventExpectation(
+                final String name,
+                final String logger,
+                final Level level,
+                final String message,
+                final Class<? extends Exception> clazz,
+                final String exceptionMessage) {
+            super(name, logger, level, message);
+            this.clazz = clazz;
+            this.exceptionMessage = exceptionMessage;
+        }
+
+        @Override
+        public boolean innerMatch(final LogEvent event) {
+            return event.getThrown() != null
+                    && event.getThrown().getClass() == clazz
+                    && event.getThrown().getMessage().equals(exceptionMessage);
+        }
+
+    }
+
     public static class PatternSeenEventExcpectation implements LoggingExpectation {
 
         protected final String name;
diff --git a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java
deleted file mode 100644
index 4c4fe8f76ad..00000000000
--- a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ - -package org.elasticsearch.test; - -import org.apache.logging.log4j.Logger; -import org.apache.lucene.index.IndexWriter; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.index.MergePolicyConfig; - -import java.io.IOException; -import java.nio.file.DirectoryStream; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.attribute.BasicFileAttributes; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import static junit.framework.TestCase.assertFalse; -import static junit.framework.TestCase.assertTrue; -import static org.elasticsearch.test.ESTestCase.randomInt; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertNotNull; - - -public class OldIndexUtils { - - public static List loadDataFilesList(String prefix, Path bwcIndicesPath) throws IOException { - List indexes = new ArrayList<>(); - try (DirectoryStream stream = Files.newDirectoryStream(bwcIndicesPath, prefix + "-*.zip")) { - for (Path path : stream) { - indexes.add(path.getFileName().toString()); - } - } - Collections.sort(indexes); - return indexes; - } - - public static Settings getSettings() { - return Settings.builder() - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) // disable merging so no segments will be upgraded - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 30) // - // speed up recoveries - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), 30) - .build(); - } - - public static Path getIndexDir( - final Logger logger, - final String indexName, - final String indexFile, - final Path dataDir) throws IOException { - final Version version = Version.fromString(indexName.substring("index-".length())); - if (version.before(Version.V_5_0_0_alpha1)) { - // the bwc scripts packs the indices under this path - Path src = dataDir.resolve("nodes/0/indices/" + indexName); - assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src)); - return src; - } else { - final List indexFolders = new ArrayList<>(); - try (DirectoryStream stream = Files.newDirectoryStream(dataDir.resolve("0/indices"), - (p) -> p.getFileName().toString().startsWith("extra") == false)) { // extra FS can break this... - for (final Path path : stream) { - indexFolders.add(path); - } - } - assertThat(indexFolders.toString(), indexFolders.size(), equalTo(1)); - final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, - indexFolders.get(0)); - assertNotNull(indexMetaData); - assertThat(indexFolders.get(0).getFileName().toString(), equalTo(indexMetaData.getIndexUUID())); - assertThat(indexMetaData.getCreationVersion(), equalTo(version)); - return indexFolders.get(0); - } - } - - // randomly distribute the files from src over dests paths - public static void copyIndex(final Logger logger, final Path src, final String folderName, final Path... 
dests) throws IOException { - Path destinationDataPath = dests[randomInt(dests.length - 1)]; - for (Path dest : dests) { - Path indexDir = dest.resolve(folderName); - assertFalse(Files.exists(indexDir)); - Files.createDirectories(indexDir); - } - Files.walkFileTree(src, new SimpleFileVisitor() { - @Override - public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { - Path relativeDir = src.relativize(dir); - for (Path dest : dests) { - Path destDir = dest.resolve(folderName).resolve(relativeDir); - Files.createDirectories(destDir); - } - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - if (file.getFileName().toString().equals(IndexWriter.WRITE_LOCK_NAME)) { - // skip lock file, we don't need it - logger.trace("Skipping lock file: {}", file); - return FileVisitResult.CONTINUE; - } - - Path relativeFile = src.relativize(file); - Path destFile = destinationDataPath.resolve(folderName).resolve(relativeFile); - logger.trace("--> Moving {} to {}", relativeFile, destFile); - Files.move(file, destFile); - assertFalse(Files.exists(file)); - assertTrue(Files.exists(destFile)); - return FileVisitResult.CONTINUE; - } - }); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java index fc2a85b35a9..27bcb5868c5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -143,7 +143,7 @@ public final class MockEngineSupport { } // this executes basic query checks and asserts that weights are normalized only once etc. final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader); - assertingIndexSearcher.setSimilarity(searcher.searcher().getSimilarity(true)); + assertingIndexSearcher.setSimilarity(searcher.searcher().getSimilarity()); assertingIndexSearcher.setQueryCache(filterCache); assertingIndexSearcher.setQueryCachingPolicy(filterCachingPolicy); return assertingIndexSearcher; @@ -185,7 +185,7 @@ public final class MockEngineSupport { public Engine.Searcher wrapSearcher(String source, Engine.Searcher engineSearcher) { final AssertingIndexSearcher assertingIndexSearcher = newSearcher(engineSearcher); - assertingIndexSearcher.setSimilarity(engineSearcher.searcher().getSimilarity(true)); + assertingIndexSearcher.setSimilarity(engineSearcher.searcher().getSimilarity()); // pass the original searcher to the super.newSearcher() method to make sure this is the searcher that will // be released later on. If we wrap an index reader here must not pass the wrapped version to the manager // on release otherwise the reader will be closed too early. 
- good news, stuff will fail all over the place if we don't get this right here diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index ecb965040f8..9d47c4e24a9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -236,6 +236,16 @@ public abstract class ESRestTestCase extends ESTestCase { return false; } + /** + * Controls whether or not to preserve cluster settings upon completion of the test. The default implementation is to remove all cluster + * settings. + * + * @return true if cluster settings should be preserved and otherwise false + */ + protected boolean preserveClusterSettings() { + return false; + } + /** * Returns whether to preserve the repositories on completion of this test. * Defaults to not preserving repos. See also @@ -295,7 +305,11 @@ public abstract class ESRestTestCase extends ESTestCase { } wipeSnapshots(); - wipeClusterSettings(); + + // wipe cluster settings + if (preserveClusterSettings() == false) { + wipeClusterSettings(); + } } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java index 82d8dbeebe6..6ecaae75a8e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java @@ -32,6 +32,7 @@ import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThat; /** @@ -70,8 +71,13 @@ public class MatchAssertion extends Assertion { } } - assertNotNull("field [" + getField() + "] is null", actualValue); logger.trace("assert that [{}] matches [{}] (field [{}])", actualValue, expectedValue, getField()); + if (expectedValue == null) { + assertNull("field [" + getField() + "] should be null but was [" + actualValue + "]", actualValue); + return; + } + assertNotNull("field [" + getField() + "] is null", actualValue); + if (actualValue.getClass().equals(safeClass(expectedValue)) == false) { if (actualValue instanceof Number && expectedValue instanceof Number) { //Double 1.0 is equal to Integer 1 diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 8a22383dcae..777f790d2dd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -101,7 +101,7 @@ public class MockFSIndexStore extends IndexStore { if (indexShard != null) { Boolean remove = shardSet.remove(indexShard); if (remove == Boolean.TRUE) { - Logger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + Logger logger = Loggers.getLogger(getClass(), indexShard.shardId()); MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java 
b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java index 94a6e1b82ee..132a07d5b7f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -51,7 +51,6 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportStats; import java.io.IOException; -import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -66,11 +65,13 @@ import java.util.function.Function; import static org.apache.lucene.util.LuceneTestCase.rarely; -/** A transport class that doesn't send anything but rather captures all requests for inspection from tests */ +/** + * A transport class that doesn't send anything but rather captures all requests for inspection from tests + */ public class CapturingTransport implements Transport { private volatile Map requestHandlers = Collections.emptyMap(); - final Object requestHandlerMutex = new Object(); + private final Object requestHandlerMutex = new Object(); private final ResponseHandlers responseHandlers = new ResponseHandlers(); private TransportMessageListener listener; @@ -80,7 +81,7 @@ public class CapturingTransport implements Transport { public final String action; public final TransportRequest request; - public CapturedRequest(DiscoveryNode node, long requestId, String action, TransportRequest request) { + CapturedRequest(DiscoveryNode node, long requestId, String action, TransportRequest request) { this.node = node; this.requestId = requestId; this.action = action; @@ -96,14 +97,15 @@ public class CapturingTransport implements Transport { @Nullable ClusterSettings clusterSettings, Set taskHeaders) { StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ConnectionManager(settings, this, threadPool), settings, this, threadPool); - connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> true); + connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> nodeConnected(discoveryNode)); connectionManager.setDefaultConnectBehavior((cm, discoveryNode) -> openConnection(discoveryNode, null)); return new TransportService(settings, this, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, connectionManager); - } - /** returns all requests captured so far. Doesn't clear the captured request list. See {@link #clear()} */ + /** + * returns all requests captured so far. Doesn't clear the captured request list. 
See {@link #clear()} + */ public CapturedRequest[] capturedRequests() { return capturedRequests.toArray(new CapturedRequest[0]); } @@ -151,12 +153,16 @@ public class CapturingTransport implements Transport { return groupRequestsByTargetNode(requests); } - /** clears captured requests */ + /** + * clears captured requests + */ public void clear() { capturedRequests.clear(); } - /** simulate a response for the given requestId */ + /** + * simulate a response for the given requestId + */ public void handleResponse(final long requestId, final TransportResponse response) { responseHandlers.onResponseReceived(requestId, listener).handleResponse(response); } @@ -167,7 +173,7 @@ public class CapturingTransport implements Transport { * * @param requestId the id corresponding to the captured send * request - * @param t the failure to wrap + * @param t the failure to wrap */ public void handleLocalError(final long requestId, final Throwable t) { Tuple request = requests.get(requestId); @@ -181,7 +187,7 @@ public class CapturingTransport implements Transport { * * @param requestId the id corresponding to the captured send * request - * @param t the failure to wrap + * @param t the failure to wrap */ public void handleRemoteError(final long requestId, final Throwable t) { final RemoteTransportException remoteException; @@ -207,7 +213,7 @@ public class CapturingTransport implements Transport { * * @param requestId the id corresponding to the captured send * request - * @param e the failure + * @param e the failure */ public void handleError(final long requestId, final TransportException e) { responseHandlers.onResponseReceived(requestId, listener).handleException(e); @@ -229,7 +235,6 @@ public class CapturingTransport implements Transport { @Override public void addCloseListener(ActionListener listener) { - } @Override @@ -248,6 +253,10 @@ public class CapturingTransport implements Transport { capturedRequests.add(new CapturedRequest(node, requestId, action, request)); } + protected boolean nodeConnected(DiscoveryNode discoveryNode) { + return true; + } + @Override public TransportStats getStats() { throw new UnsupportedOperationException(); @@ -264,7 +273,7 @@ public class CapturingTransport implements Transport { } @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + public TransportAddress[] addressesFromString(String address, int perAddressLimit) { return new TransportAddress[0]; } @@ -275,22 +284,23 @@ public class CapturingTransport implements Transport { @Override public void addLifecycleListener(LifecycleListener listener) { - } @Override public void removeLifecycleListener(LifecycleListener listener) { - } @Override - public void start() {} + public void start() { + } @Override - public void stop() {} + public void stop() { + } @Override - public void close() {} + public void close() { + } @Override public List getLocalAddresses() { @@ -306,6 +316,7 @@ public class CapturingTransport implements Transport { requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); } } + @Override public ResponseHandlers getResponseHandlers() { return responseHandlers; diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 15ab06d651e..d6c4f30a885 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -95,6 +95,12 @@ public final class MockTransportService extends TransportService { public static MockTransportService createNewService(Settings settings, Version version, ThreadPool threadPool, @Nullable ClusterSettings clusterSettings) { + MockTcpTransport mockTcpTransport = newMockTransport(settings, version, threadPool); + return createNewService(settings, mockTcpTransport, version, threadPool, clusterSettings, + Collections.emptySet()); + } + + public static MockTcpTransport newMockTransport(Settings settings, Version version, ThreadPool threadPool) { // some tests use MockTransportService to do network based testing. Yet, we run tests in multiple JVMs that means // concurrent tests could claim port that another JVM just released and if that test tries to simulate a disconnect it might // be smart enough to re-connect depending on what is tested. To reduce the risk, since this is very hard to debug we use @@ -102,9 +108,8 @@ public final class MockTransportService extends TransportService { int basePort = 10300 + (JVM_ORDINAL * 100); // use a non-default port otherwise some cluster in this JVM might reuse a port settings = Settings.builder().put(TcpTransport.PORT.getKey(), basePort + "-" + (basePort + 100)).put(settings).build(); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); - final Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + return new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); - return createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); } public static MockTransportService createNewService(Settings settings, Transport transport, Version version, ThreadPool threadPool, diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java index 486ccc805d0..012369feb83 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java @@ -120,8 +120,8 @@ public class StubbableConnectionManager extends ConnectionManager { } @Override - public int connectedNodeCount() { - return delegate.connectedNodeCount(); + public int size() { + return delegate.size(); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java index 2e78f8a9a4f..d35fe609c08 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java @@ -41,7 +41,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -public class StubbableTransport implements Transport { +public final class StubbableTransport implements Transport { private final ConcurrentHashMap sendBehaviors = new ConcurrentHashMap<>(); private final ConcurrentHashMap connectBehaviors = new ConcurrentHashMap<>(); @@ -60,6 +60,12 @@ public class StubbableTransport implements Transport { 
return prior == null; } + public boolean setDefaultConnectBehavior(OpenConnectionBehavior openConnectionBehavior) { + OpenConnectionBehavior prior = this.defaultConnectBehavior; + this.defaultConnectBehavior = openConnectionBehavior; + return prior == null; + } + boolean addSendBehavior(TransportAddress transportAddress, SendRequestBehavior sendBehavior) { return sendBehaviors.put(transportAddress, sendBehavior) == null; } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 4e59aaecf8d..989afd04dab 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -768,6 +768,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testNotifyOnShutdown() throws Exception { final CountDownLatch latch2 = new CountDownLatch(1); + final CountDownLatch latch3 = new CountDownLatch(1); try { serviceA.registerRequestHandler("internal:foobar", StringMessageRequest::new, ThreadPool.Names.GENERIC, (request, channel, task) -> { @@ -777,6 +778,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceB.stop(); } catch (Exception e) { fail(e.getMessage()); + } finally { + latch3.countDown(); } }); TransportFuture foobar = serviceB.submitRequest(nodeA, "internal:foobar", @@ -788,6 +791,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } catch (TransportException ex) { } + latch3.await(); } finally { serviceB.close(); // make sure we are fully closed here otherwise we might run into assertions down the road serviceA.disconnectFromNode(nodeB); @@ -2650,7 +2654,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void onConnectionOpened(final Transport.Connection connection) { closeConnectionChannel(connection); try { - assertBusy(connection::isClosed); + assertBusy(() -> assertTrue(connection.isClosed())); } catch (Exception e) { throw new AssertionError(e); } @@ -2682,7 +2686,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } @SuppressForbidden(reason = "need local ephemeral port") - private InetSocketAddress getLocalEphemeral() throws UnknownHostException { + protected InetSocketAddress getLocalEphemeral() throws UnknownHostException { return new InetSocketAddress(InetAddress.getLocalHost(), 0); } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index e6d80ac24d8..996508bdb88 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport; import org.elasticsearch.cli.SuppressForbidden; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -162,7 +163,8 @@ public class MockTcpTransport extends TcpTransport { @Override @SuppressForbidden(reason = "real socket for mocking remote connections") - protected MockChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + protected MockChannel 
initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException { + InetSocketAddress address = node.getAddress().address(); final MockSocket socket = new MockSocket(); final MockChannel channel = new MockChannel(socket, address, "none"); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index fbe61db6ee7..19543cfdcbb 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -22,6 +22,7 @@ package org.elasticsearch.transport.nio; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -85,7 +86,8 @@ public class MockNioTransport extends TcpTransport { } @Override - protected MockSocketChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + protected MockSocketChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException { + InetSocketAddress address = node.getAddress().address(); MockSocketChannel channel = nioGroup.openChannel(address, clientChannelFactory); channel.addConnectListener(ActionListener.toBiConsumer(connectListener)); return channel; diff --git a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java new file mode 100644 index 00000000000..62c8e2f00ff --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.test.rest.ESRestTestCase; + +public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase { + + private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); + + public final boolean isRunningAgainstOldCluster() { + return runningAgainstOldCluster; + } + + private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); + + public final Version getOldClusterVersion() { + return oldClusterVersion; + } + + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @Override + protected boolean preserveSnapshotsUponCompletion() { + return true; + } + + @Override + protected boolean preserveReposUponCompletion() { + return true; + } + + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + +} diff --git a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json index 38937a9b5af..e69c2db6ff4 100644 --- a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json +++ b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json @@ -42,7 +42,7 @@ }, "czechAnalyzerWithStemmer":{ "tokenizer":"standard", - "filter":["standard", "lowercase", "stop", "czech_stem"] + "filter":["lowercase", "stop", "czech_stem"] }, "decompoundingAnalyzer":{ "tokenizer":"standard", diff --git a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml index f7a57d14dbe..82f933296a3 100644 --- a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml +++ b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml @@ -33,7 +33,7 @@ index : version: 3.6 czechAnalyzerWithStemmer : tokenizer : standard - filter : [standard, lowercase, stop, czech_stem] + filter : [lowercase, stop, czech_stem] decompoundingAnalyzer : tokenizer : standard filter : [dict_dec] diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 5da8601a9f3..500cff893cb 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -152,7 +152,7 @@ public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentPa parser = createParser(YamlXContent.yamlXContent, "\"First test section\": \n" + " - skip:\n" + - " version: \"5.0.0 - 5.2.0\"\n" + + " version: \"6.0.0 - 6.2.0\"\n" + " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" + " - do :\n" + " catch: missing\n" + @@ -167,9 +167,9 @@ public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentPa assertThat(testSection, notNullValue()); assertThat(testSection.getName(), equalTo("First test section")); assertThat(testSection.getSkipSection(), notNullValue()); - assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0)); + assertThat(testSection.getSkipSection().getLowerVersion(), 
equalTo(Version.V_6_0_0)); assertThat(testSection.getSkipSection().getUpperVersion(), - equalTo(Version.V_5_2_0)); + equalTo(Version.V_6_2_0)); assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(testSection.getExecutableSections().size(), equalTo(2)); DoSection doSection = (DoSection)testSection.getExecutableSections().get(0); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index 4c97eb45361..71814593ad4 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -66,10 +66,10 @@ public class ClientYamlTestSuiteTests extends AbstractClientYamlTestFragmentPars " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" + "\n" + "---\n" + - "\"Get type mapping - pre 5.0\":\n" + + "\"Get type mapping - pre 6.0\":\n" + "\n" + " - skip:\n" + - " version: \"5.0.0 - \"\n" + + " version: \"6.0.0 - \"\n" + " reason: \"for newer versions the index name is always returned\"\n" + "\n" + " - do:\n" + @@ -97,7 +97,7 @@ public class ClientYamlTestSuiteTests extends AbstractClientYamlTestFragmentPars } else { assertThat(restTestSuite.getSetupSection().isEmpty(), equalTo(true)); } - + assertThat(restTestSuite.getTeardownSection(), notNullValue()); if (includeTeardown) { assertThat(restTestSuite.getTeardownSection().isEmpty(), equalTo(false)); @@ -131,12 +131,12 @@ public class ClientYamlTestSuiteTests extends AbstractClientYamlTestFragmentPars assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace")); assertThat(restTestSuite.getTestSections().get(1).getName(), - equalTo("Get type mapping - pre 5.0")); + equalTo("Get type mapping - pre 6.0")); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false)); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned")); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), - equalTo(Version.V_5_0_0)); + equalTo(Version.V_6_0_0)); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getUpperVersion(), equalTo(Version.CURRENT)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class)); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/MatchAssertionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/MatchAssertionTests.java new file mode 100644 index 00000000000..2bd72347441 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/MatchAssertionTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.test.rest.yaml.section; + +import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.test.ESTestCase; + +public class MatchAssertionTests extends ESTestCase { + + public void testNull() { + XContentLocation xContentLocation = new XContentLocation(0, 0); + { + MatchAssertion matchAssertion = new MatchAssertion(xContentLocation, "field", null); + matchAssertion.doAssert(null, null); + expectThrows(AssertionError.class, () -> matchAssertion.doAssert("non-null", null)); + } + { + MatchAssertion matchAssertion = new MatchAssertion(xContentLocation, "field", "non-null"); + expectThrows(AssertionError.class, () -> matchAssertion.doAssert(null, "non-null")); + } + { + MatchAssertion matchAssertion = new MatchAssertion(xContentLocation, "field", "/exp/"); + expectThrows(AssertionError.class, () -> matchAssertion.doAssert(null, "/exp/")); + } + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java index cb9ab009b25..e883e8e062a 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java @@ -53,7 +53,7 @@ public class SetupSectionTests extends AbstractClientYamlTestFragmentParserTestC public void testParseSetupAndSkipSectionNoSkip() throws Exception { parser = createParser(YamlXContent.yamlXContent, " - skip:\n" + - " version: \"5.0.0 - 5.3.0\"\n" + + " version: \"6.0.0 - 6.3.0\"\n" + " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" + " - do:\n" + " index1:\n" + @@ -74,9 +74,9 @@ public class SetupSectionTests extends AbstractClientYamlTestFragmentParserTestC assertThat(setupSection, notNullValue()); assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false)); assertThat(setupSection.getSkipSection(), notNullValue()); - assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0)); + assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); assertThat(setupSection.getSkipSection().getUpperVersion(), - equalTo(Version.V_5_3_0)); + equalTo(Version.V_6_3_0)); assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(setupSection.getDoSections().size(), equalTo(2)); assertThat(setupSection.getDoSections().get(0).getApiCallSection().getApi(), equalTo("index1")); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java index 3ab9583335e..e5e466a82cc 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java @@ -34,17 +34,17 @@ import static org.hamcrest.Matchers.nullValue; public class 
SkipSectionTests extends AbstractClientYamlTestFragmentParserTestCase { public void testSkip() { - SkipSection section = new SkipSection("5.0.0 - 5.1.0", + SkipSection section = new SkipSection("6.0.0 - 6.1.0", randomBoolean() ? Collections.emptyList() : Collections.singletonList("warnings"), "foobar"); assertFalse(section.skip(Version.CURRENT)); - assertTrue(section.skip(Version.V_5_0_0)); - section = new SkipSection(randomBoolean() ? null : "5.0.0 - 5.1.0", + assertTrue(section.skip(Version.V_6_0_0)); + section = new SkipSection(randomBoolean() ? null : "6.0.0 - 6.1.0", Collections.singletonList("boom"), "foobar"); assertTrue(section.skip(Version.CURRENT)); } public void testMessage() { - SkipSection section = new SkipSection("5.0.0 - 5.1.0", + SkipSection section = new SkipSection("6.0.0 - 6.1.0", Collections.singletonList("warnings"), "foobar"); assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); section = new SkipSection(null, Collections.singletonList("warnings"), "foobar"); @@ -55,14 +55,14 @@ public class SkipSectionTests extends AbstractClientYamlTestFragmentParserTestCa public void testParseSkipSectionVersionNoFeature() throws Exception { parser = createParser(YamlXContent.yamlXContent, - "version: \" - 5.1.1\"\n" + + "version: \" - 6.1.1\"\n" + "reason: Delete ignores the parent param" ); SkipSection skipSection = SkipSection.parse(parser); assertThat(skipSection, notNullValue()); assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion())); - assertThat(skipSection.getUpperVersion(), equalTo(Version.V_5_1_1)); + assertThat(skipSection.getUpperVersion(), equalTo(Version.V_6_1_1)); assertThat(skipSection.getFeatures().size(), equalTo(0)); assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param")); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java index 15ca1ec0096..07afa9f33b5 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java @@ -56,7 +56,7 @@ public class TeardownSectionTests extends AbstractClientYamlTestFragmentParserTe public void testParseWithSkip() throws Exception { parser = createParser(YamlXContent.yamlXContent, " - skip:\n" + - " version: \"5.0.0 - 5.3.0\"\n" + + " version: \"6.0.0 - 6.3.0\"\n" + " reason: \"there is a reason\"\n" + " - do:\n" + " delete:\n" + @@ -75,8 +75,8 @@ public class TeardownSectionTests extends AbstractClientYamlTestFragmentParserTe TeardownSection section = TeardownSection.parse(parser); assertThat(section, notNullValue()); assertThat(section.getSkipSection().isEmpty(), equalTo(false)); - assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0)); - assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_5_3_0)); + assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); + assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_6_3_0)); assertThat(section.getSkipSection().getReason(), equalTo("there is a reason")); assertThat(section.getDoSections().size(), equalTo(2)); assertThat(section.getDoSections().get(0).getApiCallSection().getApi(), equalTo("delete")); diff --git a/test/logger-usage/build.gradle b/test/logger-usage/build.gradle index 
c16dab6a625..6ab975fd42e 100644 --- a/test/logger-usage/build.gradle +++ b/test/logger-usage/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -29,7 +27,7 @@ loggerUsageCheck.enabled = false forbiddenApisMain.enabled = true // disabled by parent project forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] // does not depend on core, only jdk signatures + replaceSignatureFiles 'jdk-signatures' // does not depend on core, only jdk signatures } jarHell.enabled = true // disabled by parent project @@ -44,14 +42,4 @@ thirdPartyAudit.excludes = [ 'org.osgi.framework.SynchronousBundleListener', 'org.osgi.framework.wiring.BundleWire', 'org.osgi.framework.wiring.BundleWiring' -] - -if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { - // Used by Log4J 2.11.1 - thirdPartyAudit.excludes += [ - 'java.io.ObjectInputFilter', - 'java.io.ObjectInputFilter$Config', - 'java.io.ObjectInputFilter$FilterInfo', - 'java.io.ObjectInputFilter$Status' - ] -} \ No newline at end of file +] \ No newline at end of file diff --git a/x-pack/build.gradle b/x-pack/build.gradle index 01ce465fc09..d2a19be2136 100644 --- a/x-pack/build.gradle +++ b/x-pack/build.gradle @@ -31,16 +31,8 @@ subprojects { } } -File checkstyleSuppressions = file('dev-tools/checkstyle_suppressions.xml') subprojects { - tasks.withType(Checkstyle) { - inputs.file(checkstyleSuppressions) - // Use x-pack-elasticsearch specific suppressions file rather than the open source one. - configProperties = [ - suppressions: checkstyleSuppressions - ] - } - + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-ccr:${version}": xpackModule('ccr')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-core:${version}": xpackModule('core')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-deprecation:${version}": xpackModule('deprecation')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-graph:${version}": xpackModule('graph')] diff --git a/x-pack/dev-tools/checkstyle_suppressions.xml b/x-pack/dev-tools/checkstyle_suppressions.xml deleted file mode 100644 index 4748436a849..00000000000 --- a/x-pack/dev-tools/checkstyle_suppressions.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index aab84355581..f027493b0ab 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -14,23 +14,13 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/security/authorization/run-as-privilege.asciidoc', 'en/security/ccs-clients-integrations/http.asciidoc', 'en/security/authorization/custom-roles-provider.asciidoc', - 'en/rest-api/ml/delete-snapshot.asciidoc', - 'en/rest-api/ml/get-bucket.asciidoc', - 'en/rest-api/ml/get-job-stats.asciidoc', - 'en/rest-api/ml/get-overall-buckets.asciidoc', - 'en/rest-api/ml/get-category.asciidoc', - 'en/rest-api/ml/get-record.asciidoc', - 'en/rest-api/ml/get-influencer.asciidoc', - 'en/rest-api/ml/get-snapshot.asciidoc', - 'en/rest-api/ml/post-data.asciidoc', - 'en/rest-api/ml/revert-snapshot.asciidoc', - 'en/rest-api/ml/update-snapshot.asciidoc', 'en/rest-api/watcher/stats.asciidoc', 'en/watcher/example-watches/watching-time-series-data.asciidoc', ] dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // 
"org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') } @@ -309,7 +299,7 @@ setups['farequote_datafeed'] = setups['farequote_job'] + ''' "job_id":"farequote", "indexes":"farequote" } -''' +''' setups['ml_filter_safe_domains'] = ''' - do: xpack.ml.put_filter: @@ -684,9 +674,8 @@ setups['sensor_prefab_data'] = ''' page_size: 1000 groups: date_histogram: - delay: "7d" field: "timestamp" - interval: "1h" + interval: "7d" time_zone: "UTC" terms: fields: @@ -749,3 +738,22 @@ setups['jacknich_user'] = ''' "metadata" : { "intelligence" : 7 } } ''' +setups['app0102_privileges'] = ''' + - do: + xpack.security.put_privileges: + body: > + { + "myapp": { + "read": { + "application": "myapp", + "name": "read", + "actions": [ + "data:read/*", + "action:login" ], + "metadata": { + "description": "Read access to myapp" + } + } + } + } +''' diff --git a/x-pack/docs/en/ml/api-quickref.asciidoc b/x-pack/docs/en/ml/api-quickref.asciidoc deleted file mode 100644 index be74167862e..00000000000 --- a/x-pack/docs/en/ml/api-quickref.asciidoc +++ /dev/null @@ -1,102 +0,0 @@ -[role="xpack"] -[[ml-api-quickref]] -== API quick reference - -All {ml} endpoints have the following base: - -[source,js] ----- -/_xpack/ml/ ----- -// NOTCONSOLE - -The main {ml} resources can be accessed with a variety of endpoints: - -* <>: Create and manage {ml} jobs -* <>: Select data from {es} to be analyzed -* <>: Access the results of a {ml} job -* <>: Manage model snapshots -//* <>: Validate subsections of job configurations - -[float] -[[ml-api-jobs]] -=== /anomaly_detectors/ - -* {ref}/ml-put-job.html[PUT /anomaly_detectors/+++]: Create a job -* {ref}/ml-open-job.html[POST /anomaly_detectors//_open]: Open a job -* {ref}/ml-post-data.html[POST /anomaly_detectors//_data]: Send data to a job -* {ref}/ml-get-job.html[GET /anomaly_detectors]: List jobs -* {ref}/ml-get-job.html[GET /anomaly_detectors/+++]: Get job details -* {ref}/ml-get-job-stats.html[GET /anomaly_detectors//_stats]: Get job statistics -* {ref}/ml-update-job.html[POST /anomaly_detectors//_update]: Update certain properties of the job configuration -* {ref}/ml-flush-job.html[POST anomaly_detectors//_flush]: Force a job to analyze buffered data -* {ref}/ml-forecast.html[POST anomaly_detectors//_forecast]: Forecast future job behavior -* {ref}/ml-close-job.html[POST /anomaly_detectors//_close]: Close a job -* {ref}/ml-delete-job.html[DELETE /anomaly_detectors/+++]: Delete a job - -[float] -[[ml-api-calendars]] -=== /calendars/ - -* {ref}/ml-put-calendar.html[PUT /calendars/+++]: Create a calendar -* {ref}/ml-post-calendar-event.html[POST /calendars/+++/events]: Add a scheduled event to a calendar -* {ref}/ml-put-calendar-job.html[PUT /calendars/+++/jobs/+++]: Associate a job with a calendar -* {ref}/ml-get-calendar.html[GET /calendars/+++]: Get calendar details -* {ref}/ml-get-calendar-event.html[GET /calendars/+++/events]: Get scheduled event details -* {ref}/ml-delete-calendar-event.html[DELETE /calendars/+++/events/+++]: Remove a scheduled event from a calendar -* {ref}/ml-delete-calendar-job.html[DELETE /calendars/+++/jobs/+++]: Disassociate a job from a calendar -* {ref}/ml-delete-calendar.html[DELETE /calendars/+++]: Delete a calendar - -[float] -[[ml-api-filters]] -=== 
/filters/ - -* {ref}/ml-put-filter.html[PUT /filters/+++]: Create a filter -* {ref}/ml-update-filter.html[POST /filters/+++/_update]: Update a filter -* {ref}/ml-get-filter.html[GET /filters/+++]: List filters -* {ref}/ml-delete-filter.html[DELETE /filter/+++]: Delete a filter - -[float] -[[ml-api-datafeeds]] -=== /datafeeds/ - -* {ref}/ml-put-datafeed.html[PUT /datafeeds/+++]: Create a {dfeed} -* {ref}/ml-start-datafeed.html[POST /datafeeds//_start]: Start a {dfeed} -* {ref}/ml-get-datafeed.html[GET /datafeeds]: List {dfeeds} -* {ref}/ml-get-datafeed.html[GET /datafeeds/+++]: Get {dfeed} details -* {ref}/ml-get-datafeed-stats.html[GET /datafeeds//_stats]: Get statistical information for {dfeeds} -* {ref}/ml-preview-datafeed.html[GET /datafeeds//_preview]: Get a preview of a {dfeed} -* {ref}/ml-update-datafeed.html[POST /datafeeds//_update]: Update certain settings for a {dfeed} -* {ref}/ml-stop-datafeed.html[POST /datafeeds//_stop]: Stop a {dfeed} -* {ref}/ml-delete-datafeed.html[DELETE /datafeeds/+++]: Delete {dfeed} - -[float] -[[ml-api-results]] -=== /results/ - -* {ref}/ml-get-bucket.html[GET /results/buckets]: List the buckets in the results -* {ref}/ml-get-bucket.html[GET /results/buckets/+++]: Get bucket details -* {ref}/ml-get-overall-buckets.html[GET /results/overall_buckets]: Get overall bucket results for multiple jobs -* {ref}/ml-get-category.html[GET /results/categories]: List the categories in the results -* {ref}/ml-get-category.html[GET /results/categories/+++]: Get category details -* {ref}/ml-get-influencer.html[GET /results/influencers]: Get influencer details -* {ref}/ml-get-record.html[GET /results/records]: Get records from the results - -[float] -[[ml-api-snapshots]] -=== /model_snapshots/ - -* {ref}/ml-get-snapshot.html[GET /model_snapshots]: List model snapshots -* {ref}/ml-get-snapshot.html[GET /model_snapshots/+++]: Get model snapshot details -* {ref}/ml-revert-snapshot.html[POST /model_snapshots//_revert]: Revert a model snapshot -* {ref}/ml-update-snapshot.html[POST /model_snapshots//_update]: Update certain settings for a model snapshot -* {ref}/ml-delete-snapshot.html[DELETE /model_snapshots/+++]: Delete a model snapshot - -//// -[float] -[[ml-api-validate]] -=== /validate/ - -* {ref}/ml-valid-detector.html[POST /anomaly_detectors/_validate/detector]: Validate a detector -* {ref}/ml-valid-job.html[POST /anomaly_detectors/_validate]: Validate a job -//// diff --git a/x-pack/docs/en/rest-api/defs.asciidoc b/x-pack/docs/en/rest-api/defs.asciidoc deleted file mode 100644 index 349ce343c7a..00000000000 --- a/x-pack/docs/en/rest-api/defs.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -[role="xpack"] -[[ml-api-definitions]] -== Definitions - -These resource definitions are used in {ml} APIs and in {kib} advanced -job configuration options. 
- -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> - -[role="xpack"] -include::ml/calendarresource.asciidoc[] -[role="xpack"] -include::ml/datafeedresource.asciidoc[] -[role="xpack"] -include::ml/filterresource.asciidoc[] -[role="xpack"] -include::ml/jobresource.asciidoc[] -[role="xpack"] -include::ml/jobcounts.asciidoc[] -[role="xpack"] -include::ml/snapshotresource.asciidoc[] -[role="xpack"] -include::ml/resultsresource.asciidoc[] -[role="xpack"] -include::ml/eventresource.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index f5b0c8eef66..3ba582d5d78 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -6,18 +6,39 @@ You can use the following APIs to perform {security} activities. * <> * <> -* <> -* <> +* <> * <> +[float] +[[security-api-app-privileges]] +=== Application privileges + +You can use the following APIs to add, update, retrieve, and remove application +privileges: + +* <> +* <> +* <> + +[float] +[[security-role-mapping-apis]] +=== Role mappings + +You can use the following APIs to add, remove, update, and retrieve role mappings: + +* <> +* <> +* <> + [float] [[security-role-apis]] === Roles -You can use the following APIs to add, remove, and retrieve roles in the native realm: +You can use the following APIs to add, remove, update, and retrieve roles in the native realm: -* <>, <> +* <> * <> +* <> * <> [float] @@ -27,34 +48,43 @@ You can use the following APIs to add, remove, and retrieve roles in the native You can use the following APIs to create and invalidate bearer tokens for access without requiring basic authentication: -* <>, <> +* <> +* <> [float] [[security-user-apis]] === Users -You can use the following APIs to create, read, update, and delete users from the +You can use the following APIs to add, remove, update, or retrieve users in the native realm: -* <>, <> -* <>, <> +* <> * <> +* <> +* <> +* <> * <> + +include::security/put-app-privileges.asciidoc[] include::security/authenticate.asciidoc[] include::security/change-password.asciidoc[] include::security/clear-cache.asciidoc[] +include::security/create-role-mappings.asciidoc[] include::security/clear-roles-cache.asciidoc[] include::security/create-roles.asciidoc[] include::security/create-users.asciidoc[] +include::security/delete-app-privileges.asciidoc[] +include::security/delete-role-mappings.asciidoc[] include::security/delete-roles.asciidoc[] include::security/delete-tokens.asciidoc[] include::security/delete-users.asciidoc[] include::security/disable-users.asciidoc[] include::security/enable-users.asciidoc[] +include::security/get-app-privileges.asciidoc[] +include::security/get-role-mappings.asciidoc[] include::security/get-roles.asciidoc[] include::security/get-tokens.asciidoc[] include::security/get-users.asciidoc[] -include::security/privileges.asciidoc[] -include::security/role-mapping.asciidoc[] +include::security/has-privileges.asciidoc[] include::security/ssl.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc new file mode 100644 index 00000000000..87dedbba4f7 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc @@ -0,0 +1,239 @@ +[role="xpack"] +[[security-api-put-role-mapping]] +=== Create or update role mappings API + +Creates and updates role mappings. 
+ +==== Request + +`POST /_xpack/security/role_mapping/` + + +`PUT /_xpack/security/role_mapping/` + + +==== Description + +Role mappings define which roles are assigned to each user. Each mapping has +_rules_ that identify users and a list of _roles_ that are +granted to those users. + +NOTE: This API does not create roles. Rather, it maps users to existing roles. +Roles can be created by using <> or +{stack-ov}/defining-roles.html#roles-management-file[roles files]. + +For more information, see +{stack-ov}/mapping-roles.html[Mapping users and groups to roles]. + + +==== Path Parameters + +`name`:: + (string) The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. + + +==== Request Body + +The following parameters can be specified in the body of a PUT or POST request +and pertain to adding a role mapping: + +`enabled` (required):: +(boolean) Mappings that have `enabled` set to `false` are ignored when role +mapping is performed. + +`metadata`:: +(object) Additional metadata that helps define which roles are assigned to each +user. Within the `metadata` object, keys beginning with `_` are reserved for +system usage. + +`roles` (required):: +(list) A list of roles that are granted to the users that match the role mapping +rules. + +`rules` (required):: +(object) The rules that determine which users should be matched by the mapping. +A rule is a logical condition that is expressed by using a JSON DSL. See +<>. + + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +The following example assigns the "user" role to all users: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping1 +{ + "roles": [ "user"], + "enabled": true, <1> + "rules": { + "field" : { "username" : "*" } + }, + "metadata" : { <2> + "version" : 1 + } +} +------------------------------------------------------------ +// CONSOLE +<1> Mappings that have `enabled` set to `false` are ignored when role mapping + is performed. +<2> Metadata is optional. + +A successful call returns a JSON structure that shows whether the mapping has +been created or updated. + +[source,js] +-------------------------------------------------- +{ + "role_mapping" : { + "created" : true <1> + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing mapping is updated, `created` is set to false. 
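+ +As a quick check (a suggested verification step, not part of the original example flow), you can confirm that the mapping was stored by retrieving it with the <<security-api-get-role-mapping,get role mappings API>>: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/role_mapping/mapping1 +-------------------------------------------------- +// CONSOLE +// TEST[continued]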
+ +The following example assigns the "user" and "admin" roles to specific users: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role_mapping/mapping2 +{ + "roles": [ "user", "admin" ], + "enabled": true, + "rules": { + "field" : { "username" : [ "esadmin01", "esadmin02" ] } + } +} +-------------------------------------------------- +// CONSOLE + +The following example matches any user where either the username is `esadmin` +or the user is in the `cn=admins,dc=example,dc=com` group: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping3 +{ + "roles": [ "superuser" ], + "enabled": true, + "rules": { + "any": [ + { + "field": { + "username": "esadmin" + } + }, + { + "field": { + "groups": "cn=admins,dc=example,dc=com" + } + } + ] + } +} +------------------------------------------------------------ +// CONSOLE + +The following example matches users who authenticated against a specific realm: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping4 +{ + "roles": [ "ldap-user" ], + "enabled": true, + "rules": { + "field" : { "realm.name" : "ldap1" } + } +} +------------------------------------------------------------ +// CONSOLE + +The following example matches users within a specific LDAP sub-tree: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping5 +{ + "roles": [ "example-user" ], + "enabled": true, + "rules": { + "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } + } +} +------------------------------------------------------------ +// CONSOLE + +The following example matches users within a particular LDAP sub-tree in a +specific realm: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping6 +{ + "roles": [ "ldap-example-user" ], + "enabled": true, + "rules": { + "all": [ + { "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } }, + { "field" : { "realm.name" : "ldap1" } } + ] + } +} +------------------------------------------------------------ +// CONSOLE + +The rules can be more complex and include wildcard matching.
For example, the +following mapping matches any user where *all* of these conditions are met: + +- the _Distinguished Name_ matches the pattern `*,ou=admin,dc=example,dc=com`, + or the username is `es-admin`, or the username is `es-system` +- the user is in the `cn=people,dc=example,dc=com` group +- the user does not have a `terminated_date` + + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping7 +{ + "roles": [ "superuser" ], + "enabled": true, + "rules": { + "all": [ + { + "any": [ + { + "field": { + "dn": "*,ou=admin,dc=example,dc=com" + } + }, + { + "field": { + "username": [ "es-admin", "es-system" ] + } + } + ] + }, + { + "field": { + "groups": "cn=people,dc=example,dc=com" + } + }, + { + "except": { + "field": { + "metadata.terminated_date": null + } + } + } + ] + } +} +------------------------------------------------------------ +// CONSOLE diff --git a/x-pack/docs/en/rest-api/security/create-roles.asciidoc b/x-pack/docs/en/rest-api/security/create-roles.asciidoc index 749676b4e83..fc3c613557e 100644 --- a/x-pack/docs/en/rest-api/security/create-roles.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-roles.asciidoc @@ -1,8 +1,8 @@ [role="xpack"] [[security-api-put-role]] -=== Create roles API +=== Create or update roles API -Adds roles in the native realm. +Adds and updates roles in the native realm. ==== Request @@ -29,9 +29,20 @@ file-based role management. For more information about the native realm, see The following parameters can be specified in the body of a PUT or POST request and pertain to adding a role: +`applications`:: (list) A list of application privilege entries. +`application` (required)::: (string) The name of the application to which this entry applies. +`privileges`::: (list) A list of strings, where each element is the name of an application +privilege or action. +`resources`::: (list) A list of resources to which the privileges are applied. + `cluster`:: (list) A list of cluster privileges. These privileges define the cluster level actions that users with this role are able to execute. +`global`:: (object) An object defining global privileges. A global privilege is +a form of cluster privilege that is request-aware. Support for global privileges +is currently limited to the management of application privileges. +This field is optional. + `indices`:: (list) A list of indices permissions entries. `field_security`::: (list) The document fields that the owners of the role have read access to. For more information, see @@ -79,6 +90,13 @@ POST /_xpack/security/role/my_admin_role "query": "{\"match\": {\"title\": \"foo\"}}" // optional } ], + "applications": [ + { + "application": "myapp", + "privileges": [ "admin", "read" ], + "resources": [ "*" ] + } + ], "run_as": [ "other_user" ], // optional "metadata" : { // optional "version" : 1 diff --git a/x-pack/docs/en/rest-api/security/create-users.asciidoc b/x-pack/docs/en/rest-api/security/create-users.asciidoc index 5015d0401c2..789e8c7e80d 100644 --- a/x-pack/docs/en/rest-api/security/create-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-users.asciidoc @@ -1,8 +1,8 @@ [role="xpack"] [[security-api-put-user]] -=== Create users API +=== Create or update users API -Creates and updates users in the native realm. These users are commonly referred +Adds and updates users in the native realm. These users are commonly referred to as _native users_. @@ -91,8 +91,9 @@ created or updated.
-------------------------------------------------- { "user": { - "created" : true <1> - } + "created" : true + }, + "created": true <1> } -------------------------------------------------- // TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc new file mode 100644 index 00000000000..d7f001721b1 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc @@ -0,0 +1,59 @@ +[role="xpack"] +[[security-api-delete-privilege]] +=== Delete application privileges API + +Removes +{stack-ov}/security-privileges.html#application-privileges[application privileges]. + +==== Request + +`DELETE /_xpack/security/privilege//` + +//==== Description + +==== Path Parameters + +`application` (required):: + (string) The name of the application. Application privileges are always + associated with exactly one application. + +`privilege` (required):: + (string) The name of the privilege. + +// ==== Request Body + +==== Authorization + +To use this API, you must have either: + +- the `manage_security` cluster privilege (or a greater privilege such as `all`); _or_ +- the _"Manage Application Privileges"_ global privilege for the application being referenced + in the request + +==== Examples + +The following example deletes the `read` application privilege from the +`myapp` application: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/privilege/myapp/read +-------------------------------------------------- +// CONSOLE +// TEST[setup:app0102_privileges] + +If the privilege is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false. + +[source,js] +-------------------------------------------------- +{ + "myapp": { + "read": { + "found" : true + } + } +} +-------------------------------------------------- +// TESTRESPONSE + diff --git a/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc new file mode 100644 index 00000000000..dc9bf2ba109 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc @@ -0,0 +1,50 @@ +[role="xpack"] +[[security-api-delete-role-mapping]] +=== Delete role mappings API + +Removes role mappings. + +==== Request + +`DELETE /_xpack/security/role_mapping/` + +==== Description + +Role mappings define which roles are assigned to each user. For more information, +see {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. + +==== Path Parameters + +`name`:: + (string) The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. + +//==== Request Body + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +The following example deletes a role mapping: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/role_mapping/mapping1 +-------------------------------------------------- +// CONSOLE +// TEST[setup:role_mapping] + +If the mapping is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false.
+ +[source,js] +-------------------------------------------------- +{ + "found" : true +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc new file mode 100644 index 00000000000..5412a4bdceb --- /dev/null +++ b/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc @@ -0,0 +1,94 @@ +[role="xpack"] +[[security-api-get-privileges]] +=== Get application privileges API + +Retrieves +{stack-ov}/security-privileges.html#application-privileges[application privileges]. + +==== Request + +`GET /_xpack/security/privilege` + + +`GET /_xpack/security/privilege/` + + +`GET /_xpack/security/privilege//` + + +==== Description + +To check a user's application privileges, use the +<>. + + +==== Path Parameters + +`application`:: + (string) The name of the application. Application privileges are always + associated with exactly one application. + If you do not specify this parameter, the API returns information about all + privileges for all applications. + +`privilege`:: + (string) The name of the privilege. If you do not specify this parameter, the + API returns information about all privileges for the requested application. + +//==== Request Body + +==== Authorization + +To use this API, you must have either: + +- the `manage_security` cluster privilege (or a greater privilege such as `all`); _or_ +- the _"Manage Application Privileges"_ global privilege for the application being referenced + in the request + +==== Examples + +The following example retrieves information about the `read` privilege for the +`myapp` application: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/privilege/myapp/read +-------------------------------------------------- +// CONSOLE +// TEST[setup:app0102_privileges] + +A successful call returns an object keyed by application name and privilege +name. If the privilege is not defined, the request responds with a 404 status. + +[source,js] +-------------------------------------------------- +{ + "myapp": { + "read": { + "application": "myapp", + "name": "read", + "actions": [ + "data:read/*", + "action:login" + ], + "metadata": { + "description": "Read access to myapp" + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE + +To retrieve all privileges for an application, omit the privilege name: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/privilege/myapp/ +-------------------------------------------------- +// CONSOLE + +To retrieve every privilege, omit both the application and privilege names: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/privilege/ +-------------------------------------------------- +// CONSOLE diff --git a/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc new file mode 100644 index 00000000000..7abe34b32f5 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc @@ -0,0 +1,74 @@ +[role="xpack"] +[[security-api-get-role-mapping]] +=== Get role mappings API + +Retrieves role mappings. + +==== Request + +`GET /_xpack/security/role_mapping` + + +`GET /_xpack/security/role_mapping/` + +==== Description + +Role mappings define which roles are assigned to each user. For more information, +see {stack-ov}/mapping-roles.html[Mapping users and groups to roles].
+ +==== Path Parameters + +`name`:: + (string) The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. You can specify multiple + mapping names as a comma-separated list. If you do not specify this + parameter, the API returns information about all role mappings. + +//==== Request Body + +==== Results + +A successful call retrieves an object, where the keys are the +names of the requested mappings, and the values are the JSON representation of +those mappings. For more information, see +<>. + +If there is no mapping with the requested name, the +response will have status code `404`. + + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +The following example retrieves information about the `mapping1` role mapping: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/role_mapping/mapping1 +-------------------------------------------------- +// CONSOLE +// TEST[setup:role_mapping] + + +[source,js] +-------------------------------------------------- +{ + "mapping1": { + "enabled": true, + "roles": [ + "user" + ], + "rules": { + "field": { + "username": "*" + } + }, + "metadata": {} + } +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/get-tokens.asciidoc b/x-pack/docs/en/rest-api/security/get-tokens.asciidoc index a2c4e6d7a37..c80b4f60c6b 100644 --- a/x-pack/docs/en/rest-api/security/get-tokens.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-tokens.asciidoc @@ -38,16 +38,19 @@ The following parameters can be specified in the body of a POST request and pertain to creating a token: `grant_type`:: -(string) The type of grant. Valid grant types are: `password` and `refresh_token`. +(string) The type of grant. Supported grant types are: `password`, +`client_credentials`, and `refresh_token`. `password`:: (string) The user's password. If you specify the `password` grant type, this -parameter is required. +parameter is required. This parameter is not valid with any other supported +grant type. `refresh_token`:: (string) If you specify the `refresh_token` grant type, this parameter is required. It contains the string that was returned when you created the token -and enables you to extend its life. +and enables you to extend its life. This parameter is not valid with any other +supported grant type. `scope`:: (string) The scope of the token. Currently tokens are only issued for a scope of @@ -55,11 +58,48 @@ and enables you to extend its life. `username`:: (string) The username that identifies the user. If you specify the `password` -grant type, this parameter is required. +grant type, this parameter is required. This parameter is not valid with any +other supported grant type.
==== Examples -The following example obtains a token for the `test_admin` user: +The following example obtains a token using the `client_credentials` grant type, +which simply creates a token as the authenticated user: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/oauth2/token +{ + "grant_type" : "client_credentials" +} +-------------------------------------------------- +// CONSOLE + +The following example output contains the access token, the amount of time (in +seconds) that the token expires in, and the type: + +[source,js] +-------------------------------------------------- +{ + "access_token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", + "type" : "Bearer", + "expires_in" : 1200 +} +-------------------------------------------------- +// TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] + +The token returned by this API can be used by sending a request with an +`Authorization` header with a value having the prefix `Bearer ` followed +by the value of the `access_token`. + +[source,shell] +-------------------------------------------------- +curl -H "Authorization: Bearer dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" http://localhost:9200/_cluster/health +-------------------------------------------------- +// NOTCONSOLE + +The following example obtains a token for the `test_admin` user using the +`password` grant type: [source,js] -------------------------------------------------- @@ -73,7 +113,7 @@ POST /_xpack/security/oauth2/token // CONSOLE The following example output contains the access token, the amount of time (in -seconds) that the token expires in, and the type: +seconds) that the token expires in, the type, and the refresh token: [source,js] -------------------------------------------------- @@ -87,19 +127,10 @@ seconds) that the token expires in, and the type: // TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] // TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] -The token returned by this API can be used by sending a request with a -`Authorization` header with a value having the prefix `Bearer ` followed -by the value of the `access_token`. - -[source,shell] --------------------------------------------------- -curl -H "Authorization: Bearer dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" http://localhost:9200/_cluster/health --------------------------------------------------- -// NOTCONSOLE - [[security-api-refresh-token]] -To extend the life of an existing token, you can call the API again with the -refresh token within 24 hours of the token's creation. For example: +To extend the life of an existing token obtained using the `password` grant type, +you can call the API again with the refresh token within 24 hours of the token's +creation.
For example: [source,js] -------------------------------------------------- diff --git a/x-pack/docs/en/rest-api/security/privileges.asciidoc b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc similarity index 69% rename from x-pack/docs/en/rest-api/security/privileges.asciidoc rename to x-pack/docs/en/rest-api/security/has-privileges.asciidoc index adaf27e9707..cae1bc4d303 100644 --- a/x-pack/docs/en/rest-api/security/privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] -[[security-api-privileges]] -=== Privilege APIs +[[security-api-has-privileges]] +=== Has Privileges API [[security-api-has-privilege]] @@ -15,7 +15,7 @@ a specified list of privileges. ==== Description For a list of the privileges that you can specify in this API, -see {xpack-ref}/security-privileges.html[Security Privileges]. +see {stack-ov}/security-privileges.html[Security privileges]. A successful call returns a JSON structure that shows whether each specified privilege is assigned to the user. @@ -30,6 +30,14 @@ privilege is assigned to the user. `privileges`::: (list) A list of the privileges that you want to check for the specified indices. +`application`:: +`application`::: (string) The name of the application. +`privileges`::: (list) A list of the privileges that you want to check for the +specified resources. May be either application privilege names, or the names of +actions that are granted by those privileges. +`resources`::: (list) A list of resource names against which the privileges +should be checked. + ==== Authorization All users can use this API, but only to determine their own privileges. @@ -41,7 +49,7 @@ more information, see ==== Examples The following example checks whether the current user has a specific set of -cluster and indices privileges: +cluster, index, and application privileges: [source,js] -------------------------------------------------- @@ -57,6 +65,13 @@ GET _xpack/security/user/_has_privileges "names": [ "inventory" ], "privileges" : [ "read", "write" ] } + ], + "application": [ + { + "application": "inventory_manager", + "privileges" : [ "read", "data:write/inventory" ], + "resources" : [ "product/1852563" ] + } ] } -------------------------------------------------- @@ -85,7 +100,14 @@ The following example output indicates which privileges the "rdeniro" user has: "write" : false } }, - "application" : {} + "application" : { + "inventory_manager" : { + "product/1852563" : { + "read": false, + "data:write/inventory": false + } + } + } } -------------------------------------------------- // TESTRESPONSE[s/"rdeniro"/"$body.username"/] diff --git a/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc new file mode 100644 index 00000000000..f715a80014b --- /dev/null +++ b/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc @@ -0,0 +1,163 @@ +[role="xpack"] +[[security-api-put-privileges]] +=== Create or update application privileges API + +Adds or updates +{stack-ov}/security-privileges.html#application-privileges[application privileges]. + +==== Request + +`POST /_xpack/security/privilege` + + +`PUT /_xpack/security/privilege` + + +==== Description + +This API creates or updates privileges. To remove privileges, use the +<>. + +For more information, see +{stack-ov}/defining-roles.html#roles-application-priv[Application privileges]. + +To check a user's application privileges, use the +<>.
+ +==== Request Body + +The body is a JSON object where the names of the fields are the application +names and the value of each field is an object. The fields in this inner +object are the names of the privileges and each value is a JSON object that +includes the following fields: + +`actions`:: (array-of-string) A list of action names that are granted by this +privilege. This field must exist and cannot be an empty array. + +`metadata`:: (object) Optional metadata. Within the `metadata` object, keys +that begin with `_` are reserved for system usage. + + +[[security-api-app-privileges-validation]] +==== Validation + +Application Names:: + Application names are formed from a _prefix_ and an optional _suffix_ that + must conform to the following rules: + * The prefix must begin with a lowercase ASCII letter + * The prefix must contain only ASCII letters or digits + * The prefix must be at least 3 characters long + * If the suffix exists, it must begin with either `-` or `_` + * The suffix cannot contain any of the following characters: + `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,` + * No part of the name can contain whitespace. + +Privilege Names:: + Privilege names must begin with a lowercase ASCII letter and must contain + only ASCII letters and digits along with the characters `_`, `-`, and `.` + +Action Names:: + Action names can contain any number of printable ASCII characters and must + contain at least one of the following characters: `/`, `*`, `:` + +==== Authorization + +To use this API, you must have either: + +- the `manage_security` cluster privilege (or a greater privilege such as `all`); _or_ +- the _"Manage Application Privileges"_ global privilege for the application being referenced + in the request + +==== Examples + +To add a single privilege, submit a PUT or POST request to the +`/_xpack/security/privilege//` endpoint. For example: + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/privilege +{ + "myapp": { + "read": { + "actions": [ <1> + "data:read/*" , <2> + "action:login" ], + "metadata": { <3> + "description": "Read access to myapp" + } + } + } +} +-------------------------------------------------- +// CONSOLE +<1> These strings have significance within the "myapp" application. {es} does not + assign any meaning to them. +<2> The use of a wildcard here (`*`) means that this privilege grants access to + all actions that start with `data:read/`. {es} does not assign any meaning + to these actions. However, if the request includes an application privilege + such as `data:read/users` or `data:read/settings`, the + <> respects the use of a + wildcard and returns `true`. +<3> The metadata object is optional. + +A successful call returns a JSON structure that shows whether the privilege has +been created or updated. + +[source,js] +-------------------------------------------------- +{ + "myapp": { + "read": { + "created": true <1> + } + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing privilege is updated, `created` is set to false. + +To add multiple privileges, submit a PUT or POST request to the +`/_xpack/security/privilege/` endpoint.
For example: + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/privilege +{ + "app01": { + "read": { + "actions": [ "action:login", "data:read/*" ] + }, + "write": { + "actions": [ "action:login", "data:write/*" ] + } + }, + "app02": { + "all": { + "actions": [ "*" ] + } + } +} +-------------------------------------------------- +// CONSOLE + +A successful call returns a JSON structure that shows whether the privileges +have been created or updated. + +[source,js] +-------------------------------------------------- +{ + "app02": { + "all": { + "created": true + } + }, + "app01": { + "read": { + "created": true + }, + "write": { + "created": true + } + } +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc new file mode 100644 index 00000000000..be4afc57a1a --- /dev/null +++ b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc @@ -0,0 +1,89 @@ +[role="xpack"] +[[role-mapping-resources]] +=== Role mapping resources + +A role mapping resource has the following properties: + +`enabled`:: +(boolean) Mappings that have `enabled` set to `false` are ignored when role +mapping is performed. + +`metadata`:: +(object) Additional metadata that helps define which roles are assigned to each +user. Within the `metadata` object, keys beginning with `_` are reserved for +system usage. + +`roles`:: +(list) A list of roles that are granted to the users that match the role mapping +rules. + +`rules`:: +(object) The rules that determine which users should be matched by the mapping. +A rule is a logical condition that is expressed by using a JSON DSL. The DSL supports the following rule types: +`any`::: +(array of rules) If *any* of its children are true, it evaluates to `true`. +`all`::: +(array of rules) If *all* of its children are true, it evaluates to `true`. +`field`::: +(object) See <>. +`except`::: +(object) A single rule as an object. Only valid as a child of an `all` rule. If +its child is `false`, the `except` is `true`. + + +[float] +[[mapping-roles-rule-field]] +==== Field rules + +The `field` rule is the primary building block for a role mapping expression. +It takes a single object as its value and that object must contain a single +member with key _F_ and value _V_. The field rule looks up the value of _F_ +within the user object and then tests whether the user value _matches_ the +provided value _V_. + +The value specified in the field rule can be one of the following types: + +[cols="2,5,3m"] +|======================= +| Type | Description | Example + +| Simple String | Exactly matches the provided value. | "esadmin" +| Wildcard String | Matches the provided value using a wildcard. | "*,dc=example,dc=com" +| Regular Expression | Matches the provided value using a + {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp]. | "/.\*-admin[0-9]*/" +| Number | Matches an equivalent numerical value. | 7 +| Null | Matches a null or missing value. | null +| Array | Tests each element in the array in + accordance with the above definitions. + If _any_ of the elements match, the match is successful. | ["admin", "operator"] +|======================= + +[float] +===== User fields + +The _user object_ against which rules are evaluated has the following fields: + +`username`:: +(string) The username by which {security} knows this user. For example, `"username": "jsmith"`.
+`dn`:: +(string) The _Distinguished Name_ of the user. For example, `"dn": "cn=jsmith,ou=users,dc=example,dc=com"`. +`groups`:: +(array of strings) The groups to which the user belongs. For example, `"groups" : [ "cn=admin,ou=groups,dc=example,dc=com", "cn=esusers,ou=groups,dc=example,dc=com" ]`. +`metadata`:: +(object) Additional metadata for the user. For example, `"metadata": { "cn": "John Smith" }`. +`realm`:: +(object) The realm that authenticated the user. The only field in this object is the realm name. For example, `"realm": { "name": "ldap1" }`. + +The `groups` field is multi-valued; a user can belong to many groups. When a +`field` rule is applied against a multi-valued field, it is considered to match +if _at least one_ of the member values matches. For example, the following rule +matches any user who is a member of the `admin` group, regardless of any +other groups they belong to: + +[source, js] +------------------------------------------------------------ +{ "field" : { "groups" : "admin" } } +------------------------------------------------------------ +// NOTCONSOLE + +For additional realm-specific details, see +{stack-ov}/mapping-roles.html#ldap-role-mapping[Mapping Users and Groups to Roles]. diff --git a/x-pack/docs/en/rest-api/security/role-mapping.asciidoc b/x-pack/docs/en/rest-api/security/role-mapping.asciidoc deleted file mode 100644 index c8006346d4e..00000000000 --- a/x-pack/docs/en/rest-api/security/role-mapping.asciidoc +++ /dev/null @@ -1,404 +0,0 @@ -[role="xpack"] -[[security-api-role-mapping]] -=== Role Mapping APIs - -The Role Mapping API enables you to add, remove, and retrieve role mappings. - -==== Request - -`GET /_xpack/security/role_mapping` + - -`GET /_xpack/security/role_mapping/` + - -`DELETE /_xpack/security/role_mapping/` + - -`POST /_xpack/security/role_mapping/` + - -`PUT /_xpack/security/role_mapping/` - -==== Description - -Role mappings have _rules_ that identify users and a list of _roles_ that are -granted to those users. - -NOTE: This API does not create roles. Rather, it maps users to existing roles. -Roles can be created by using <> or -{xpack-ref}/defining-roles.html#roles-management-file[roles files]. - -The role mapping rule is a logical condition that is expressed using a JSON DSL. -The DSL supports the following rule types: - -|======================= -| Type | Value Type (child) | Description - -| `any` | An array of rules | If *any* of its children are true, it - evaluates to `true`. -| `all` | An array of rules | If *all* of its children are true, it - evaluates to `true`. -| `field` | An object | See <> -| `except` | A single rule as an object | Only valid as a child of an `all` - rule. If its child is `false`, the - `except` is `true`. -|======================= - -[float] -[[mapping-roles-rule-field]] -===== The Field Rule - -The `field` rule is the primary building block for a role-mapping expression. -It takes a single object as its value and that object must contain a single -member with key _F_ and value _V_. The field rule looks up the value of _F_ -within the user object and then tests whether the user value _matches_ the -provided value _V_. - -The value specified in the field rule can be one of the following types: -[cols="2,5,3m"] -|======================= -| Type | Description | Example - -| Simple String | Exactly matches the provided value. | "esadmin" -| Wildcard String | Matches the provided value using a wildcard.
| "*,dc=example,dc=com" -| Regular Expression | Matches the provided value using a - {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp]. | "/.\*-admin[0-9]*/" -| Number | Matches an equivalent numerical value. | 7 -| Null | Matches a null or missing value. | null -| Array | Tests each element in the array in - accordance with the above definitions. - If _any_ of elements match, the match is successful. | ["admin", "operator"] -|======================= - -===== User Fields - -The _user object_ against which rules are evaluated has the following fields: -[cols="1s,,,m"] -|======================= -| Name | Type | Description | Example - -| username | string | The username by which {security} knows this user. | `"username": "jsmith"` -| dn | string | The _Distinguished Name_ of the user. | `"dn": "cn=jsmith,ou=users,dc=example,dc=com",` -| groups | array-of-string | The groups to which the user belongs. | `"groups" : [ "cn=admin,ou=groups,dc=example,dc=com", -"cn=esusers,ou=groups,dc=example,dc=com ]` -| metadata | object | Additional metadata for the user. | `"metadata": { "cn": "John Smith" }` -| realm | object | The realm that authenticated the user. The only field in this object is the realm name. | `"realm": { "name": "ldap1" }` -|======================= - -The `groups` field is multi-valued; a user can belong to many groups. When a -`field` rule is applied against a multi-valued field, it is considered to match -if _at least one_ of the member values matches. For example, the following rule -matches any user who is a member of the `admin` group, regardless of any -other groups they belong to: - -[source, js] ------------------------------------------------------------- -{ "field" : { "groups" : "admin" } } ------------------------------------------------------------- -// NOTCONSOLE - -For additional realm-specific details, see -{xpack-ref}/mapping-roles.html#ldap-role-mapping[Mapping Users and Groups to Roles]. - - -==== Path Parameters - -`name`:: - (string) The distinct name that identifies the role mapping. The name is - used solely as an identifier to facilitate interaction via the API; it does - not affect the behavior of the mapping in any way. If you do not specify this - parameter for the Get Role Mappings API, it returns information about all - role mappings. - - -==== Request Body - -The following parameters can be specified in the body of a PUT or POST request -and pertain to adding a role mapping: - -`enabled` (required):: -(boolean) Mappings that have `enabled` set to `false` are ignored when role -mapping is performed. - -`metadata`:: -(object) Additional metadata that helps define which roles are assigned to each -user. Within the `metadata` object, keys beginning with `_` are reserved for -system usage. - -`roles` (required):: -(list) A list of roles that are granted to the users that match the role-mapping -rules. - -`rules` (required):: -(object) The rules that determine which users should be matched by the mapping. -A rule is a logical condition that is expressed by using a JSON DSL. - - -==== Authorization - -To use this API, you must have at least the `manage_security` cluster privilege. - - -==== Examples - -[[security-api-put-role-mapping]] -To add a role mapping, submit a PUT or POST request to the `/_xpack/security/role_mapping/` endpoint. 
The following example assigns -the "user" role to all users: - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping1 -{ - "roles": [ "user"], - "enabled": true, <1> - "rules": { - "field" : { "username" : "*" } - }, - "metadata" : { <2> - "version" : 1 - } -} ------------------------------------------------------------- -// CONSOLE -<1> Mappings that have `enabled` set to `false` are ignored when role mapping - is performed. -<2> Metadata is optional. - -A successful call returns a JSON structure that shows whether the mapping has -been created or updated. - -[source,js] --------------------------------------------------- -{ - "role_mapping" : { - "created" : true <1> - } -} --------------------------------------------------- -// TESTRESPONSE -<1> When an existing mapping is updated, `created` is set to false. - -The following example assigns the "user" and "admin" roles to specific users: - -[source,js] --------------------------------------------------- -POST /_xpack/security/role_mapping/mapping2 -{ - "roles": [ "user", "admin" ], - "enabled": true, - "rules": { - "field" : { "username" : [ "esadmin01", "esadmin02" ] } - } -} --------------------------------------------------- -// CONSOLE - -The following example matches any user where either the username is `esadmin` -or the user is in the `cn=admin,dc=example,dc=com` group: - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping3 -{ - "roles": [ "superuser" ], - "enabled": true, - "rules": { - "any": [ - { - "field": { - "username": "esadmin" - } - }, - { - "field": { - "groups": "cn=admins,dc=example,dc=com" - } - } - ] - } -} ------------------------------------------------------------- -// CONSOLE - -The following example matches users who authenticated against a specific realm: -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping4 -{ - "roles": [ "ldap-user" ], - "enabled": true, - "rules": { - "field" : { "realm.name" : "ldap1" } - } -} ------------------------------------------------------------- -// CONSOLE - -The following example matches users within a specific LDAP sub-tree: - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping5 -{ - "roles": [ "example-user" ], - "enabled": true, - "rules": { - "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } - } -} ------------------------------------------------------------- -// CONSOLE - -The following example matches users within a particular LDAP sub-tree in a -specific realm: - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping6 -{ - "roles": [ "ldap-example-user" ], - "enabled": true, - "rules": { - "all": [ - { "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } }, - { "field" : { "realm.name" : "ldap1" } } - ] - } -} ------------------------------------------------------------- -// CONSOLE - -The rules can be more complex and include wildcard matching. 
For example, the -following mapping matches any user where *all* of these conditions are met: - -- the _Distinguished Name_ matches the pattern `*,ou=admin,dc=example,dc=com`, - or the username is `es-admin`, or the username is `es-system` -- the user in in the `cn=people,dc=example,dc=com` group -- the user does not have a `terminated_date` - - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping7 -{ - "roles": [ "superuser" ], - "enabled": true, - "rules": { - "all": [ - { - "any": [ - { - "field": { - "dn": "*,ou=admin,dc=example,dc=com" - } - }, - { - "field": { - "username": [ "es-admin", "es-system" ] - } - } - ] - }, - { - "field": { - "groups": "cn=people,dc=example,dc=com" - } - }, - { - "except": { - "field": { - "metadata.terminated_date": null - } - } - } - ] - } -} ------------------------------------------------------------- -// CONSOLE - -[[security-api-get-role-mapping]] -To retrieve a role mapping, issue a GET request to the -`/_xpack/security/role_mapping/` endpoint: - -[source,js] --------------------------------------------------- -GET /_xpack/security/role_mapping/mapping7 --------------------------------------------------- -// CONSOLE -// TEST[continued] - -A successful call retrieves an object, where the keys are the -names of the request mappings, and the values are -the JSON representation of those mappings. -If there is no mapping with the requested name, the -response will have status code `404`. - -[source,js] --------------------------------------------------- -{ - "mapping7": { - "enabled": true, - "roles": [ - "superuser" - ], - "rules": { - "all": [ - { - "any": [ - { - "field": { - "dn": "*,ou=admin,dc=example,dc=com" - } - }, - { - "field": { - "username": [ - "es-admin", - "es-system" - ] - } - } - ] - }, - { - "field": { - "groups": "cn=people,dc=example,dc=com" - } - }, - { - "except": { - "field": { - "metadata.terminated_date": null - } - } - } - ] - }, - "metadata": {} - } -} --------------------------------------------------- -// TESTRESPONSE - -You can specify multiple mapping names as a comma-separated list. -To retrieve all mappings, omit the name entirely. - -[[security-api-delete-role-mapping]] -To delete a role mapping, submit a DELETE request to the -`/_xpack/security/role_mapping/` endpoint: - -[source,js] --------------------------------------------------- -DELETE /_xpack/security/role_mapping/mapping1 --------------------------------------------------- -// CONSOLE -// TEST[setup:role_mapping] - -If the mapping is successfully deleted, the request returns `{"found": true}`. -Otherwise, `found` is set to false. - -[source,js] --------------------------------------------------- -{ - "found" : true -} --------------------------------------------------- -// TESTRESPONSE diff --git a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc index 6298bb8ef9f..ba554eb8595 100644 --- a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc @@ -173,7 +173,7 @@ represent user roles for different systems in the organization. The `active_directory` realm enables you to map Active Directory users to roles via their Active Directory groups or other metadata. 
This role mapping can be -configured via the <> or by using +configured via the <> or by using a file stored on each node. When a user authenticates against an Active Directory realm, the privileges for that user are the union of all privileges defined by the roles to which the user is mapped. diff --git a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc index 683da76bb7b..fbf823dae70 100644 --- a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc @@ -55,18 +55,23 @@ cluster. + -- The `users` file stores all the users and their passwords. Each line in the file -represents a single user entry consisting of the username and **hashed** password. +represents a single user entry consisting of the username and **hashed** and **salted** password. [source,bash] ---------------------------------------------------------------------- rdeniro:$2a$10$BBJ/ILiyJ1eBTYoRKxkqbuDEdYECplvxnqQ47uiowE7yGqvCEgj9W alpacino:$2a$10$cNwHnElYiMYZ/T3K4PvzGeJ1KbpXZp2PfoQD.gfaVdImnHOwIuBKS -jacknich:$2a$10$GYUNWyABV/Ols/.bcwxuBuuaQzV6WIauW6RdboojxcixBq3LtI3ni +jacknich:{PBKDF2}50000$z1CLJt0MEFjkIK5iEfgvfnA6xq7lF25uasspsTKSo5Q=$XxCVLbaKDimOdyWgLCLJiyoiWpA/XDMe/xtVgn1r5Sg= ---------------------------------------------------------------------- -{security} uses `bcrypt` to hash the user passwords. +NOTE: To limit exposure to credential theft and mitigate credential compromise, +the file realm stores passwords and caches user credentials according to +security best practices. By default, a hashed version of user credentials +is stored in memory, using a salted `sha-256` hash algorithm, and a hashed +version of passwords is stored on disk, salted and hashed with the `bcrypt` +hash algorithm. To use different hash algorithms, see <>. -While it is possible to modify this files directly using any standard text +While it is possible to modify the `users` file directly using any standard text editor, we strongly recommend using the <> tool to apply the required changes. @@ -103,4 +108,4 @@ By default, {security} checks these files for changes every 5 seconds. You can change this default behavior by changing the `resource.reload.interval.high` setting in the `elasticsearch.yml` file (as this is a common setting in {es}, changing its value may effect other schedules in the system). --- \ No newline at end of file +-- diff --git a/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc new file mode 100644 index 00000000000..9e7ed476272 --- /dev/null +++ b/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc @@ -0,0 +1,175 @@ +[role="xpack"] +[[configuring-kerberos-realm]] +=== Configuring a Kerberos realm + +Kerberos is used to protect services and uses a ticket-based authentication +protocol to authenticate users. +You can configure {es} to use the Kerberos V5 authentication protocol, which is +an industry standard protocol, to authenticate users. +In this scenario, clients must present Kerberos tickets for authentication. + +In Kerberos, users authenticate with an authentication service and later +with a ticket granting service to generate a TGT (ticket-granting ticket). +This ticket is then presented to the service for authentication. +Refer to your Kerberos installation documentation for more information about +obtaining a TGT.
{es} clients must first obtain a TGT and then initiate the process of +authenticating with {es}. + +For a summary of Kerberos terminology, see {stack-ov}/kerberos-realm.html[Kerberos authentication]. + +==== Before you begin + +. Deploy Kerberos. ++ +-- +You must have the Kerberos infrastructure set up in your environment. + +NOTE: Kerberos requires several external services to function properly, such as +time synchronization between all machines and working forward and reverse DNS +mappings in your domain. Refer to your Kerberos documentation for more details. + +These instructions do not cover setting up and configuring your Kerberos +deployment. Where examples are provided, they pertain to an MIT Kerberos V5 +deployment. For more information, see the +http://web.mit.edu/kerberos/www/index.html[MIT Kerberos documentation]. +-- + +. Configure Java GSS. ++ +-- + +{es} uses Java GSS framework support for Kerberos authentication. +To support Kerberos authentication, {es} needs the following files: + +* `krb5.conf`, a Kerberos configuration file +* A `keytab` file that contains credentials for the {es} service principal + +The configuration requirements depend on your Kerberos setup. Refer to your +Kerberos documentation to configure the `krb5.conf` file. + +For more information on Java GSS, see +https://docs.oracle.com/javase/10/security/kerberos-requirements1.htm[Java GSS Kerberos requirements]. +-- + +==== Create a Kerberos realm + +To configure a Kerberos realm in {es}: + +. Configure the JVM to find the Kerberos configuration file. ++ +-- +{es} uses Java GSS and JAAS Krb5LoginModule to support Kerberos authentication +using the Simple and Protected GSSAPI Negotiation Mechanism (SPNEGO). +The Kerberos configuration file (`krb5.conf`) provides information such as the +default realm, the Key Distribution Center (KDC), and other configuration details +required for Kerberos authentication. When the JVM needs some configuration +properties, it tries to find those values by locating and loading this file. The +JVM system property to configure the file path is `java.security.krb5.conf`. To +configure JVM system properties, see {ref}/jvm-options.html[configuring jvm options]. +If this system property is not specified, Java tries to locate the file based on +its default conventions. + +TIP: It is recommended that this system property be configured for {es}. +The method for setting this property depends on your Kerberos infrastructure. +Refer to your Kerberos documentation for more details. + +For more information, see http://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html[krb5.conf]. + +-- + +. Create a keytab for the {es} node. ++ +-- +A keytab is a file that stores pairs of principals and encryption keys. {es} +uses the keys from the keytab to decrypt the tickets presented by the user. You +must create a keytab for {es} by using the tools provided by your Kerberos +implementation. For example, some tools that create keytabs are `ktpass.exe` on +Windows and `kadmin` for MIT Kerberos. +-- + +. Put the keytab file in the {es} configuration directory. ++ +-- +Make sure that this keytab file has read permissions. This file contains +credentials, so you must take appropriate measures to protect it. + +IMPORTANT: {es} uses Kerberos on the HTTP network layer; therefore, there must be +a keytab file for the HTTP service principal on every {es} node. The service +principal name must have the format `HTTP/es.domain.local@ES.DOMAIN.LOCAL`.
+The keytab files are unique for each node since they include the hostname. +An {es} node can act as any principal a client requests as long as that +principal and its credentials are found in the configured keytab. + +-- + +. Create a Kerberos realm. ++ +-- + +To enable Kerberos authentication in {es}, you must add a Kerberos realm in the +realm chain. + +NOTE: You can configure only one Kerberos realm on {es} nodes. + +To configure a Kerberos realm, there are a few mandatory realm settings and +other optional settings that you need to configure in the `elasticsearch.yml` +configuration file. Add a realm of type `kerberos` under the +`xpack.security.authc.realms` namespace. + +The most common configuration for a Kerberos realm is as follows: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.realms.kerb1: + type: kerberos + order: 3 + keytab.path: es.keytab + remove_realm_name: false +------------------------------------------------------------ + +The `username` is extracted from the ticket presented by the user and usually has +the format `username@REALM`. This `username` is used for mapping +roles to the user. If the realm setting `remove_realm_name` is +set to `true`, the realm part (`@REALM`) is removed. The resulting `username` +is used for role mapping. + +For detailed information about available realm settings, +see {ref}/security-settings.html#ref-kerberos-settings[Kerberos realm settings]. + +-- + +. Restart {es}. + +. Map Kerberos users to roles. ++ +-- + +The `kerberos` realm enables you to map Kerberos users to roles. You can +configure these role mappings by using the +{ref}/security-api-put-role-mapping.html[add role mapping API]. You identify +users by their `username` field. + +The following example uses the role mapping API to map `user@REALM` to the +`monitoring_user` role: + +[source,js] +-------------------------------------------------- +POST _xpack/security/role_mapping/kerbrolemapping +{ + "roles" : [ "monitoring_user" ], + "enabled": true, + "rules" : { + "field" : { "username" : "user@REALM" } + } +} +-------------------------------------------------- +// CONSOLE + +For more information, see {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. + +NOTE: The Kerberos realm supports +{stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an +alternative to role mapping. + +-- + diff --git a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc index e32c9eb5300..a5f8c3e4412 100644 --- a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc @@ -133,7 +133,7 @@ supports both failover and load balancing modes of operation. See -- The `ldap` realm enables you to map LDAP users to to roles via their LDAP groups, or other metadata. This role mapping can be configured via the -{ref}/security-api-role-mapping.html[role-mapping API] or by using a file stored +{ref}/security-api-put-role-mapping.html[add role mapping API] or by using a file stored on each node. When a user authenticates with LDAP, the privileges for that user are the union of all privileges defined by the roles to which the user is mapped. @@ -189,6 +189,11 @@ For more information, see {xpack-ref}/ldap-realm.html#mapping-roles-ldap[Mapping LDAP Groups to Roles] and {xpack-ref}/mapping-roles.html[Mapping Users and Groups to Roles].
+ +NOTE: The LDAP realm supports +{stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an +alternative to role mapping. + -- . (Optional) Configure the `metadata` setting on the LDAP realm to include extra @@ -211,4 +216,4 @@ xpack: type: ldap metadata: cn -------------------------------------------------- --- \ No newline at end of file +-- diff --git a/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc index 3cda29c2c71..e9fb9cd0eb8 100644 --- a/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc @@ -34,6 +34,13 @@ xpack: type: native order: 0 ------------------------------------------------------------ + +NOTE: To limit exposure to credential theft and mitigate credential compromise, +the native realm stores passwords and caches user credentials according to +security best practices. By default, a hashed version of user credentials +is stored in memory, using a salted `sha-256` hash algorithm, and a hashed +version of passwords is stored on disk, salted and hashed with the `bcrypt` +hash algorithm. To use different hash algorithms, see <>. -- . Restart {es}. diff --git a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc index f66a82b0664..9a4d5fcf18b 100644 --- a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc @@ -10,7 +10,8 @@ NOTE: You cannot use PKI certificates to authenticate users in {kib}. To use PKI in {es}, you configure a PKI realm, enable client authentication on the desired network layers (transport or http), and map the Distinguished Names -(DNs) from the user certificates to {security} roles in the role mapping file. +(DNs) from the user certificates to {security} roles in the +<> or role-mapping file. You can also use a combination of PKI and username/password authentication. For example, you can enable SSL/TLS on the transport layer and define a PKI realm to @@ -126,7 +127,7 @@ The `certificate_authorities` option can be used as an alternative to the + -- You map roles for PKI users through the -<> or by using a file stored on +<> or by using a file stored on each node. When a user authenticates against a PKI realm, the privileges for that user are the union of all privileges defined by the roles to which the user is mapped. @@ -173,4 +174,9 @@ key. You can also use the authenticate API to validate your role mapping. For more information, see {xpack-ref}/mapping-roles.html[Mapping Users and Groups to Roles]. --- \ No newline at end of file + +NOTE: The PKI realm supports +{stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an +alternative to role mapping. + +-- diff --git a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc index cbcbeebb359..d16e1302550 100644 --- a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc @@ -219,6 +219,11 @@ access any data. Your SAML users cannot do anything until they are mapped to {security} roles. See {stack-ov}/saml-role-mapping.html[Configuring role mappings].
+
+NOTE: The SAML realm supports
+{stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an
+alternative to role mapping.
+
 --
 
 . {stack-ov}/saml-kibana.html[Configure {kib} to use SAML SSO].
diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc
index 7139f4f8198..b0077dc1ba9 100644
--- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc
+++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc
@@ -76,12 +76,13 @@ binding.
 
 There are five configuration steps to enable SAML authentication in {es}:
 
-. Enable SSL/TLS for HTTP
-. Enable the Token Service
-. Create one or more SAML realms
-. Configure role mappings
+. <>
+. <>
+. <>
+. <>
 . Generate a SAML Metadata file for use by your Identity Provider _(optional)_
 
+[[saml-enable-http]]
 ==== Enable TLS for HTTP
 
 If your {es} cluster is operating in production mode, then you must
@@ -91,6 +92,7 @@ authentication.
 For more information, see
 {ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications].
 
+[[saml-enable-token]]
 ==== Enable the token service
 
 The {es} SAML implementation makes use of the {es} Token Service. This service
@@ -356,6 +358,35 @@ address such as `admin@staff.example.com.attacker.net`. It is important that
 you make sure your regular expressions are as precise as possible so that you
 do not inadvertently open an avenue for user impersonation attacks.
 
+[[req-authn-context]]
+==== Requesting specific authentication methods
+
+A SAML SP sometimes needs to impose specific restrictions on the
+authentication that takes place at an IdP, in order to assess the level of
+confidence that it can place in the corresponding authentication response.
+The restrictions might have to do with the authentication method (password,
+client certificates, etc.), the user identification method during
+registration, and other details. {es} implements
+https://docs.oasis-open.org/security/saml/v2.0/saml-authn-context-2.0-os.pdf[SAML 2.0 Authentication Context],
+which can be used for this purpose as defined in the SAML 2.0 Core
+Specification.
+
+In short, the SAML SP defines a set of Authentication Context Class Reference
+values, which describe the restrictions to be imposed on the IdP, and sends these
+in the Authentication Request. The IdP attempts to satisfy these restrictions.
+If it cannot satisfy them, the authentication attempt fails. If the user is
+successfully authenticated, the Authentication Statement of the SAML Response
+contains an indication of the restrictions that were satisfied.
+
+You can define the Authentication Context Class Reference values by using the
+`req_authn_context_class_ref` option in the SAML realm configuration. See
+{ref}/security-settings.html#ref-saml-settings[SAML realm settings].
+
+{es} supports only the `exact` comparison method for the Authentication Context.
+When it receives the Authentication Response from the IdP, {es} examines the
+value of the Authentication Context Class Reference that is part of the
+Authentication Statement of the SAML Assertion. If it matches one of the
+requested values, the authentication is considered successful. Otherwise, the
+authentication attempt fails.
+
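As a rough illustration of the `req_authn_context_class_ref` option described
above, the following sketch shows a SAML realm that requests
password-protected transport. The realm name `saml1` is hypothetical, other
mandatory realm settings are omitted for brevity, and the URN is the standard
SAML 2.0 `PasswordProtectedTransport` class reference:

[source, yaml]
------------------------------------------------------------
xpack.security.authc.realms.saml1:
  type: saml
  order: 2
  # Ask the IdP to authenticate the user with a password over a
  # protected channel; the URN is defined by the SAML 2.0 standard.
  req_authn_context_class_ref: "urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport"
------------------------------------------------------------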
 [[saml-logout]]
 ==== SAML logout
@@ -442,7 +473,7 @@ or separate keys used for each of those.
 
 The Elastic Stack uses X.509 certificates with RSA private keys for SAML
 cryptography. These keys can be generated using any standard SSL tool, including
-the `elasticsearch-certutil` tool that ships with X-Pack.
+the `elasticsearch-certutil` tool that ships with {xpack}.
 
 Your IdP may require that the Elastic Stack have a cryptographic key for signing
 SAML messages, and that you provide the corresponding signing certificate within
@@ -573,6 +604,7 @@ The passphrase for the keystore, if the file is encrypted. This is a
 {ref}/secure-settings.html[secure setting] that must be set with the
 `elasticsearch-keystore` tool.
 
+[[saml-sp-metadata]]
 === Generating SP metadata
 
 Some Identity Providers support importing a metadata file from the Service
@@ -592,9 +624,10 @@ When a user authenticates using SAML, they are identified to the Elastic Stack,
 but this does not automatically grant them access to perform any actions or
 access any data.
 
-Your SAML users cannot do anything until they are mapped to X-Pack Security
-roles. This mapping is performed through the
-{ref}/security-api-role-mapping.html[role-mapping API]
+Your SAML users cannot do anything until they are assigned {security}
+roles. This is done through either the
+{ref}/security-api-put-role-mapping.html[add role mapping API], or with
+<>.
 
 This is an example of a simple role mapping that grants the `kibana_user` role
 to any user who authenticates against the `saml1` realm:
@@ -626,7 +659,7 @@ mapping are derived from the SAML attributes as follows:
 - `metadata`: See <>
 
 For more information, see <> and
-{ref}/security-api-role-mapping.html[Role Mapping APIs].
+{ref}/security-api.html#security-role-mapping-apis[role mapping APIs].
 
 If your IdP has the ability to provide groups or roles to Service Providers,
 then you should map this SAML attribute to the `attributes.groups` setting in
@@ -651,6 +684,18 @@ PUT /_xpack/security/role_mapping/saml-finance
 // CONSOLE
 // TEST
 
+If your users also exist in a repository that can be directly accessed by
+{security} (such as an LDAP directory), then you can use
+<> instead of role mappings.
+
+In this case, you perform the following steps (sketched below):
+
+1. In your SAML realm, assign a SAML attribute to act as the lookup user ID,
+   by configuring the `attributes.principal` setting.
+2. Create a new realm that can look up users from your local repository (e.g.
+   an `ldap` realm).
+3. In your SAML realm, set `authorization_realms` to the name of the realm you
+   created in step 2.
+
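The following is a minimal sketch of the configuration these steps produce,
assuming a hypothetical `saml1` realm that looks up users in a hypothetical
`ldap1` realm; all names, attributes, and URLs are illustrative only, and
further mandatory settings for each realm are omitted:

[source, yaml]
------------------------------------------------------------
xpack.security.authc.realms.saml1:
  type: saml
  order: 2
  # Step 1: the SAML attribute that provides the lookup user ID.
  attributes.principal: "nameid"
  # Step 3: delegate authorization to the realm defined below.
  authorization_realms: ldap1
xpack.security.authc.realms.ldap1:
  type: ldap
  order: 3
  url: "ldaps://ldap.example.com:636"
------------------------------------------------------------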
 [[saml-user-metadata]]
 === User metadata
diff --git a/x-pack/docs/en/security/authentication/user-cache.asciidoc b/x-pack/docs/en/security/authentication/user-cache.asciidoc
index 36af070bf06..716e7af9914 100644
--- a/x-pack/docs/en/security/authentication/user-cache.asciidoc
+++ b/x-pack/docs/en/security/authentication/user-cache.asciidoc
@@ -12,27 +12,8 @@ object to avoid unnecessarily needing to perform role mapping on each request.
 
 The cached user credentials are hashed in memory. By default, {security} uses
 a salted `sha-256` hash algorithm. You can use a different hashing algorithm by
-setting the `cache_hash_algo` setting to any of the following:
-
-[[cache-hash-algo]]
-.Cache hash algorithms
-|=======================
-| Algorithm           | | | Description
-| `ssha256`           | | | Uses a salted `sha-256` algorithm (default).
-| `md5`               | | | Uses `MD5` algorithm.
-| `sha1`              | | | Uses `SHA1` algorithm.
-| `bcrypt`            | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds.
-| `bcrypt4`           | | | Uses `bcrypt` algorithm with salt generated in 16 rounds.
-| `bcrypt5`           | | | Uses `bcrypt` algorithm with salt generated in 32 rounds.
-| `bcrypt6`           | | | Uses `bcrypt` algorithm with salt generated in 64 rounds.
-| `bcrypt7`           | | | Uses `bcrypt` algorithm with salt generated in 128 rounds.
-| `bcrypt8`           | | | Uses `bcrypt` algorithm with salt generated in 256 rounds.
-| `bcrypt9`           | | | Uses `bcrypt` algorithm with salt generated in 512 rounds.
-| `noop`,`clear_text` | | | Doesn't hash the credentials and keeps it in clear text in
-                            memory. CAUTION: keeping clear text is considered insecure
-                            and can be compromised at the OS level (for example through
-                            memory dumps and using `ptrace`).
-|=======================
+setting the `cache.hash_algo` realm settings. See
+{ref}/security-settings.html#hashing-settings[User cache and password hash algorithms].
 
 [[cache-eviction-api]]
 ==== Evicting users from the cache
diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc
index f550c900edc..7b30284f583 100644
--- a/x-pack/docs/en/security/authorization/managing-roles.asciidoc
+++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc
@@ -9,7 +9,10 @@ A role is defined by the following JSON structure:
 {
   "run_as": [ ... ], <1>
   "cluster": [ ... ], <2>
-  "indices": [ ... ] <3>
+  "global": { ... }, <3>
+  "indices": [ ... ], <4>
+  "applications": [ ... ] <5>
+
 }
 -----
 // NOTCONSOLE
@@ -19,8 +22,15 @@ A role is defined by the following JSON structure:
     cluster level actions users with this role are able to execute. This field
     is optional (missing `cluster` privileges effectively mean no cluster level
     permissions).
-<3> A list of indices permissions entries. This field is optional (missing `indices`
+<3> An object defining global privileges. A global privilege is a form of
+    cluster privilege that is request sensitive. A standard cluster privilege
+    makes authorization decisions based solely on the action being executed.
+    A global privilege also considers the parameters included in the request.
+    Support for global privileges is currently limited to the management of
+    application privileges. This field is optional.
+<4> A list of indices permissions entries. This field is optional (missing `indices`
     privileges effectively mean no index level permissions).
+<5> A list of application privilege entries. This field is optional.
 
 [[valid-role-name]]
 NOTE: Role names must be at least 1 and no more than 1024 characters. They can
@@ -28,6 +38,9 @@ NOTE: Role names must be at least 1 and no more than 1024 characters. They can
 punctuation, and printable symbols in the
 https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block].
 Leading or trailing whitespace is not allowed.
 
+[[roles-indices-priv]]
+==== Indices Privileges
+
 The following describes the structure of an indices permissions entry:
 
 [source,js]
@@ -77,8 +90,60 @@ names or regular expressions that refer to multiple indices.
 ------------------------------------------------------------------------------
 ==============================================================================
 
-The following snippet shows an example definition of a `clicks_admin` role:
+[[roles-global-priv]]
+==== Global Privileges
+
+The following describes the structure of a global privileges entry:
+
+[source,js]
+-------
+{
+  "application": {
+    "manage": {    <1>
+      "applications": [ ... ] <2>
+    }
+  }
+}
+-------
+// NOTCONSOLE
+
+<1> The only supported global privilege is the ability to manage application
+    privileges.
+<2> The list of application names that may be managed. This list supports
+    wildcards (e.g.
`"myapp-*"`) and regular expressions (e.g. + `"/app[0-9]*/"`) + +[[roles-application-priv]] +==== Application Privileges +The following describes the structure of an application privileges entry: + +[source,js] +------- +{ + "application": "my_app", <1> + "privileges": [ ... ], <2> + "resources": [ ... ] <3> +} +------- +// NOTCONSOLE + +<1> The name of the application. +<2> The list of the names of the application privileges to grant to this role. +<3> The resources to which those privileges apply. These are handled in the same + way as index name pattern in `indices` permissions. These resources do not + have any special meaning to {security}. + +For details about the validation rules for these fields, see the +{ref}/security-api-put-privileges.html[add application privileges API]. + +A role may refer to application privileges that do not exist - that is, they +have not yet been defined through the add application privileges API (or they +were defined, but have since been deleted). In this case, the privilege has +no effect, and will not grant any actions in the +{ref}/security-api-has-privileges.html[has privileges API]. + +==== Example + +The following snippet shows an example definition of a `clicks_admin` role: [source,js] ----------- POST /_xpack/security/role/clicks_admin diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index 36f3a1f27f3..166238c32ac 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -24,11 +24,14 @@ either role management method. For example, when you use the role mapping API, you are able to map users to both API-managed roles and file-managed roles (and likewise for file-based role-mappings). +NOTE: The PKI, LDAP, Kerberos and SAML realms support using +<> as an alternative to role mapping. + [[mapping-roles-api]] ==== Using the role mapping API You can define role-mappings through the -{ref}/security-api-role-mapping.html[role mapping API]. +{ref}/security-api-put-role-mapping.html[add role mapping API]. [[mapping-roles-file]] ==== Using role mapping files diff --git a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc index 93d11c0ab2a..8dba764cc1c 100644 --- a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc +++ b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc @@ -12,7 +12,7 @@ the realm you use to authenticate. Both the internal `native` and `file` realms support this out of the box. The LDAP realm must be configured to run in <>. The Active Directory realm must be <> to support -_run as_. The PKI realm does not support _run as_. +_run as_. The PKI, Kerberos, and SAML realms do not support _run as_. To submit requests on behalf of other users, you need to have the `run_as` permission. 
diff --git a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc
index e5f43a08e7a..12a5a565336 100644
--- a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc
+++ b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc
@@ -49,7 +49,7 @@ information about the `xpack.security.enabled` setting, see
 PUT _cluster/settings
 {
   "persistent": {
-    "search": {
+    "cluster": {
       "remote": {
         "cluster_one": {
           "seeds": [ "10.0.1.1:9300" ]
@@ -82,7 +82,7 @@ First, enable cluster `one` to perform cross cluster search on remote cluster
 PUT _cluster/settings
 {
   "persistent": {
-    "search.remote.cluster_two.seeds": [ "10.0.2.1:9300" ]
+    "cluster.remote.cluster_two.seeds": [ "10.0.2.1:9300" ]
   }
 }
 -----------------------------------------------------------
diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc
index 53f36afc734..5fd9ed610cb 100644
--- a/x-pack/docs/en/security/configuring-es.asciidoc
+++ b/x-pack/docs/en/security/configuring-es.asciidoc
@@ -27,6 +27,9 @@ https://www.elastic.co/subscriptions and your cluster. If you are using a trial
 license, the default value is `false`. For more information, see
 {ref}/security-settings.html[Security Settings in {es}].
 
+. If you plan to run {es} in a Federal Information Processing Standard (FIPS)
+140-2 enabled JVM, see <>.
+
 . Configure Transport Layer Security (TLS/SSL) for internode communication.
 +
 --
@@ -52,8 +55,8 @@ help you get up and running.
 
 The +elasticsearch-setup-passwords+ command is the simplest method to set the
 built-in users' passwords for the first time.
 
 For example, you can run the command in an "interactive" mode, which prompts you
-to enter new passwords for the `elastic`, `kibana`, `beats_system`, and
-`logstash_system` users:
+to enter new passwords for the `elastic`, `kibana`, `beats_system`,
+`logstash_system`, and `apm_system` users:
 
 [source,shell]
 --------------------------------------------------
@@ -77,6 +80,7 @@ user API.
 ** <>.
 ** <>.
 ** <>.
+** <>.
 
 . Set up roles and users to control access to {es}.
   For example, to grant _John Doe_ full access to all indices that match
@@ -142,5 +146,8 @@ include::authentication/configuring-ldap-realm.asciidoc[]
 include::authentication/configuring-native-realm.asciidoc[]
 include::authentication/configuring-pki-realm.asciidoc[]
 include::authentication/configuring-saml-realm.asciidoc[]
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc
+include::authentication/configuring-kerberos-realm.asciidoc[]
+include::fips-140-compliance.asciidoc[]
 include::{es-repo-dir}/settings/security-settings.asciidoc[]
 include::{es-repo-dir}/settings/audit-settings.asciidoc[]
diff --git a/x-pack/docs/en/security/fips-140-compliance.asciidoc b/x-pack/docs/en/security/fips-140-compliance.asciidoc
new file mode 100644
index 00000000000..ceb605c2e2d
--- /dev/null
+++ b/x-pack/docs/en/security/fips-140-compliance.asciidoc
@@ -0,0 +1,128 @@
+[role="xpack"]
+[[fips-140-compliance]]
+=== FIPS 140-2
+
+The Federal Information Processing Standard (FIPS) Publication 140-2 (FIPS PUB
+140-2), titled "Security Requirements for Cryptographic Modules", is a U.S.
+government computer security standard used to approve cryptographic modules.
+
+{es} offers a FIPS 140-2 compliant mode and as such can run in a FIPS 140-2
+enabled JVM. To run {es} in FIPS mode, you must set
+`xpack.security.fips_mode.enabled` to `true` in `elasticsearch.yml`.
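For example (a minimal sketch; this single setting is taken directly from the
paragraph above):

[source, yaml]
------------------------------------------------------------
# Enable FIPS mode; the JVM itself must also be FIPS 140-2 enabled.
xpack.security.fips_mode.enabled: true
------------------------------------------------------------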
+
+For {es}, FIPS 140-2 compliance is ensured by:
+
+- Using FIPS approved / NIST recommended cryptographic algorithms.
+- Delegating the implementation of these cryptographic algorithms to a NIST
+  validated cryptographic module (available via the Java Security Provider
+  in use in the JVM).
+- Allowing the configuration of {es} in a FIPS 140-2 compliant manner, as
+  documented below.
+
+[float]
+=== Upgrade considerations
+
+If you plan to upgrade your existing cluster to a version that can be run in
+a FIPS 140-2 enabled JVM, the suggested approach is to first perform a rolling
+upgrade to the new version in your existing JVM and perform all necessary
+configuration changes in preparation for running in FIPS mode. You can then
+perform a rolling restart of the nodes, this time starting each node in the
+FIPS 140-2 JVM. This allows {es} to take care of a couple of things
+automatically for you:
+
+- <> will be upgraded to the latest format version, as
+  previous format versions cannot be loaded in a FIPS 140-2 JVM.
+- Self-generated trial licenses will be upgraded to the latest format that
+  is compliant with FIPS 140-2.
+
+If you are on an appropriate license level (Platinum), you can elect to perform
+a rolling upgrade while at the same time running each upgraded node in a
+FIPS 140-2 JVM. In this case, you would need to also regenerate your
+`elasticsearch.keystore` and migrate all secure settings to it, in addition to
+the necessary configuration changes outlined below, before starting each node.
+
+[float]
+=== Configuring {es} for FIPS 140-2
+
+Apart from setting `xpack.security.fips_mode.enabled`, a number of
+security-related settings need to be configured accordingly in order to be
+compliant and able to run {es} successfully in a FIPS 140-2 enabled JVM.
+
+[float]
+==== TLS
+
+SSLv2 and SSLv3 are not allowed by FIPS 140-2, so `SSLv2Hello` and `SSLv3`
+cannot be used for <>.
+
+NOTE: The use of TLS ciphers is mainly governed by the relevant crypto module
+(the FIPS Approved Security Provider that your JVM uses). All the ciphers that
+are configured by default in {es} are FIPS 140-2 compliant and as such can be
+used in a FIPS 140-2 JVM (see <>).
+
+[float]
+==== TLS Keystores and keys
+
+Keystores can be used in a number of <> in order to
+conveniently store key and trust material. However, neither `JKS` nor
+`PKCS#12` keystores can be used in a FIPS 140-2 enabled JVM, so you must
+refrain from using them. Your FIPS 140-2 provider may supply a compliant
+keystore that can be used, or you can use PEM encoded files. To use PEM
+encoded key material, you can use the relevant `\*.key` and `*.certificate`
+configuration options, and for trust material you can use
+`*.certificate_authorities`.
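As a hedged illustration of the PEM options just mentioned, shown here for the
transport layer with hypothetical placeholder paths:

[source, yaml]
------------------------------------------------------------
# PEM encoded key material instead of a JKS or PKCS#12 keystore.
xpack.security.transport.ssl.key: /etc/elasticsearch/node01.key
xpack.security.transport.ssl.certificate: /etc/elasticsearch/node01.crt
xpack.security.transport.ssl.certificate_authorities: [ "/etc/elasticsearch/ca.crt" ]
------------------------------------------------------------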
+
+FIPS 140-2 compliance dictates that the length of the public keys used for TLS
+must correspond to the strength of the symmetric key algorithm in use in TLS.
+Depending on the value of <> that
+you select, the TLS keys must have a corresponding length, according to
+the following table:
+
+[[comparable-key-strength]]
+.Comparable key strengths
+|=======================
+| Symmetric Key Algorithm | RSA key length | ECC key length
+| `3DES`                  | 2048           | 224-255
+| `AES-128`               | 3072           | 256-383
+| `AES-256`               | 15360          | 512+
+|=======================
+
+[float]
+==== Password Hashing
+
+{es} offers a number of algorithms for securely hashing credentials in memory
+and on disk. However, only the `PBKDF2` family of algorithms is compliant with
+FIPS 140-2 for password hashing. You must set the `cache.hash_algo` realm
+settings and the `xpack.security.authc.password_hashing.algorithm` setting to
+one of the available `PBKDF2` values.
+See <>.
+
+Password hashing configuration changes are not retroactive, so the stored
+hashed credentials of existing users of the file and native realms will not be
+updated on disk.
+Authentication will still work, but in order to ensure FIPS 140-2 compliance,
+you would need to recreate users or change their password using the
+<> CLI tool for the file realm and the
+<> for the native realm.
+
+The user cache will be emptied upon node restart, so any existing hashes using
+non-compliant algorithms will be discarded and the new ones will be created
+using the compliant `PBKDF2` algorithm you have selected.
+
+[float]
+=== Limitations
+
+Due to the limitations that FIPS 140-2 compliance enforces, a small number of
+features are not available while running in FIPS mode. The list is as follows:
+
+* Azure Classic Discovery Plugin
+* Ingest Attachment Plugin
+* The {ref}/certutil.html[`elasticsearch-certutil`] tool. However,
+  `elasticsearch-certutil` can still be used in a non-FIPS 140-2
+  enabled JVM (by pointing the `JAVA_HOME` environment variable to a different
+  Java installation) in order to generate the keys and certificates that
+  can be later used in the FIPS 140-2 enabled JVM.
+* The `elasticsearch-plugin` tool. Accordingly, `elasticsearch-plugin` can be
+  used with a different (non-FIPS 140-2 enabled) Java installation if
+  available.
+* The SQL CLI client cannot run in a FIPS 140-2 enabled JVM while using
+  TLS for transport security or PKI for client authentication.
+* The SAML Realm cannot decrypt and consume encrypted Assertions or encrypted
+  attributes in Attribute Statements from the SAML IdP.
\ No newline at end of file
diff --git a/x-pack/docs/en/security/limitations.asciidoc b/x-pack/docs/en/security/limitations.asciidoc
deleted file mode 100644
index fb8b826d5dd..00000000000
--- a/x-pack/docs/en/security/limitations.asciidoc
+++ /dev/null
@@ -1,87 +0,0 @@
-[role="xpack"]
-[[security-limitations]]
-== Security Limitations
-
-[float]
-=== Plugins
-
-Elasticsearch's plugin infrastructure is extremely flexible in terms of what can
-be extended. While it opens up Elasticsearch to a wide variety of (often custom)
-additional functionality, when it comes to security, this high extensibility level
-comes at a cost. We have no control over the third-party plugins' code (open
-source or not) and therefore we cannot guarantee their compliance with {security}.
-For this reason, third-party plugins are not officially supported on clusters
-with {security} enabled.
-
-[float]
-=== Changes in Index Wildcard Behavior
-
-Elasticsearch clusters with {security} enabled apply the `/_all` wildcard, and
-all other wildcards, to the indices that the current user has privileges for, not
-the set of all indices on the cluster.
- -[float] -=== Multi Document APIs - -Multi get and multi term vectors API throw IndexNotFoundException when trying to access non existing indices that the user is -not authorized for. By doing that they leak information regarding the fact that the index doesn't exist, while the user is not -authorized to know anything about those indices. - -[float] -=== Filtered Index Aliases - -Aliases containing filters are not a secure way to restrict access to individual -documents, due to the limitations described in <>. -{security} provides a secure way to restrict access to documents through the -<> feature. - -[float] -=== Field and Document Level Security Limitations - -When a user's role enables document or field level security for an index: - -* The user cannot perform write operations: -** The update API isn't supported. -** Update requests included in bulk requests aren't supported. -* The request cache is disabled for search requests. - -When a user's role enables document level security for an index: - -* Document level security isn't applied for APIs that aren't document based. - An example is the field stats API. -* Document level security doesn't affect global index statistics that relevancy - scoring uses. So this means that scores are computed without taking the role - query into account. Note that documents not matching with the role query are - never returned. -* The `has_child` and `has_parent` queries aren't supported as query in the - role definition. The `has_child` and `has_parent` queries can be used in the - search API with document level security enabled. -* Any query that makes remote calls to fetch data to query by isn't supported. - The following queries aren't supported: -** The `terms` query with terms lookup isn't supported. -** The `geo_shape` query with indexed shapes isn't supported. -** The `percolate` query isn't supported. -* If suggesters are specified and document level security is enabled then - the specified suggesters are ignored. -* A search request cannot be profiled if document level security is enabled. - -[float] -[[alias-limitations]] -=== Index and Field Names Can Be Leaked When Using Aliases - -Calling certain Elasticsearch APIs on an alias can potentially leak information -about indices that the user isn't authorized to access. For example, when you get -the mappings for an alias with the `_mapping` API, the response includes the -index name and mappings for each index that the alias applies to. - -Until this limitation is addressed, avoid index and field names that contain -confidential or sensitive information. - -[float] -=== LDAP Realm - -The <> does not currently support the discovery of nested -LDAP Groups. For example, if a user is a member of `group_1` and `group_1` is a -member of `group_2`, only `group_1` will be discovered. However, the -<> *does* support transitive -group membership. diff --git a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc index eb8e985a65b..06e70b03673 100644 --- a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc @@ -77,7 +77,17 @@ bin/elasticsearch-keystore add xpack.security.http.ssl.secure_key_passphrase . Restart {es}. 
-NOTE: All TLS-related node settings are considered to be highly sensitive and
+[NOTE]
+===============================
+* All TLS-related node settings are considered to be highly sensitive and
 therefore are not exposed via the
 {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. For more
 information about any of these settings, see <>.
+
+* {es} monitors all files such as certificates, keys, keystores, or truststores
+that are configured as values of TLS-related node settings. If you update any of
+these files (for example, when your hostnames change or your certificates are
+due to expire), {es} reloads them. The files are polled for changes at
+a frequency determined by the global {es} `resource.reload.interval.high`
+setting, which defaults to 5 seconds.
+===============================
diff --git a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc
index c186aebbe24..c2306545536 100644
--- a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc
+++ b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc
@@ -95,7 +95,17 @@ vice-versa).
 
 After enabling TLS you must restart all nodes in order to maintain communication
 across the cluster.
 --
 
-NOTE: All TLS-related node settings are considered to be highly sensitive and
+[NOTE]
+===============================
+* All TLS-related node settings are considered to be highly sensitive and
 therefore are not exposed via the
 {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. For more
 information about any of these settings, see <>.
+
+* {es} monitors all files such as certificates, keys, keystores, or truststores
+that are configured as values of TLS-related node settings. If you update any of
+these files (for example, when your hostnames change or your certificates are
+due to expire), {es} reloads them. The files are polled for changes at
+a frequency determined by the global {es} `resource.reload.interval.high`
+setting, which defaults to 5 seconds.
+===============================
diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc
deleted file mode 100644
index d1c88b2786f..00000000000
--- a/x-pack/docs/en/security/troubleshooting.asciidoc
+++ /dev/null
@@ -1,418 +0,0 @@
-[role="xpack"]
-[[security-troubleshooting]]
-== {security} Troubleshooting
-++++
-{security}
-++++
-
-Use the information in this section to troubleshoot common problems and find
-answers for frequently asked questions.
-
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-
-
-To get help, see <>.
-
-[[security-trb-settings]]
-=== Some settings are not returned via the nodes settings API
-
-*Symptoms:*
-
-* When you use the {ref}/cluster-nodes-info.html[nodes info API] to retrieve
-settings for a node, some information is missing.
-
-*Resolution:*
-
-This is intentional. Some of the settings are considered to be highly
-sensitive: all `ssl` settings, ldap `bind_dn`, and `bind_password`.
-For this reason, we filter these settings and do not expose them via
-the nodes info API rest endpoint. You can also define additional
-sensitive settings that should be hidden using the
-`xpack.security.hide_settings` setting. For example, this snippet
-hides the `url` settings of the `ldap1` realm and all settings of the
-`ad1` realm.
- -[source, yaml] ------------------------------------------- -xpack.security.hide_settings: xpack.security.authc.realms.ldap1.url, -xpack.security.authc.realms.ad1.* ------------------------------------------- - -[[security-trb-roles]] -=== Authorization exceptions - -*Symptoms:* - -* I configured the appropriate roles and the users, but I still get an -authorization exception. -* I can authenticate to LDAP, but I still get an authorization exception. - - -*Resolution:* - -. Verify that the role names associated with the users match the roles defined -in the `roles.yml` file. You can use the `elasticsearch-users` tool to list all -the users. Any unknown roles are marked with `*`. -+ --- -[source, shell] ------------------------------------------- -bin/elasticsearch-users list -rdeniro : admin -alpacino : power_user -jacknich : monitoring,unknown_role* <1> ------------------------------------------- -<1> `unknown_role` was not found in `roles.yml` - -For more information about this command, see the -{ref}/users-command.html[`elasticsearch-users` command]. --- - -. If you are authenticating to LDAP, a number of configuration options can cause -this error. -+ --- -|====================== -|_group identification_ | - -Groups are located by either an LDAP search or by the "memberOf" attribute on -the user. Also, If subtree search is turned off, it will search only one -level deep. See the <> for all the options. -There are many options here and sticking to the defaults will not work for all -scenarios. - -| _group to role mapping_| - -Either the `role_mapping.yml` file or the location for this file could be -misconfigured. See <> for more. - -|_role definition_| - -The role definition might be missing or invalid. - -|====================== - -To help track down these possibilities, add the following lines to the end of -the `log4j2.properties` configuration file in the `ES_PATH_CONF`: - -[source,properties] ----------------- -logger.authc.name = org.elasticsearch.xpack.security.authc -logger.authc.level = DEBUG ----------------- - -A successful authentication should produce debug statements that list groups and -role mappings. --- - -[[security-trb-extraargs]] -=== Users command fails due to extra arguments - -*Symptoms:* - -* The `elasticsearch-users` command fails with the following message: -`ERROR: extra arguments [...] were provided`. - -*Resolution:* - -This error occurs when the `elasticsearch-users` tool is parsing the input and -finds unexpected arguments. This can happen when there are special characters -used in some of the arguments. For example, on Windows systems the `,` character -is considered a parameter separator; in other words `-r role1,role2` is -translated to `-r role1 role2` and the `elasticsearch-users` tool only -recognizes `role1` as an expected parameter. The solution here is to quote the -parameter: `-r "role1,role2"`. - -For more information about this command, see -{ref}/users-command.html[`elasticsearch-users` command]. - -[[trouble-shoot-active-directory]] -=== Users are frequently locked out of Active Directory - -*Symptoms:* - -* Certain users are being frequently locked out of Active Directory. - -*Resolution:* - -Check your realm configuration; realms are checked serially, one after another. -If your Active Directory realm is being checked before other realms and there -are usernames that appear in both Active Directory and another realm, a valid -login for one realm might be causing failed login attempts in another realm. 
- -For example, if `UserA` exists in both Active Directory and a file realm, and -the Active Directory realm is checked first and file is checked second, an -attempt to authenticate as `UserA` in the file realm would first attempt to -authenticate against Active Directory and fail, before successfully -authenticating against the `file` realm. Because authentication is verified on -each request, the Active Directory realm would be checked - and fail - on each -request for `UserA` in the `file` realm. In this case, while the authentication -request completed successfully, the account on Active Directory would have -received several failed login attempts, and that account might become -temporarily locked out. Plan the order of your realms accordingly. - -Also note that it is not typically necessary to define multiple Active Directory -realms to handle domain controller failures. When using Microsoft DNS, the DNS -entry for the domain should always point to an available domain controller. - - -[[trb-security-maccurl]] -=== Certificate verification fails for curl on Mac - -*Symptoms:* - -* `curl` on the Mac returns a certificate verification error even when the -`--cacert` option is used. - - -*Resolution:* - -Apple's integration of `curl` with their keychain technology disables the -`--cacert` option. -See http://curl.haxx.se/mail/archive-2013-10/0036.html for more information. - -You can use another tool, such as `wget`, to test certificates. Alternately, you -can add the certificate for the signing certificate authority MacOS system -keychain, using a procedure similar to the one detailed at the -http://support.apple.com/kb/PH14003[Apple knowledge base]. Be sure to add the -signing CA's certificate and not the server's certificate. - - -[[trb-security-sslhandshake]] -=== SSLHandshakeException causes connections to fail - -*Symptoms:* - -* A `SSLHandshakeException` causes a connection to a node to fail and indicates -that there is a configuration issue. Some of the common exceptions are shown -below with tips on how to resolve these issues. - - -*Resolution:* - -`java.security.cert.CertificateException: No name matching node01.example.com found`:: -+ --- -Indicates that a client connection was made to `node01.example.com` but the -certificate returned did not contain the name `node01.example.com`. In most -cases, the issue can be resolved by ensuring the name is specified during -certificate creation. For more information, see <>. Another scenario is -when the environment does not wish to use DNS names in certificates at all. In -this scenario, all settings in `elasticsearch.yml` should only use IP addresses -including the `network.publish_host` setting. --- - -`java.security.cert.CertificateException: No subject alternative names present`:: -+ --- -Indicates that a client connection was made to an IP address but the returned -certificate did not contain any `SubjectAlternativeName` entries. IP addresses -are only used for hostname verification if they are specified as a -`SubjectAlternativeName` during certificate creation. If the intent was to use -IP addresses for hostname verification, then the certificate will need to be -regenerated with the appropriate IP address. See <>. --- - -`javax.net.ssl.SSLHandshakeException: null cert chain` and `javax.net.ssl.SSLException: Received fatal alert: bad_certificate`:: -+ --- -The `SSLHandshakeException` indicates that a self-signed certificate was -returned by the client that is not trusted as it cannot be found in the -`truststore` or `keystore`. 
This `SSLException` is seen on the client side of -the connection. --- - -`sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target` and `javax.net.ssl.SSLException: Received fatal alert: certificate_unknown`:: -+ --- -This `SunCertPathBuilderException` indicates that a certificate was returned -during the handshake that is not trusted. This message is seen on the client -side of the connection. The `SSLException` is seen on the server side of the -connection. The CA certificate that signed the returned certificate was not -found in the `keystore` or `truststore` and needs to be added to trust this -certificate. --- - -[[trb-security-ssl]] -=== Common SSL/TLS exceptions - -*Symptoms:* - -* You might see some exceptions related to SSL/TLS in your logs. Some of the -common exceptions are shown below with tips on how to resolve these issues. + - - - -*Resolution:* - -`WARN: received plaintext http traffic on a https channel, closing connection`:: -+ --- -Indicates that there was an incoming plaintext http request. This typically -occurs when an external applications attempts to make an unencrypted call to the -REST interface. Please ensure that all applications are using `https` when -calling the REST interface with SSL enabled. --- - -`org.elasticsearch.common.netty.handler.ssl.NotSslRecordException: not an SSL/TLS record:`:: -+ --- -Indicates that there was incoming plaintext traffic on an SSL connection. This -typically occurs when a node is not configured to use encrypted communication -and tries to connect to nodes that are using encrypted communication. Please -verify that all nodes are using the same setting for -`xpack.security.transport.ssl.enabled`. - -For more information about this setting, see -{ref}/security-settings.html[Security Settings in {es}]. --- - -`java.io.StreamCorruptedException: invalid internal transport message format, got`:: -+ --- -Indicates an issue with data received on the transport interface in an unknown -format. This can happen when a node with encrypted communication enabled -connects to a node that has encrypted communication disabled. Please verify that -all nodes are using the same setting for `xpack.security.transport.ssl.enabled`. - -For more information about this setting, see -{ref}/security-settings.html[Security Settings in {es}]. --- - -`java.lang.IllegalArgumentException: empty text`:: -+ --- -This exception is typically seen when a `https` request is made to a node that -is not using `https`. If `https` is desired, please ensure the following setting -is in `elasticsearch.yml`: - -[source,yaml] ----------------- -xpack.security.http.ssl.enabled: true ----------------- - -For more information about this setting, see -{ref}/security-settings.html[Security Settings in {es}]. --- - -`ERROR: unsupported ciphers [...] were requested but cannot be used in this JVM`:: -+ --- -This error occurs when a SSL/TLS cipher suite is specified that cannot supported -by the JVM that {es} is running in. Security tries to use the specified cipher -suites that are supported by this JVM. This error can occur when using the -Security defaults as some distributions of OpenJDK do not enable the PKCS11 -provider by default. In this case, we recommend consulting your JVM -documentation for details on how to enable the PKCS11 provider. - -Another common source of this error is requesting cipher suites that use -encrypting with a key length greater than 128 bits when running on an Oracle JDK. 
-In this case, you must install the -<>. --- - -[[trb-security-internalserver]] -=== Internal Server Error in Kibana - -*Symptoms:* - -* In 5.1.1, an `UnhandledPromiseRejectionWarning` occurs and {kib} displays an -Internal Server Error. -//TBD: Is the same true for later releases? - -*Resolution:* - -If the Security plugin is enabled in {es} but disabled in {kib}, you must -still set `elasticsearch.username` and `elasticsearch.password` in `kibana.yml`. -Otherwise, {kib} cannot connect to {es}. - - -[[trb-security-setup]] -=== Setup-passwords command fails due to connection failure - -The {ref}/setup-passwords.html[elasticsearch-setup-passwords command] sets -passwords for the built-in users by sending user management API requests. If -your cluster uses SSL/TLS for the HTTP (REST) interface, the command attempts to -establish a connection with the HTTPS protocol. If the connection attempt fails, -the command fails. - -*Symptoms:* - -. {es} is running HTTPS, but the command fails to detect it and returns the -following errors: -+ --- -[source, shell] ------------------------------------------- -Cannot connect to elasticsearch node. -java.net.SocketException: Unexpected end of file from server -... -ERROR: Failed to connect to elasticsearch at -http://127.0.0.1:9200/_xpack/security/_authenticate?pretty. -Is the URL correct and elasticsearch running? ------------------------------------------- --- - -. SSL/TLS is configured, but trust cannot be established. The command returns -the following errors: -+ --- -[source, shell] ------------------------------------------- -SSL connection to -https://127.0.0.1:9200/_xpack/security/_authenticate?pretty -failed: sun.security.validator.ValidatorException: -PKIX path building failed: -sun.security.provider.certpath.SunCertPathBuilderException: -unable to find valid certification path to requested target -Please check the elasticsearch SSL settings under -xpack.security.http.ssl. -... -ERROR: Failed to establish SSL connection to elasticsearch at -https://127.0.0.1:9200/_xpack/security/_authenticate?pretty. ------------------------------------------- --- - -. The command fails because hostname verification fails, which results in the -following errors: -+ --- -[source, shell] ------------------------------------------- -SSL connection to -https://idp.localhost.test:9200/_xpack/security/_authenticate?pretty -failed: java.security.cert.CertificateException: -No subject alternative DNS name matching -elasticsearch.example.com found. -Please check the elasticsearch SSL settings under -xpack.security.http.ssl. -... -ERROR: Failed to establish SSL connection to elasticsearch at -https://elasticsearch.example.com:9200/_xpack/security/_authenticate?pretty. ------------------------------------------- --- - -*Resolution:* - -. If your cluster uses TLS/SSL for the HTTP interface but the -`elasticsearch-setup-passwords` command attempts to establish a non-secure -connection, use the `--url` command option to explicitly specify an HTTPS URL. -Alternatively, set the `xpack.security.http.ssl.enabled` setting to `true`. - -. If the command does not trust the {es} server, verify that you configured the -`xpack.security.http.ssl.certificate_authorities` setting or the -`xpack.security.http.ssl.truststore.path` setting. - -. If hostname verification fails, you can disable this verification by setting -`xpack.security.http.ssl.verification_mode` to `certificate`. - -For more information about these settings, see -{ref}/security-settings.html[Security Settings in {es}]. 
diff --git a/x-pack/docs/en/watcher/images/watcher-ui-edit-watch.png b/x-pack/docs/en/watcher/images/watcher-ui-edit-watch.png deleted file mode 100644 index f6a3ab4354a..00000000000 Binary files a/x-pack/docs/en/watcher/images/watcher-ui-edit-watch.png and /dev/null differ diff --git a/x-pack/docs/en/watcher/limitations.asciidoc b/x-pack/docs/en/watcher/limitations.asciidoc deleted file mode 100644 index 9ae7273de71..00000000000 --- a/x-pack/docs/en/watcher/limitations.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -[[watcher-limitations]] -== Watcher Limitations - -[float] -=== Watches Are Not Updated When File Based Scripts Change - -When you refer to a file script in a watch, the watch itself is not updated -if you change the script on the filesystem. - -Currently, the only way to reload a file script in a watch is to delete -the watch and recreate it. - -[float] -=== Watcher UI - -When you create a new watch or edit an existing watch, if you navigate away -from the page without saving your changes they will be lost without warning. -Make sure to save your changes before leaving the page. - -image::watcher-ui-edit-watch.png[] - -[float] -=== Security Integration - -When {security} is enabled, a watch stores information about what the user who -stored the watch is allowed to execute **at that time**. This means, if those -permissions change over time, the watch will still be able to execute with the -permissions that existed when the watch was created. diff --git a/x-pack/docs/en/watcher/troubleshooting.asciidoc b/x-pack/docs/en/watcher/troubleshooting.asciidoc deleted file mode 100644 index 20d599f8f52..00000000000 --- a/x-pack/docs/en/watcher/troubleshooting.asciidoc +++ /dev/null @@ -1,63 +0,0 @@ -[[watcher-troubleshooting]] -== {xpack} {watcher} Troubleshooting -++++ -{xpack} {watcher} -++++ - -[float] -=== Dynamic Mapping Error When Trying to Add a Watch - -If you get the _Dynamic Mapping is Disabled_ error when you try to add a watch, -verify that the index mappings for the `.watches` index are available. You can -do that by submitting the following request: - -[source,js] --------------------------------------------------- -GET .watches/_mapping --------------------------------------------------- -// CONSOLE -// TEST[setup:my_active_watch] - -If the index mappings are missing, follow these steps to restore the correct -mappings: - -. Stop the Elasticsearch node. -. Add `xpack.watcher.index.rest.direct_access : true` to `elasticsearch.yml`. -. Restart the Elasticsearch node. -. Delete the `.watches` index: -+ -[source,js] --------------------------------------------------- -DELETE .watches --------------------------------------------------- -// CONSOLE -// TEST[skip:index deletion] -+ -. Disable direct access to the `.watches` index: -.. Stop the Elasticsearch node. -.. Remove `xpack.watcher.index.rest.direct_access : true` from `elasticsearch.yml`. -.. Restart the Elasticsearch node. - -[float] -=== Unable to Send Email - -If you get an authentication error indicating that you need to continue the -sign-in process from a web browser when Watcher attempts to send email, you need -to configure Gmail to -https://support.google.com/accounts/answer/6010255?hl=en[Allow Less Secure Apps to access your account]. - -If you have two-step verification enabled for your email account, you must -generate and use an App Specific password to send email from {watcher}. 
For more -information, see: - -- Gmail: https://support.google.com/accounts/answer/185833?hl=en[Sign in using App Passwords] -- Outlook.com: http://windows.microsoft.com/en-us/windows/app-passwords-two-step-verification[App passwords and two-step verification] - -[float] -=== {watcher} Not Responsive - -Keep in mind that there's no built-in validation of scripts that you add to a -watch. Buggy or deliberately malicious scripts can negatively impact {watcher} -performance. For example, if you add multiple watches with buggy script -conditions in a short period of time, {watcher} might be temporarily unable to -process watches until the bad watches time out. diff --git a/x-pack/license-tools/build.gradle b/x-pack/license-tools/build.gradle index 183b9ab50e0..4bd17713a2f 100644 --- a/x-pack/license-tools/build.gradle +++ b/x-pack/license-tools/build.gradle @@ -1,7 +1,7 @@ apply plugin: 'elasticsearch.build' dependencies { - compile project(path: xpackModule('core'), configuration: 'shadow') + compile "org.elasticsearch.plugin:x-pack-core:${version}" compile "org.elasticsearch:elasticsearch:${version}" testCompile "org.elasticsearch.test:framework:${version}" } diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle new file mode 100644 index 00000000000..0b1f889a2c1 --- /dev/null +++ b/x-pack/plugin/ccr/build.gradle @@ -0,0 +1,60 @@ +import com.carrotsearch.gradle.junit4.RandomizedTestingTask +import org.elasticsearch.gradle.BuildPlugin + +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' +esplugin { + name 'x-pack-ccr' + description 'Elasticsearch Expanded Pack Plugin - CCR' + classname 'org.elasticsearch.xpack.ccr.Ccr' + hasNativeController false + requiresKeystore true + extendedPlugins = ['x-pack-core'] +} +archivesBaseName = 'x-pack-ccr' + +integTest.enabled = false + +compileJava.options.compilerArgs << "-Xlint:-try" +compileTestJava.options.compilerArgs << "-Xlint:-try" + +// Instead we create a separate task to run the +// tests based on ESIntegTestCase +task internalClusterTest(type: RandomizedTestingTask, + group: JavaBasePlugin.VERIFICATION_GROUP, + description: 'Java fantasy integration tests', + dependsOn: test.dependsOn) { + configure(BuildPlugin.commonTestConfig(project)) + classpath = project.test.classpath + testClassesDirs = project.test.testClassesDirs + include '**/*IT.class' + systemProperty 'es.set.netty.runtime.available.processors', 'false' +} + +check.dependsOn internalClusterTest +internalClusterTest.mustRunAfter test + +// add all sub-projects of the qa sub-project +gradle.projectsEvaluated { + project.subprojects + .find { it.path == project.path + ":qa" } + .subprojects + .findAll { it.path.startsWith(project.path + ":qa") } + .each { check.dependsOn it.check } +} + +dependencies { + compileOnly "org.elasticsearch:elasticsearch:${version}" + + compileOnly project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +dependencyLicenses { + ignoreSha 'x-pack-core' +} + +run { + plugin xpackModule('core') +} diff --git a/x-pack/plugin/ccr/qa/build.gradle b/x-pack/plugin/ccr/qa/build.gradle new file mode 100644 index 00000000000..ef03d968209 --- /dev/null +++ b/x-pack/plugin/ccr/qa/build.gradle @@ -0,0 +1,11 @@ + +/* Remove assemble on all qa projects because we don't need to publish + * artifacts for them. 
*/ +gradle.projectsEvaluated { + subprojects { + Task assemble = project.tasks.findByName('assemble') + if (assemble) { + assemble.enabled = false + } + } +} diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/build.gradle new file mode 100644 index 00000000000..845c9df533d --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/build.gradle @@ -0,0 +1,59 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('ccr'), configuration: 'runtime') +} + +task leaderClusterTest(type: RestIntegTestTask) { + mustRunAfter(precommit) +} + +leaderClusterTestCluster { + numNodes = 1 + clusterName = 'leader-cluster' +} + +leaderClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'true' +} + +task writeJavaPolicy { + doLast { + final File tmp = file("${buildDir}/tmp") + if (tmp.exists() == false && tmp.mkdirs() == false) { + throw new GradleException("failed to create temporary directory [${tmp}]") + } + final File javaPolicy = file("${tmp}/java.policy") + javaPolicy.write( + [ + "grant {", + " permission java.io.FilePermission \"${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}.log\", \"read\";", + "};" + ].join("\n")) + } +} + +task followClusterTest(type: RestIntegTestTask) {} +followClusterTest.dependsOn writeJavaPolicy + +followClusterTestCluster { + dependsOn leaderClusterTestRunner + numNodes = 1 + clusterName = 'follow-cluster' + setting 'xpack.license.self_generated.type', 'trial' + setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" +} + +followClusterTestRunner { + systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" + systemProperty 'tests.is_leader_cluster', 'false' + systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}.log" + finalizedBy 'leaderClusterTestCluster#stop' +} + +check.dependsOn followClusterTest +test.enabled = false diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java new file mode 100644 index 00000000000..7bc952a3ea8 --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.apache.lucene.util.Constants; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.nio.file.Files; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; + +public class CcrMultiClusterLicenseIT extends ESRestTestCase { + + private final boolean runningAgainstLeaderCluster = Booleans.parseBoolean(System.getProperty("tests.is_leader_cluster")); + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + public void testFollowIndex() { + if (runningAgainstLeaderCluster == false) { + final Request request = new Request("POST", "/follower/_ccr/follow"); + request.setJsonEntity("{\"leader_index\": \"leader_cluster:leader\"}"); + assertNonCompliantLicense(request); + } + } + + public void testCreateAndFollowIndex() { + if (runningAgainstLeaderCluster == false) { + final Request request = new Request("POST", "/follower/_ccr/create_and_follow"); + request.setJsonEntity("{\"leader_index\": \"leader_cluster:leader\"}"); + assertNonCompliantLicense(request); + } + } + + public void testAutoFollow() throws Exception { + assumeFalse("windows is the worst", Constants.WINDOWS); + if (runningAgainstLeaderCluster == false) { + final Request request = new Request("PUT", "/_ccr/auto_follow/leader_cluster"); + request.setJsonEntity("{\"leader_index_patterns\":[\"*\"]}"); + client().performRequest(request); + + // parse the logs and ensure that the auto-coordinator skipped coordination on the leader cluster + assertBusy(() -> { + final List lines = Files.readAllLines(PathUtils.get(System.getProperty("log"))); + + final Iterator it = lines.iterator(); + + boolean warn = false; + while (it.hasNext()) { + final String line = it.next(); + if (line.matches(".*\\[WARN\\s*\\]\\[o\\.e\\.x\\.c\\.a\\.AutoFollowCoordinator\\s*\\] \\[node-0\\] " + + "failure occurred during auto-follower coordination")) { + warn = true; + break; + } + } + assertTrue(warn); + assertTrue(it.hasNext()); + final String lineAfterWarn = it.next(); + assertThat( + lineAfterWarn, + equalTo("org.elasticsearch.ElasticsearchStatusException: " + + "can not fetch remote cluster state as the remote cluster [leader_cluster] is not licensed for [ccr]; " + + "the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]")); + }); + } + } + + private static void assertNonCompliantLicense(final Request request) { + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + final String expected = String.format( + Locale.ROOT, + "can not fetch remote index [%s] metadata as the remote cluster [%s] is not licensed for [ccr]; " + + "the license mode [BASIC] on cluster [%s] does not enable [ccr]", + "leader_cluster:leader", + "leader_cluster", + "leader_cluster"); + assertThat(e, hasToString(containsString(expected))); + } + +} diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle new file mode 100644 index 00000000000..d4fe9ee554c --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle @@ -0,0 +1,75 @@ +import 
org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('ccr'), configuration: 'runtime') +} + +task leaderClusterTest(type: RestIntegTestTask) { + mustRunAfter(precommit) +} + +leaderClusterTestCluster { + numNodes = 1 + clusterName = 'leader-cluster' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.monitoring.enabled', 'false' + extraConfigFile 'roles.yml', 'roles.yml' + setupCommand 'setupTestAdmin', + 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" + setupCommand 'setupCcrUser', + 'bin/elasticsearch-users', 'useradd', "test_ccr", '-p', 'x-pack-test-password', '-r', "manage_ccr" + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} + +leaderClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'true' +} + +task followClusterTest(type: RestIntegTestTask) {} + +followClusterTestCluster { + dependsOn leaderClusterTestRunner + numNodes = 1 + clusterName = 'follow-cluster' + setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.monitoring.enabled', 'false' + extraConfigFile 'roles.yml', 'roles.yml' + setupCommand 'setupTestAdmin', + 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" + setupCommand 'setupCcrUser', + 'bin/elasticsearch-users', 'useradd', "test_ccr", '-p', 'x-pack-test-password', '-r', "ccruser" + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} + +followClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'false' + systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + finalizedBy 'leaderClusterTestCluster#stop' +} + +check.dependsOn followClusterTest +test.enabled = false // no unit tests for this module, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/roles.yml b/x-pack/plugin/ccr/qa/multi-cluster-with-security/roles.yml new file mode 100644 index 00000000000..7916bc6eee2 --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/roles.yml @@ -0,0 +1,10 @@ +ccruser: + cluster: + - manage_ccr + indices: + - names: [ 'allowed-index' ] + privileges: + - monitor + - read + - write + - create_follow_index diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java new file mode 100644 index 00000000000..d8357a74e8e --- /dev/null +++
b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java @@ -0,0 +1,206 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class FollowIndexSecurityIT extends ESRestTestCase { + + private final boolean runningAgainstLeaderCluster = Booleans.parseBoolean(System.getProperty("tests.is_leader_cluster")); + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("test_ccr", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + public void testFollowIndex() throws Exception { + final int numDocs = 16; + final String allowedIndex = "allowed-index"; + final String unallowedIndex = "unallowed-index"; + if (runningAgainstLeaderCluster) { + logger.info("Running against leader cluster"); + Settings indexSettings = Settings.builder().put("index.soft_deletes.enabled", true).build(); + createIndex(allowedIndex, indexSettings); + createIndex(unallowedIndex, indexSettings); + for (int i = 0; i < numDocs; i++) { + logger.info("Indexing doc [{}]", i); + index(allowedIndex, Integer.toString(i), "field", i); + } + for (int i = 0; i < numDocs; i++) { + logger.info("Indexing doc [{}]", i); + index(unallowedIndex, Integer.toString(i), "field", i); + } + refresh(allowedIndex); + verifyDocuments(adminClient(), allowedIndex, numDocs); + } else { + createAndFollowIndex("leader_cluster:" + allowedIndex, allowedIndex); + assertBusy(() -> verifyDocuments(client(), allowedIndex, numDocs)); + assertThat(countCcrNodeTasks(), equalTo(1)); + assertOK(client().performRequest(new Request("POST", "/" + allowedIndex + "/_ccr/unfollow"))); + // Make sure that there are no other ccr related operations running: + assertBusy(() -> { + Map<String, Object> clusterState = toMap(adminClient().performRequest(new Request("GET", "/_cluster/state"))); + List<?> tasks = (List<?>) XContentMapValues.extractValue("metadata.persistent_tasks.tasks", clusterState); + assertThat(tasks.size(), equalTo(0)); + assertThat(countCcrNodeTasks(), equalTo(0)); + }); + + followIndex("leader_cluster:" + allowedIndex, allowedIndex); + assertThat(countCcrNodeTasks(), equalTo(1)); + assertOK(client().performRequest(new Request("POST", "/" + allowedIndex + "/_ccr/unfollow"))); + // Make sure that there are no other ccr related operations running: + assertBusy(() -> { + Map<String, Object> clusterState = toMap(adminClient().performRequest(new Request("GET", "/_cluster/state"))); + List<?> tasks = (List<?>) XContentMapValues.extractValue("metadata.persistent_tasks.tasks", clusterState); + assertThat(tasks.size(), equalTo(0)); + assertThat(countCcrNodeTasks(), equalTo(0)); + }); + + Exception e = expectThrows(ResponseException.class, + () -> createAndFollowIndex("leader_cluster:" + unallowedIndex, unallowedIndex)); + assertThat(e.getMessage(), + containsString("action [indices:admin/xpack/ccr/create_and_follow_index] is unauthorized for user [test_ccr]")); + // Verify that the follow index has not been created and no node tasks are running + assertThat(indexExists(adminClient(), unallowedIndex), is(false)); + assertBusy(() -> assertThat(countCcrNodeTasks(), equalTo(0))); + + e = expectThrows(ResponseException.class, + () -> followIndex("leader_cluster:" + unallowedIndex, unallowedIndex)); + assertThat(e.getMessage(), containsString("follow index [" + unallowedIndex + "] does not exist")); + assertThat(indexExists(adminClient(), unallowedIndex), is(false)); + assertBusy(() -> assertThat(countCcrNodeTasks(), equalTo(0))); + } + } + + private int countCcrNodeTasks() throws IOException { + final Request request = new Request("GET", "/_tasks"); + request.addParameter("detailed", "true"); + Map<String, Object> rsp1 = toMap(adminClient().performRequest(request)); + Map<?, ?> nodes = (Map<?, ?>) rsp1.get("nodes"); + assertThat(nodes.size(), equalTo(1)); + Map<?, ?> node = (Map<?, ?>) nodes.values().iterator().next(); + Map<?, ?> nodeTasks = (Map<?, ?>) node.get("tasks"); + int numNodeTasks = 0; + for (Map.Entry<?, ?> entry : nodeTasks.entrySet()) { + Map<?, ?> nodeTask = (Map<?, ?>) entry.getValue(); + String action = (String) nodeTask.get("action"); + if (action.startsWith("xpack/ccr/shard_follow_task")) { + numNodeTasks++; + } + } + return numNodeTasks; + } + + private static void index(String index, String id, Object...
fields) throws IOException { + XContentBuilder document = jsonBuilder().startObject(); + for (int i = 0; i < fields.length; i += 2) { + document.field((String) fields[i], fields[i + 1]); + } + document.endObject(); + final Request request = new Request("POST", "/" + index + "/_doc/" + id); + request.setJsonEntity(Strings.toString(document)); + assertOK(adminClient().performRequest(request)); + } + + private static void refresh(String index) throws IOException { + assertOK(adminClient().performRequest(new Request("POST", "/" + index + "/_refresh"))); + } + + private static void followIndex(String leaderIndex, String followIndex) throws IOException { + final Request request = new Request("POST", "/" + followIndex + "/_ccr/follow"); + request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"idle_shard_retry_delay\": \"10ms\"}"); + assertOK(client().performRequest(request)); + } + + private static void createAndFollowIndex(String leaderIndex, String followIndex) throws IOException { + final Request request = new Request("POST", "/" + followIndex + "/_ccr/create_and_follow"); + request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"idle_shard_retry_delay\": \"10ms\"}"); + assertOK(client().performRequest(request)); + } + + void verifyDocuments(RestClient client, String index, int expectedNumDocs) throws IOException { + final Request request = new Request("GET", "/" + index + "/_search"); + request.addParameter("pretty", "true"); + request.addParameter("size", Integer.toString(expectedNumDocs)); + request.addParameter("sort", "field:asc"); + Map<String, Object> response = toMap(client.performRequest(request)); + + int numDocs = (int) XContentMapValues.extractValue("hits.total", response); + assertThat(numDocs, equalTo(expectedNumDocs)); + + List<?> hits = (List<?>) XContentMapValues.extractValue("hits.hits", response); + assertThat(hits.size(), equalTo(expectedNumDocs)); + for (int i = 0; i < expectedNumDocs; i++) { + int value = (int) XContentMapValues.extractValue("_source.field", (Map<String, Object>) hits.get(i)); + assertThat(i, equalTo(value)); + } + } + + private static Map<String, Object> toMap(Response response) throws IOException { + return toMap(EntityUtils.toString(response.getEntity())); + } + + private static Map<String, Object> toMap(String response) { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false); + } + + protected static void createIndex(String name, Settings settings) throws IOException { + createIndex(name, settings, ""); + } + + protected static void createIndex(String name, Settings settings, String mapping) throws IOException { + final Request request = new Request("PUT", "/" + name); + request.setJsonEntity("{ \"settings\": " + Strings.toString(settings) + ", \"mappings\" : {" + mapping + "} }"); + assertOK(adminClient().performRequest(request)); + } + + private static boolean indexExists(RestClient client, String index) throws IOException { + Response response = client.performRequest(new Request("HEAD", "/" + index)); + return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode(); + } + +} diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle new file mode 100644 index 00000000000..396c247af40 --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -0,0 +1,41 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path:
xpackModule('ccr'), configuration: 'runtime') +} + +task leaderClusterTest(type: RestIntegTestTask) { + mustRunAfter(precommit) +} + +leaderClusterTestCluster { + numNodes = 1 + clusterName = 'leader-cluster' + setting 'xpack.license.self_generated.type', 'trial' +} + +leaderClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'true' +} + +task followClusterTest(type: RestIntegTestTask) {} + +followClusterTestCluster { + dependsOn leaderClusterTestRunner + numNodes = 1 + clusterName = 'follow-cluster' + setting 'xpack.license.self_generated.type', 'trial' + setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" +} + +followClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'false' + systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + finalizedBy 'leaderClusterTestCluster#stop' +} + +check.dependsOn followClusterTest +test.enabled = false // no unit tests for this module, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java new file mode 100644 index 00000000000..76d0e438135 --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -0,0 +1,184 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr; + +import org.apache.http.HttpHost; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + +public class FollowIndexIT extends ESRestTestCase { + + private final boolean runningAgainstLeaderCluster = Booleans.parseBoolean(System.getProperty("tests.is_leader_cluster")); + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + public void testFollowIndex() throws Exception { + final int numDocs = 128; + final String leaderIndexName = "test_index1"; + if (runningAgainstLeaderCluster) { + logger.info("Running against leader cluster"); + String mapping = ""; + if (randomBoolean()) { // randomly do source filtering on indexing + mapping = + "\"_doc\": {" + + " \"_source\": {" + + " \"includes\": [\"field\"]," + + " \"excludes\": [\"filtered_field\"]" + + " }" + + "}"; + } + Settings indexSettings = Settings.builder() + .put("index.soft_deletes.enabled", true) + .build(); + createIndex(leaderIndexName, indexSettings, mapping); + for (int i = 0; i < numDocs; i++) { + logger.info("Indexing doc [{}]", i); + index(client(), leaderIndexName,
Integer.toString(i), "field", i, "filtered_field", "true"); + } + refresh(leaderIndexName); + verifyDocuments(leaderIndexName, numDocs); + } else { + logger.info("Running against follow cluster"); + final String followIndexName = "test_index2"; + createAndFollowIndex("leader_cluster:" + leaderIndexName, followIndexName); + assertBusy(() -> verifyDocuments(followIndexName, numDocs)); + // unfollow, then follow again, and then index a few docs into the leader index: + unfollowIndex(followIndexName); + followIndex("leader_cluster:" + leaderIndexName, followIndexName); + try (RestClient leaderClient = buildLeaderClient()) { + int id = numDocs; + index(leaderClient, leaderIndexName, Integer.toString(id), "field", id, "filtered_field", "true"); + index(leaderClient, leaderIndexName, Integer.toString(id + 1), "field", id + 1, "filtered_field", "true"); + index(leaderClient, leaderIndexName, Integer.toString(id + 2), "field", id + 2, "filtered_field", "true"); + } + assertBusy(() -> verifyDocuments(followIndexName, numDocs + 3)); + } + } + + public void testAutoFollowPatterns() throws Exception { + assumeFalse("Test should only run when both clusters are running", runningAgainstLeaderCluster); + + Request request = new Request("PUT", "/_ccr/auto_follow/leader_cluster"); + request.setJsonEntity("{\"leader_index_patterns\": [\"logs-*\"]}"); + assertOK(client().performRequest(request)); + + try (RestClient leaderClient = buildLeaderClient()) { + Settings settings = Settings.builder() + .put("index.soft_deletes.enabled", true) + .build(); + request = new Request("PUT", "/logs-20190101"); + request.setJsonEntity("{\"settings\": " + Strings.toString(settings) + + ", \"mappings\": {\"_doc\": {\"properties\": {\"field\": {\"type\": \"keyword\"}}}} }"); + assertOK(leaderClient.performRequest(request)); + + for (int i = 0; i < 5; i++) { + String id = Integer.toString(i); + index(leaderClient, "logs-20190101", id, "field", i, "filtered_field", "true"); + } + } + + assertBusy(() -> { + ensureYellow("logs-20190101"); + verifyDocuments("logs-20190101", 5); + }); + } + + private static void index(RestClient client, String index, String id, Object...
fields) throws IOException { + XContentBuilder document = jsonBuilder().startObject(); + for (int i = 0; i < fields.length; i += 2) { + document.field((String) fields[i], fields[i + 1]); + } + document.endObject(); + final Request request = new Request("POST", "/" + index + "/_doc/" + id); + request.setJsonEntity(Strings.toString(document)); + assertOK(client.performRequest(request)); + } + + private static void refresh(String index) throws IOException { + assertOK(client().performRequest(new Request("POST", "/" + index + "/_refresh"))); + } + + private static void followIndex(String leaderIndex, String followIndex) throws IOException { + final Request request = new Request("POST", "/" + followIndex + "/_ccr/follow"); + request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"idle_shard_retry_delay\": \"10ms\"}"); + assertOK(client().performRequest(request)); + } + + private static void createAndFollowIndex(String leaderIndex, String followIndex) throws IOException { + final Request request = new Request("POST", "/" + followIndex + "/_ccr/create_and_follow"); + request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"idle_shard_retry_delay\": \"10ms\"}"); + assertOK(client().performRequest(request)); + } + + private static void unfollowIndex(String followIndex) throws IOException { + assertOK(client().performRequest(new Request("POST", "/" + followIndex + "/_ccr/unfollow"))); + } + + private static void verifyDocuments(String index, int expectedNumDocs) throws IOException { + final Request request = new Request("GET", "/" + index + "/_search"); + request.addParameter("size", Integer.toString(expectedNumDocs)); + request.addParameter("sort", "field:asc"); + request.addParameter("q", "filtered_field:true"); + Map<String, Object> response = toMap(client().performRequest(request)); + + int numDocs = (int) XContentMapValues.extractValue("hits.total", response); + assertThat(numDocs, equalTo(expectedNumDocs)); + + List<?> hits = (List<?>) XContentMapValues.extractValue("hits.hits", response); + assertThat(hits.size(), equalTo(expectedNumDocs)); + for (int i = 0; i < expectedNumDocs; i++) { + int value = (int) XContentMapValues.extractValue("_source.field", (Map<String, Object>) hits.get(i)); + assertThat(i, equalTo(value)); + } + } + + private static Map<String, Object> toMap(Response response) throws IOException { + return toMap(EntityUtils.toString(response.getEntity())); + } + + private static Map<String, Object> toMap(String response) { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false); + } + + private static void ensureYellow(String index) throws IOException { + Request request = new Request("GET", "/_cluster/health/" + index); + request.addParameter("wait_for_status", "yellow"); + request.addParameter("wait_for_no_relocating_shards", "true"); + request.addParameter("timeout", "70s"); + request.addParameter("level", "shards"); + client().performRequest(request); + } + + private RestClient buildLeaderClient() throws IOException { + assert runningAgainstLeaderCluster == false; + String leaderUrl = System.getProperty("tests.leader_host"); + int portSeparator = leaderUrl.lastIndexOf(':'); + HttpHost httpHost = new HttpHost(leaderUrl.substring(0, portSeparator), + Integer.parseInt(leaderUrl.substring(portSeparator + 1)), getProtocol()); + return buildClient(Settings.EMPTY, new HttpHost[]{httpHost}); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java new file mode 100644 index 00000000000..353a66db263
--- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -0,0 +1,236 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.PersistentTaskPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.threadpool.FixedExecutorBuilder; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator; +import org.elasticsearch.xpack.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.ccr.action.DeleteAutoFollowPatternAction; +import org.elasticsearch.xpack.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction; +import org.elasticsearch.xpack.ccr.action.ShardChangesAction; +import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask; +import org.elasticsearch.xpack.ccr.action.ShardFollowTask; +import org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor; +import org.elasticsearch.xpack.ccr.action.TransportCcrStatsAction; +import org.elasticsearch.xpack.ccr.action.TransportDeleteAutoFollowPatternAction; +import org.elasticsearch.xpack.ccr.action.TransportPutAutoFollowPatternAction; +import org.elasticsearch.xpack.ccr.action.UnfollowIndexAction; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsAction; +import org.elasticsearch.xpack.ccr.action.bulk.TransportBulkShardOperationsAction; +import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory; +import org.elasticsearch.xpack.ccr.rest.RestCcrStatsAction; +import org.elasticsearch.xpack.ccr.rest.RestCreateAndFollowIndexAction; +import org.elasticsearch.xpack.ccr.rest.RestDeleteAutoFollowPatternAction; +import 
org.elasticsearch.xpack.ccr.rest.RestFollowIndexAction; +import org.elasticsearch.xpack.ccr.rest.RestPutAutoFollowPatternAction; +import org.elasticsearch.xpack.ccr.rest.RestUnfollowIndexAction; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.ccr.CcrSettings.CCR_ENABLED_SETTING; +import static org.elasticsearch.xpack.ccr.CcrSettings.CCR_FOLLOWING_INDEX_SETTING; + +/** + * Container class for CCR functionality. + */ +public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, EnginePlugin { + + public static final String CCR_THREAD_POOL_NAME = "ccr"; + + private final boolean enabled; + private final Settings settings; + private final CcrLicenseChecker ccrLicenseChecker; + + /** + * Construct an instance of the CCR container with the specified settings. + * + * @param settings the settings + */ + @SuppressWarnings("unused") // constructed reflectively by the plugin infrastructure + public Ccr(final Settings settings) { + this(settings, new CcrLicenseChecker()); + } + + /** + * Construct an instance of the CCR container with the specified settings and license checker. + * + * @param settings the settings + * @param ccrLicenseChecker the CCR license checker + */ + Ccr(final Settings settings, final CcrLicenseChecker ccrLicenseChecker) { + this.settings = settings; + this.enabled = CCR_ENABLED_SETTING.get(settings); + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + public Collection<Object> createComponents( + final Client client, + final ClusterService clusterService, + final ThreadPool threadPool, + final ResourceWatcherService resourceWatcherService, + final ScriptService scriptService, + final NamedXContentRegistry xContentRegistry, + final Environment environment, + final NodeEnvironment nodeEnvironment, + final NamedWriteableRegistry namedWriteableRegistry) { + if (enabled == false) { + return emptyList(); + } + + return Arrays.asList( + ccrLicenseChecker, + new AutoFollowCoordinator(settings, client, threadPool, clusterService, ccrLicenseChecker) + ); + } + + @Override + public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor(ClusterService clusterService, + ThreadPool threadPool, Client client) { + return Collections.singletonList(new ShardFollowTasksExecutor(settings, client, threadPool)); + } + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + if (enabled == false) { + return emptyList(); + } + + return Arrays.asList( + // internal actions + new ActionHandler<>(BulkShardOperationsAction.INSTANCE, TransportBulkShardOperationsAction.class), + new ActionHandler<>(ShardChangesAction.INSTANCE, ShardChangesAction.TransportAction.class), + // stats action + new ActionHandler<>(CcrStatsAction.INSTANCE, TransportCcrStatsAction.class), + // follow actions + new ActionHandler<>(CreateAndFollowIndexAction.INSTANCE, CreateAndFollowIndexAction.TransportAction.class), + new ActionHandler<>(FollowIndexAction.INSTANCE, FollowIndexAction.TransportAction.class), + new ActionHandler<>(UnfollowIndexAction.INSTANCE, UnfollowIndexAction.TransportAction.class), + // auto-follow actions + new ActionHandler<>(DeleteAutoFollowPatternAction.INSTANCE, TransportDeleteAutoFollowPatternAction.class), + new ActionHandler<>(PutAutoFollowPatternAction.INSTANCE, TransportPutAutoFollowPatternAction.class)); + } +
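+ // Note that, unlike getActions() above, these REST handlers are registered even when CCR is disabled ("xpack.ccr.enabled": false); presumably such a request then fails only once it reaches the transport layer, where the corresponding action is not registered.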
+ @Override + public List<RestHandler> getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier<DiscoveryNodes> nodesInCluster) { + return Arrays.asList( + // stats API + new RestCcrStatsAction(settings, restController), + // follow APIs + new RestCreateAndFollowIndexAction(settings, restController), + new RestFollowIndexAction(settings, restController), + new RestUnfollowIndexAction(settings, restController), + // auto-follow APIs + new RestDeleteAutoFollowPatternAction(settings, restController), + new RestPutAutoFollowPatternAction(settings, restController)); + } + + @Override + public List<NamedWriteableRegistry.Entry> getNamedWriteables() { + return Arrays.asList( + // Persistent action requests + new NamedWriteableRegistry.Entry(PersistentTaskParams.class, ShardFollowTask.NAME, + ShardFollowTask::new), + + // Task statuses + new NamedWriteableRegistry.Entry(Task.Status.class, ShardFollowNodeTask.Status.STATUS_PARSER_NAME, + ShardFollowNodeTask.Status::new) + ); + } + + @Override + public List<NamedXContentRegistry.Entry> getNamedXContent() { + return Arrays.asList( + // Persistent action requests + new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(ShardFollowTask.NAME), + ShardFollowTask::fromXContent), + + // Task statuses + new NamedXContentRegistry.Entry( + ShardFollowNodeTask.Status.class, + new ParseField(ShardFollowNodeTask.Status.STATUS_PARSER_NAME), + ShardFollowNodeTask.Status::fromXContent)); + } + + /** + * The settings defined by CCR. + * + * @return the settings + */ + @Override + public List<Setting<?>> getSettings() { + return CcrSettings.getSettings(); + } + + /** + * The optional engine factory for CCR. This method inspects the index settings for the {@link CcrSettings#CCR_FOLLOWING_INDEX_SETTING} + * setting to determine whether or not the engine implementation should be a following engine. + * + * @return the optional engine factory + */ + @Override + public Optional<EngineFactory> getEngineFactory(final IndexSettings indexSettings) { + if (CCR_FOLLOWING_INDEX_SETTING.get(indexSettings.getSettings())) { + return Optional.of(new FollowingEngineFactory()); + } else { + return Optional.empty(); + } + } + + @Override + public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) { + if (enabled == false) { + return Collections.emptyList(); + } + + FixedExecutorBuilder ccrTp = new FixedExecutorBuilder(settings, CCR_THREAD_POOL_NAME, + 32, 100, "xpack.ccr.ccr_thread_pool"); + + return Collections.singletonList(ccrTp); + } + + protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java new file mode 100644 index 00000000000..f9a5d8fe830 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.license.RemoteClusterLicenseChecker; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.util.Collections; +import java.util.Locale; +import java.util.Objects; +import java.util.function.BooleanSupplier; +import java.util.function.Consumer; +import java.util.function.Function; + +/** + * Encapsulates license checking for CCR. + */ +public final class CcrLicenseChecker { + + private final BooleanSupplier isCcrAllowed; + + /** + * Constructs a CCR license checker with the default rule based on the license state for checking if CCR is allowed. + */ + CcrLicenseChecker() { + this(XPackPlugin.getSharedLicenseState()::isCcrAllowed); + } + + /** + * Constructs a CCR license checker with the specified boolean supplier. + * + * @param isCcrAllowed a boolean supplier that should return true if CCR is allowed and false otherwise + */ + CcrLicenseChecker(final BooleanSupplier isCcrAllowed) { + this.isCcrAllowed = Objects.requireNonNull(isCcrAllowed); + } + + /** + * Returns whether or not CCR is allowed. + * + * @return true if CCR is allowed, otherwise false + */ + public boolean isCcrAllowed() { + return isCcrAllowed.getAsBoolean(); + } + + /** + * Fetches the leader index metadata from the remote cluster. Before fetching the index metadata, the remote cluster is checked for + * license compatibility with CCR. If the remote cluster is not licensed for CCR, the {@code onFailure} consumer is invoked. + * Otherwise, the specified consumer is invoked with the leader index metadata fetched from the remote cluster. + * + * @param client the client + * @param clusterAlias the remote cluster alias + * @param leaderIndex the name of the leader index + * @param onFailure the failure consumer + * @param leaderIndexMetadataConsumer the leader index metadata consumer + * @param <T> the type of response the listener is waiting for + */ + public <T> void checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( + final Client client, + final String clusterAlias, + final String leaderIndex, + final Consumer<Exception> onFailure, + final Consumer<IndexMetaData> leaderIndexMetadataConsumer) { + + final ClusterStateRequest request = new ClusterStateRequest(); + request.clear(); + request.metaData(true); + request.indices(leaderIndex); + checkRemoteClusterLicenseAndFetchClusterState( + client, + clusterAlias, + request, + onFailure, + leaderClusterState -> leaderIndexMetadataConsumer.accept(leaderClusterState.getMetaData().index(leaderIndex)), + licenseCheck -> indexMetadataNonCompliantRemoteLicense(leaderIndex, licenseCheck), + e -> indexMetadataUnknownRemoteLicense(leaderIndex, clusterAlias, e)); + } + + /** + * Fetches the leader cluster state from the remote cluster by the specified cluster state request. Before fetching the cluster state, + * the remote cluster is checked for license compliance with CCR. If the remote cluster is not licensed for CCR, + * the {@code onFailure} consumer is invoked.
Otherwise, the specified consumer is invoked with the leader cluster state fetched from + * the remote cluster. + * + * @param client the client + * @param clusterAlias the remote cluster alias + * @param request the cluster state request + * @param onFailure the failure consumer + * @param leaderClusterStateConsumer the leader cluster state consumer + * @param <T> the type of response the listener is waiting for + */ + public <T> void checkRemoteClusterLicenseAndFetchClusterState( + final Client client, + final String clusterAlias, + final ClusterStateRequest request, + final Consumer<Exception> onFailure, + final Consumer<ClusterState> leaderClusterStateConsumer) { + checkRemoteClusterLicenseAndFetchClusterState( + client, + clusterAlias, + request, + onFailure, + leaderClusterStateConsumer, + CcrLicenseChecker::clusterStateNonCompliantRemoteLicense, + e -> clusterStateUnknownRemoteLicense(clusterAlias, e)); + } + + /** + * Fetches the leader cluster state from the remote cluster by the specified cluster state request. Before fetching the cluster state, + * the remote cluster is checked for license compliance with CCR. If the remote cluster is not licensed for CCR, + * the {@code onFailure} consumer is invoked. Otherwise, the specified consumer is invoked with the leader cluster state fetched from + * the remote cluster. + * + * @param client the client + * @param clusterAlias the remote cluster alias + * @param request the cluster state request + * @param onFailure the failure consumer + * @param leaderClusterStateConsumer the leader cluster state consumer + * @param nonCompliantLicense the supplier for when the license state of the remote cluster is non-compliant + * @param unknownLicense the supplier for when the license state of the remote cluster is unknown due to failure + * @param <T> the type of response the listener is waiting for + */ + private <T> void checkRemoteClusterLicenseAndFetchClusterState( + final Client client, + final String clusterAlias, + final ClusterStateRequest request, + final Consumer<Exception> onFailure, + final Consumer<ClusterState> leaderClusterStateConsumer, + final Function<RemoteClusterLicenseChecker.LicenseCheck, ElasticsearchStatusException> nonCompliantLicense, + final Function<Exception, ElasticsearchStatusException> unknownLicense) { + // we have to check the license on the remote cluster + new RemoteClusterLicenseChecker(client, XPackLicenseState::isCcrAllowedForOperationMode).checkRemoteClusterLicenses( + Collections.singletonList(clusterAlias), + new ActionListener<RemoteClusterLicenseChecker.LicenseCheck>() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { + if (licenseCheck.isSuccess()) { + final Client leaderClient = client.getRemoteClusterClient(clusterAlias); + final ActionListener<ClusterStateResponse> clusterStateListener = + ActionListener.wrap(s -> leaderClusterStateConsumer.accept(s.getState()), onFailure); + // following an index in a remote cluster, so use the remote client to fetch the leader index metadata + leaderClient.admin().cluster().state(request, clusterStateListener); + } else { + onFailure.accept(nonCompliantLicense.apply(licenseCheck)); + } + } + + @Override + public void onFailure(final Exception e) { + onFailure.accept(unknownLicense.apply(e)); + } + + }); + } + + private static ElasticsearchStatusException indexMetadataNonCompliantRemoteLicense( + final String leaderIndex, final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { + final String clusterAlias = licenseCheck.remoteClusterLicenseInfo().clusterAlias(); + final String message = String.format( + Locale.ROOT, + "can not fetch remote index [%s:%s] metadata as the remote cluster [%s] is not licensed for [ccr]; %s", + clusterAlias, + leaderIndex, +
clusterAlias, + RemoteClusterLicenseChecker.buildErrorMessage( + "ccr", + licenseCheck.remoteClusterLicenseInfo(), + RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)); + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST); + } + + private static ElasticsearchStatusException clusterStateNonCompliantRemoteLicense( + final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { + final String clusterAlias = licenseCheck.remoteClusterLicenseInfo().clusterAlias(); + final String message = String.format( + Locale.ROOT, + "can not fetch remote cluster state as the remote cluster [%s] is not licensed for [ccr]; %s", + clusterAlias, + RemoteClusterLicenseChecker.buildErrorMessage( + "ccr", + licenseCheck.remoteClusterLicenseInfo(), + RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)); + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST); + } + + private static ElasticsearchStatusException indexMetadataUnknownRemoteLicense( + final String leaderIndex, final String clusterAlias, final Exception cause) { + final String message = String.format( + Locale.ROOT, + "can not fetch remote index [%s:%s] metadata as the license state of the remote cluster [%s] could not be determined", + clusterAlias, + leaderIndex, + clusterAlias); + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, cause); + } + + private static ElasticsearchStatusException clusterStateUnknownRemoteLicense(final String clusterAlias, final Exception cause) { + final String message = String.format( + Locale.ROOT, + "can not fetch remote cluster state as the license state of the remote cluster [%s] could not be determined", clusterAlias); + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, cause); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java new file mode 100644 index 00000000000..a942990ea5a --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.unit.TimeValue; + +import java.util.Arrays; +import java.util.List; + +/** + * Container class for CCR settings. + */ +public final class CcrSettings { + + // prevent construction + private CcrSettings() { + + } + + /** + * Setting for controlling whether or not CCR is enabled. + */ + static final Setting<Boolean> CCR_ENABLED_SETTING = Setting.boolSetting("xpack.ccr.enabled", true, Property.NodeScope); + + /** + * Index setting for a following index. + */ + public static final Setting<Boolean> CCR_FOLLOWING_INDEX_SETTING = + Setting.boolSetting("index.xpack.ccr.following_index", false, Setting.Property.IndexScope); + + /** + * Setting for controlling the interval between polls of leader clusters to check whether there are indices to follow + */ + public static final Setting<TimeValue> CCR_AUTO_FOLLOW_POLL_INTERVAL = + Setting.timeSetting("xpack.ccr.auto_follow.poll_interval", TimeValue.timeValueMillis(2500), Property.NodeScope); + + /** + * The settings defined by CCR.
+ * + * @return the settings + */ + static List<Setting<?>> getSettings() { + return Arrays.asList( + CCR_ENABLED_SETTING, + CCR_FOLLOWING_INDEX_SETTING, + CCR_AUTO_FOLLOW_POLL_INTERVAL); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java new file mode 100644 index 00000000000..e28214341a9 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -0,0 +1,325 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateApplier; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.index.Index; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrSettings; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; + +/** + * A component that runs only on the elected master node and follows leader indices automatically + * if they match an auto-follow pattern that is defined in {@link AutoFollowMetadata}.
+ */ +public class AutoFollowCoordinator implements ClusterStateApplier { + + private static final Logger LOGGER = LogManager.getLogger(AutoFollowCoordinator.class); + + private final Client client; + private final TimeValue pollInterval; + private final ThreadPool threadPool; + private final ClusterService clusterService; + private final CcrLicenseChecker ccrLicenseChecker; + + private volatile boolean localNodeMaster = false; + + public AutoFollowCoordinator( + Settings settings, + Client client, + ThreadPool threadPool, + ClusterService clusterService, + CcrLicenseChecker ccrLicenseChecker) { + this.client = client; + this.threadPool = threadPool; + this.clusterService = clusterService; + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker, "ccrLicenseChecker"); + + this.pollInterval = CcrSettings.CCR_AUTO_FOLLOW_POLL_INTERVAL.get(settings); + clusterService.addStateApplier(this); + } + + private void doAutoFollow() { + if (localNodeMaster == false) { + return; + } + ClusterState followerClusterState = clusterService.state(); + AutoFollowMetadata autoFollowMetadata = followerClusterState.getMetaData().custom(AutoFollowMetadata.TYPE); + if (autoFollowMetadata == null) { + threadPool.schedule(pollInterval, ThreadPool.Names.SAME, this::doAutoFollow); + return; + } + + if (autoFollowMetadata.getPatterns().isEmpty()) { + threadPool.schedule(pollInterval, ThreadPool.Names.SAME, this::doAutoFollow); + return; + } + + if (ccrLicenseChecker.isCcrAllowed() == false) { + // TODO: set non-compliant status on auto-follow coordination that can be viewed via a stats API + LOGGER.warn("skipping auto-follower coordination", LicenseUtils.newComplianceException("ccr")); + threadPool.schedule(pollInterval, ThreadPool.Names.SAME, this::doAutoFollow); + return; + } + + Consumer<Exception> handler = e -> { + if (e != null) { + LOGGER.warn("failure occurred during auto-follower coordination", e); + } + threadPool.schedule(pollInterval, ThreadPool.Names.SAME, this::doAutoFollow); + }; + AutoFollower operation = new AutoFollower(handler, followerClusterState) { + + @Override + void getLeaderClusterState(final String leaderClusterAlias, final BiConsumer<ClusterState, Exception> handler) { + final ClusterStateRequest request = new ClusterStateRequest(); + request.clear(); + request.metaData(true); + + if ("_local_".equals(leaderClusterAlias)) { + client.admin().cluster().state( + request, ActionListener.wrap(r -> handler.accept(r.getState(), null), e -> handler.accept(null, e))); + } else { + final Client leaderClient = client.getRemoteClusterClient(leaderClusterAlias); + // TODO: set non-compliant status on auto-follow coordination that can be viewed via a stats API + ccrLicenseChecker.checkRemoteClusterLicenseAndFetchClusterState( + leaderClient, + leaderClusterAlias, + request, + e -> handler.accept(null, e), + leaderClusterState -> handler.accept(leaderClusterState, null)); + } + + } + + @Override + void createAndFollow(FollowIndexAction.Request followRequest, + Runnable successHandler, + Consumer<Exception> failureHandler) { + client.execute(CreateAndFollowIndexAction.INSTANCE, new CreateAndFollowIndexAction.Request(followRequest), + ActionListener.wrap(r -> successHandler.run(), failureHandler)); + } + + @Override + void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction, Consumer<Exception> handler) { + clusterService.submitStateUpdateTask("update_auto_follow_metadata", new ClusterStateUpdateTask() { + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + return updateFunction.apply(currentState); + } + + @Override +
public void onFailure(String source, Exception e) { + handler.accept(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + handler.accept(null); + } + }); + } + + }; + operation.autoFollowIndices(); + } + + @Override + public void applyClusterState(ClusterChangedEvent event) { + final boolean beforeLocalMasterNode = localNodeMaster; + localNodeMaster = event.localNodeMaster(); + if (beforeLocalMasterNode == false && localNodeMaster) { + threadPool.schedule(pollInterval, ThreadPool.Names.SAME, this::doAutoFollow); + } + } + + abstract static class AutoFollower { + + private final Consumer<Exception> handler; + private final ClusterState followerClusterState; + private final AutoFollowMetadata autoFollowMetadata; + + private final CountDown autoFollowPatternsCountDown; + private final AtomicReference<Exception> autoFollowPatternsErrorHolder = new AtomicReference<>(); + + AutoFollower(final Consumer<Exception> handler, final ClusterState followerClusterState) { + this.handler = handler; + this.followerClusterState = followerClusterState; + this.autoFollowMetadata = followerClusterState.getMetaData().custom(AutoFollowMetadata.TYPE); + this.autoFollowPatternsCountDown = new CountDown(autoFollowMetadata.getPatterns().size()); + } + + void autoFollowIndices() { + for (Map.Entry<String, AutoFollowPattern> entry : autoFollowMetadata.getPatterns().entrySet()) { + String clusterAlias = entry.getKey(); + AutoFollowPattern autoFollowPattern = entry.getValue(); + List<String> followedIndices = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get(clusterAlias); + + getLeaderClusterState(clusterAlias, (leaderClusterState, e) -> { + if (leaderClusterState != null) { + assert e == null; + handleClusterAlias(clusterAlias, autoFollowPattern, followedIndices, leaderClusterState); + } else { + finalise(e); + } + }); + } + } + + private void handleClusterAlias(String clusterAlias, AutoFollowPattern autoFollowPattern, + List<String> followedIndexUUIDs, ClusterState leaderClusterState) { + final List<Index> leaderIndicesToFollow = + getLeaderIndicesToFollow(autoFollowPattern, leaderClusterState, followerClusterState, followedIndexUUIDs); + if (leaderIndicesToFollow.isEmpty()) { + finalise(null); + } else { + final CountDown leaderIndicesCountDown = new CountDown(leaderIndicesToFollow.size()); + final AtomicReference<Exception> leaderIndicesErrorHolder = new AtomicReference<>(); + for (Index indexToFollow : leaderIndicesToFollow) { + final String leaderIndexName = indexToFollow.getName(); + final String followIndexName = getFollowerIndexName(autoFollowPattern, leaderIndexName); + + String leaderIndexNameWithClusterAliasPrefix = clusterAlias.equals("_local_") ?
leaderIndexName : + clusterAlias + ":" + leaderIndexName; + FollowIndexAction.Request followRequest = + new FollowIndexAction.Request(leaderIndexNameWithClusterAliasPrefix, followIndexName, + autoFollowPattern.getMaxBatchOperationCount(), autoFollowPattern.getMaxConcurrentReadBatches(), + autoFollowPattern.getMaxOperationSizeInBytes(), autoFollowPattern.getMaxConcurrentWriteBatches(), + autoFollowPattern.getMaxWriteBufferSize(), autoFollowPattern.getRetryTimeout(), + autoFollowPattern.getIdleShardRetryDelay()); + + // Execute if the create and follow api call succeeds: + Runnable successHandler = () -> { + LOGGER.info("Auto followed leader index [{}] as follow index [{}]", leaderIndexName, followIndexName); + + // This function updates the auto follow metadata in the cluster to record that the leader index has been followed: + // (so that we do not try to follow it in subsequent auto follow runs) + Function<ClusterState, ClusterState> function = recordLeaderIndexAsFollowFunction(clusterAlias, indexToFollow); + // The coordinator always runs on the elected master node, so we can update cluster state here: + updateAutoFollowMetadata(function, updateError -> { + if (updateError != null) { + LOGGER.error("Failed to mark leader index [" + leaderIndexName + "] as auto followed", updateError); + if (leaderIndicesErrorHolder.compareAndSet(null, updateError) == false) { + leaderIndicesErrorHolder.get().addSuppressed(updateError); + } + } else { + LOGGER.debug("Successfully marked leader index [{}] as auto followed", leaderIndexName); + } + if (leaderIndicesCountDown.countDown()) { + finalise(leaderIndicesErrorHolder.get()); + } + }); + }; + // Execute if the create and follow api call fails: + Consumer<Exception> failureHandler = followError -> { + assert followError != null; + LOGGER.warn("Failed to auto follow leader index [" + leaderIndexName + "]", followError); + if (leaderIndicesCountDown.countDown()) { + finalise(followError); + } + }; + createAndFollow(followRequest, successHandler, failureHandler); + } + } + } + + private void finalise(Exception failure) { + if (autoFollowPatternsErrorHolder.compareAndSet(null, failure) == false) { + autoFollowPatternsErrorHolder.get().addSuppressed(failure); + } + + if (autoFollowPatternsCountDown.countDown()) { + handler.accept(autoFollowPatternsErrorHolder.get()); + } + } + + static List<Index> getLeaderIndicesToFollow(AutoFollowPattern autoFollowPattern, + ClusterState leaderClusterState, + ClusterState followerClusterState, + List<String> followedIndexUUIDs) { + List<Index> leaderIndicesToFollow = new ArrayList<>(); + for (IndexMetaData leaderIndexMetaData : leaderClusterState.getMetaData()) { + if (autoFollowPattern.match(leaderIndexMetaData.getIndex().getName())) { + if (followedIndexUUIDs.contains(leaderIndexMetaData.getIndex().getUUID()) == false) { + // TODO: iterate over the indices in the followerClusterState and check whether an IndexMetaData + // has a leader index uuid custom metadata entry that matches the uuid of the leaderIndexMetaData variable. + // If so, then handle it differently: do not follow it, but just add an entry to + // AutoFollowMetadata#followedLeaderIndexUUIDs + leaderIndicesToFollow.add(leaderIndexMetaData.getIndex()); + } + } + } + return leaderIndicesToFollow; + } + + static String getFollowerIndexName(AutoFollowPattern autoFollowPattern, String leaderIndexName) { + if (autoFollowPattern.getFollowIndexPattern() != null) { + return autoFollowPattern.getFollowIndexPattern().replace("{{leader_index}}", leaderIndexName); + } else { + return leaderIndexName; + } + } + + static Function<ClusterState, ClusterState>
recordLeaderIndexAsFollowFunction(String clusterAlias, Index indexToFollow) { + return currentState -> { + AutoFollowMetadata currentAutoFollowMetadata = currentState.metaData().custom(AutoFollowMetadata.TYPE); + + Map<String, List<String>> newFollowedIndexUUIDS = + new HashMap<>(currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs()); + newFollowedIndexUUIDS.get(clusterAlias).add(indexToFollow.getUUID()); + + ClusterState.Builder newState = ClusterState.builder(currentState); + AutoFollowMetadata newAutoFollowMetadata = + new AutoFollowMetadata(currentAutoFollowMetadata.getPatterns(), newFollowedIndexUUIDS); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(AutoFollowMetadata.TYPE, newAutoFollowMetadata) + .build()); + return newState.build(); + }; + } + + /** + * Fetch the cluster state from the leader with the specified cluster alias + * + * @param leaderClusterAlias the cluster alias of the leader + * @param handler the callback to invoke + */ + abstract void getLeaderClusterState(String leaderClusterAlias, BiConsumer<ClusterState, Exception> handler); + + abstract void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer<Exception> failureHandler); + + abstract void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction, Consumer<Exception> handler); + + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java new file mode 100644 index 00000000000..b5d6697fc73 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.tasks.Task; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +public class CcrStatsAction extends Action { + + public static final String NAME = "cluster:monitor/ccr/stats"; + + public static final CcrStatsAction INSTANCE = new CcrStatsAction(); + + private CcrStatsAction() { + super(NAME); + } + + @Override + public TasksResponse newResponse() { + return new TasksResponse(); + } + + public static class TasksResponse extends BaseTasksResponse implements ToXContentObject { + + private final List taskResponses; + + public TasksResponse() { + this(Collections.emptyList(), Collections.emptyList(), Collections.emptyList()); + } + + TasksResponse( + final List taskFailures, + final List nodeFailures, + final List taskResponses) { + super(taskFailures, nodeFailures); + this.taskResponses = taskResponses; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + // sort by index name, then shard ID + final Map> taskResponsesByIndex = new TreeMap<>(); + for (final TaskResponse taskResponse : taskResponses) { + taskResponsesByIndex.computeIfAbsent( + taskResponse.followerShardId().getIndexName(), + k -> new TreeMap<>()).put(taskResponse.followerShardId().getId(), taskResponse); + } + builder.startObject(); + { + for (final Map.Entry> index : taskResponsesByIndex.entrySet()) { + builder.startArray(index.getKey()); + { + for (final Map.Entry shard : index.getValue().entrySet()) { + shard.getValue().status().toXContent(builder, params); + } + } + builder.endArray(); + } + } + builder.endObject(); + return builder; + } + } + + public static class TasksRequest extends BaseTasksRequest implements IndicesRequest { + + private String[] indices; + + @Override + public String[] indices() { + return indices; + } + + public void setIndices(final String[] indices) { + this.indices = indices; + } + + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public void setIndicesOptions(final IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + } + + @Override + public boolean match(final Task task) { + /* + * This is a limitation of the current tasks API. When the transport action is executed, the tasks API invokes this match method + * to find the tasks on which to execute the task-level operation (see TransportTasksAction#nodeOperation and + * TransportTasksAction#processTasks). If we do the matching here, then we can not match index patterns. 
Therefore, we override + * TransportTasksAction#processTasks (see TransportCcrStatsAction#processTasks) and do the matching there. We should never see + * this method invoked and since we can not support matching a task on the basis of the request here, we throw that this + * operation is unsupported. + */ + throw new UnsupportedOperationException(); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + } + + } + + public static class TaskResponse implements Writeable { + + private final ShardId followerShardId; + + ShardId followerShardId() { + return followerShardId; + } + + private final ShardFollowNodeTask.Status status; + + ShardFollowNodeTask.Status status() { + return status; + } + + TaskResponse(final ShardId followerShardId, final ShardFollowNodeTask.Status status) { + this.followerShardId = followerShardId; + this.status = status; + } + + TaskResponse(final StreamInput in) throws IOException { + this.followerShardId = ShardId.readShardId(in); + this.status = new ShardFollowNodeTask.Status(in); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + followerShardId.writeTo(out); + status.writeTo(out); + } + + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java new file mode 100644 index 00000000000..223f6ed8e6d --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java @@ -0,0 +1,367 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.ActiveShardsObserver; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrSettings; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class CreateAndFollowIndexAction extends Action { + + public static final CreateAndFollowIndexAction INSTANCE = new CreateAndFollowIndexAction(); + public static final String NAME = "indices:admin/xpack/ccr/create_and_follow_index"; + + private CreateAndFollowIndexAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest implements IndicesRequest { + + private FollowIndexAction.Request followRequest; + + public Request(FollowIndexAction.Request followRequest) { + this.followRequest = Objects.requireNonNull(followRequest); + } + + Request() { + } + + public FollowIndexAction.Request getFollowRequest() { + return followRequest; + } + + @Override + public ActionRequestValidationException validate() { + return followRequest.validate(); + } + + @Override + public String[] indices() { + return new String[]{followRequest.getFollowerIndex()}; + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + 
followRequest = new FollowIndexAction.Request(); + followRequest.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + followRequest.writeTo(out); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(followRequest, request.followRequest); + } + + @Override + public int hashCode() { + return Objects.hash(followRequest); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private boolean followIndexCreated; + private boolean followIndexShardsAcked; + private boolean indexFollowingStarted; + + Response() { + } + + Response(boolean followIndexCreated, boolean followIndexShardsAcked, boolean indexFollowingStarted) { + this.followIndexCreated = followIndexCreated; + this.followIndexShardsAcked = followIndexShardsAcked; + this.indexFollowingStarted = indexFollowingStarted; + } + + public boolean isFollowIndexCreated() { + return followIndexCreated; + } + + public boolean isFollowIndexShardsAcked() { + return followIndexShardsAcked; + } + + public boolean isIndexFollowingStarted() { + return indexFollowingStarted; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + followIndexCreated = in.readBoolean(); + followIndexShardsAcked = in.readBoolean(); + indexFollowingStarted = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(followIndexCreated); + out.writeBoolean(followIndexShardsAcked); + out.writeBoolean(indexFollowingStarted); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("follow_index_created", followIndexCreated); + builder.field("follow_index_shards_acked", followIndexShardsAcked); + builder.field("index_following_started", indexFollowingStarted); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return followIndexCreated == response.followIndexCreated && + followIndexShardsAcked == response.followIndexShardsAcked && + indexFollowingStarted == response.indexFollowingStarted; + } + + @Override + public int hashCode() { + return Objects.hash(followIndexCreated, followIndexShardsAcked, indexFollowingStarted); + } + } + + public static class TransportAction extends TransportMasterNodeAction { + + private final Client client; + private final AllocationService allocationService; + private final RemoteClusterService remoteClusterService; + private final ActiveShardsObserver activeShardsObserver; + private final CcrLicenseChecker ccrLicenseChecker; + + @Inject + public TransportAction( + final Settings settings, + final ThreadPool threadPool, + final TransportService transportService, + final ClusterService clusterService, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Client client, + final AllocationService allocationService, + final CcrLicenseChecker ccrLicenseChecker) { + super(settings, NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, Request::new); + this.client = client; + this.allocationService = 
allocationService; + this.remoteClusterService = transportService.getRemoteClusterService(); + this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool); + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void masterOperation( + final Request request, final ClusterState state, final ActionListener listener) throws Exception { + if (ccrLicenseChecker.isCcrAllowed() == false) { + listener.onFailure(LicenseUtils.newComplianceException("ccr")); + return; + } + final String[] indices = new String[]{request.getFollowRequest().getLeaderIndex()}; + final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); + if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + createFollowerIndexAndFollowLocalIndex(request, state, listener); + } else { + assert remoteClusterIndices.size() == 1; + final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); + assert entry.getValue().size() == 1; + final String clusterAlias = entry.getKey(); + final String leaderIndex = entry.getValue().get(0); + createFollowerIndexAndFollowRemoteIndex(request, clusterAlias, leaderIndex, listener); + } + } + + private void createFollowerIndexAndFollowLocalIndex( + final Request request, final ClusterState state, final ActionListener listener) { + // following an index in local cluster, so use local cluster state to fetch leader index metadata + final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getFollowRequest().getLeaderIndex()); + createFollowerIndex(leaderIndexMetadata, request, listener); + } + + private void createFollowerIndexAndFollowRemoteIndex( + final Request request, + final String clusterAlias, + final String leaderIndex, + final ActionListener listener) { + ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( + client, + clusterAlias, + leaderIndex, + listener::onFailure, + leaderIndexMetaData -> createFollowerIndex(leaderIndexMetaData, request, listener)); + } + + private void createFollowerIndex( + final IndexMetaData leaderIndexMetaData, final Request request, final ActionListener listener) { + if (leaderIndexMetaData == null) { + listener.onFailure(new IllegalArgumentException("leader index [" + request.getFollowRequest().getLeaderIndex() + + "] does not exist")); + return; + } + + ActionListener handler = ActionListener.wrap( + result -> { + if (result) { + initiateFollowing(request, listener); + } else { + listener.onResponse(new Response(true, false, false)); + } + }, + listener::onFailure); + // Can't use create index api here, because then index templates can alter the mappings / settings. + // And index templates could introduce settings / mappings that are incompatible with the leader index. 
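masterOperation above decides between the local and remote code paths by asking RemoteClusterService#groupClusterIndices to group the leader index expression per cluster alias. The sketch below is a toy illustration of the `alias:index` naming convention that grouping relies on; the helper class is hypothetical and is not the real API:

```java
// Toy parser for the "cluster_alias:index" convention used when following a
// remote leader index; hypothetical class, the real grouping is done by
// RemoteClusterService#groupClusterIndices.
final class LeaderIndexRef {
    final String clusterAlias; // null means the index lives on the local cluster
    final String indexName;

    private LeaderIndexRef(String clusterAlias, String indexName) {
        this.clusterAlias = clusterAlias;
        this.indexName = indexName;
    }

    static LeaderIndexRef parse(String expression) {
        int separator = expression.indexOf(':');
        if (separator < 0) {
            return new LeaderIndexRef(null, expression); // local leader index
        }
        return new LeaderIndexRef(expression.substring(0, separator), expression.substring(separator + 1));
    }

    public static void main(String[] args) {
        LeaderIndexRef remote = LeaderIndexRef.parse("leader_cluster:my-index");
        System.out.println(remote.clusterAlias + " / " + remote.indexName); // leader_cluster / my-index
        LeaderIndexRef local = LeaderIndexRef.parse("my-index");
        System.out.println(local.clusterAlias + " / " + local.indexName);   // null / my-index
    }
}
```

The cluster-state update task that follows builds the follower's IndexMetaData by hand, which is why index templates never get a chance to alter the copied settings or mappings.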
+ clusterService.submitStateUpdateTask("follow_index_action", new AckedClusterStateUpdateTask(request, handler) { + + @Override + protected Boolean newResponse(boolean acknowledged) { + return acknowledged; + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + String followIndex = request.getFollowRequest().getFollowerIndex(); + IndexMetaData currentIndex = currentState.metaData().index(followIndex); + if (currentIndex != null) { + throw new ResourceAlreadyExistsException(currentIndex.getIndex()); + } + + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + IndexMetaData.Builder imdBuilder = IndexMetaData.builder(followIndex); + + // Copy all settings, but overwrite a few settings. + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(leaderIndexMetaData.getSettings()); + // Overwriting UUID here, because otherwise we can't follow indices in the same cluster + settingsBuilder.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); + settingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followIndex); + settingsBuilder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); + imdBuilder.settings(settingsBuilder); + + // Copy mappings from leader IMD to follow IMD + for (ObjectObjectCursor cursor : leaderIndexMetaData.getMappings()) { + imdBuilder.putMapping(cursor.value); + } + imdBuilder.setRoutingNumShards(leaderIndexMetaData.getRoutingNumShards()); + IndexMetaData followIMD = imdBuilder.build(); + mdBuilder.put(followIMD, false); + + ClusterState.Builder builder = ClusterState.builder(currentState); + builder.metaData(mdBuilder.build()); + ClusterState updatedState = builder.build(); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()) + .addAsNew(updatedState.metaData().index(request.getFollowRequest().getFollowerIndex())); + updatedState = allocationService.reroute( + ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), + "follow index [" + request.getFollowRequest().getFollowerIndex() + "] created"); + + logger.info("[{}] creating index, cause [ccr_create_and_follow], shards [{}]/[{}]", + followIndex, followIMD.getNumberOfShards(), followIMD.getNumberOfReplicas()); + + return updatedState; + } + }); + } + + private void initiateFollowing(Request request, ActionListener listener) { + activeShardsObserver.waitForActiveShards(new String[]{request.followRequest.getFollowerIndex()}, + ActiveShardCount.DEFAULT, request.timeout(), result -> { + if (result) { + client.execute(FollowIndexAction.INSTANCE, request.getFollowRequest(), ActionListener.wrap( + r -> listener.onResponse(new Response(true, true, r.isAcknowledged())), + listener::onFailure + )); + } else { + listener.onResponse(new Response(true, false, false)); + } + }, listener::onFailure); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getFollowRequest().getFollowerIndex()); + } + + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternAction.java new file mode 100644 index 00000000000..82e142202d2 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternAction.java @@ -0,0 +1,81 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class DeleteAutoFollowPatternAction extends Action { + + public static final String NAME = "cluster:admin/xpack/ccr/auto_follow_pattern/delete"; + public static final DeleteAutoFollowPatternAction INSTANCE = new DeleteAutoFollowPatternAction(); + + private DeleteAutoFollowPatternAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + public static class Request extends AcknowledgedRequest { + + private String leaderClusterAlias; + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (leaderClusterAlias == null) { + validationException = addValidationError("leaderClusterAlias is missing", validationException); + } + return validationException; + } + + public String getLeaderClusterAlias() { + return leaderClusterAlias; + } + + public void setLeaderClusterAlias(String leaderClusterAlias) { + this.leaderClusterAlias = leaderClusterAlias; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + leaderClusterAlias = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(leaderClusterAlias); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(leaderClusterAlias, request.leaderClusterAlias); + } + + @Override + public int hashCode() { + return Objects.hash(leaderClusterAlias); + } + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java new file mode 100644 index 00000000000..49822455110 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java @@ -0,0 +1,571 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexingSlowLog; +import org.elasticsearch.index.SearchSlowLog; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesRequestCache; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrSettings; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.stream.Collectors; + +public class FollowIndexAction extends Action { + + public static final FollowIndexAction INSTANCE = new FollowIndexAction(); + public static final String NAME = "cluster:admin/xpack/ccr/follow_index"; + + private FollowIndexAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + private static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index"); + private static final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index"); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, + (args, followerIndex) -> { + 
if (args[1] != null) { + followerIndex = (String) args[1]; + } + return new Request((String) args[0], followerIndex, (Integer) args[2], (Integer) args[3], (Long) args[4], + (Integer) args[5], (Integer) args[6], (TimeValue) args[7], (TimeValue) args[8]); + }); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), LEADER_INDEX_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOWER_INDEX_FIELD); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_BATCH_OPERATION_COUNT); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_CONCURRENT_READ_BATCHES); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_BATCH_SIZE_IN_BYTES); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_CONCURRENT_WRITE_BATCHES); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_WRITE_BUFFER_SIZE); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), ShardFollowTask.RETRY_TIMEOUT.getPreferredName()), + ShardFollowTask.RETRY_TIMEOUT, ObjectParser.ValueType.STRING); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), ShardFollowTask.IDLE_SHARD_RETRY_DELAY.getPreferredName()), + ShardFollowTask.IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); + } + + public static Request fromXContent(XContentParser parser, String followerIndex) throws IOException { + Request request = PARSER.parse(parser, followerIndex); + if (followerIndex != null) { + if (request.followerIndex == null) { + request.followerIndex = followerIndex; + } else { + if (request.followerIndex.equals(followerIndex) == false) { + throw new IllegalArgumentException("provided follower_index is not equal"); + } + } + } + return request; + } + + private String leaderIndex; + private String followerIndex; + private int maxBatchOperationCount; + private int maxConcurrentReadBatches; + private long maxOperationSizeInBytes; + private int maxConcurrentWriteBatches; + private int maxWriteBufferSize; + private TimeValue retryTimeout; + private TimeValue idleShardRetryDelay; + + public Request( + String leaderIndex, + String followerIndex, + Integer maxBatchOperationCount, + Integer maxConcurrentReadBatches, + Long maxOperationSizeInBytes, + Integer maxConcurrentWriteBatches, + Integer maxWriteBufferSize, + TimeValue retryTimeout, + TimeValue idleShardRetryDelay) { + + if (leaderIndex == null) { + throw new IllegalArgumentException("leader_index is missing"); + } + if (followerIndex == null) { + throw new IllegalArgumentException("follower_index is missing"); + } + if (maxBatchOperationCount == null) { + maxBatchOperationCount = ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT; + } + if (maxConcurrentReadBatches == null) { + maxConcurrentReadBatches = ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES; + } + if (maxOperationSizeInBytes == null) { + maxOperationSizeInBytes = ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; + } + if (maxConcurrentWriteBatches == null) { + maxConcurrentWriteBatches = ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES; + } + if (maxWriteBufferSize == null) { + maxWriteBufferSize = ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE; + } + if (retryTimeout == null) { + retryTimeout = ShardFollowNodeTask.DEFAULT_RETRY_TIMEOUT; + } + if 
(idleShardRetryDelay == null) { + idleShardRetryDelay = ShardFollowNodeTask.DEFAULT_IDLE_SHARD_RETRY_DELAY; + } + + if (maxBatchOperationCount < 1) { + throw new IllegalArgumentException("maxBatchOperationCount must be larger than 0"); + } + if (maxConcurrentReadBatches < 1) { + throw new IllegalArgumentException("concurrent_processors must be larger than 0"); + } + if (maxOperationSizeInBytes <= 0) { + throw new IllegalArgumentException("processor_max_translog_bytes must be larger than 0"); + } + if (maxConcurrentWriteBatches < 1) { + throw new IllegalArgumentException("maxConcurrentWriteBatches must be larger than 0"); + } + if (maxWriteBufferSize < 1) { + throw new IllegalArgumentException("maxWriteBufferSize must be larger than 0"); + } + + this.leaderIndex = leaderIndex; + this.followerIndex = followerIndex; + this.maxBatchOperationCount = maxBatchOperationCount; + this.maxConcurrentReadBatches = maxConcurrentReadBatches; + this.maxOperationSizeInBytes = maxOperationSizeInBytes; + this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; + this.maxWriteBufferSize = maxWriteBufferSize; + this.retryTimeout = retryTimeout; + this.idleShardRetryDelay = idleShardRetryDelay; + } + + Request() { + } + + public String getLeaderIndex() { + return leaderIndex; + } + + public String getFollowerIndex() { + return followerIndex; + } + + public int getMaxBatchOperationCount() { + return maxBatchOperationCount; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + leaderIndex = in.readString(); + followerIndex = in.readString(); + maxBatchOperationCount = in.readVInt(); + maxConcurrentReadBatches = in.readVInt(); + maxOperationSizeInBytes = in.readVLong(); + maxConcurrentWriteBatches = in.readVInt(); + maxWriteBufferSize = in.readVInt(); + retryTimeout = in.readOptionalTimeValue(); + idleShardRetryDelay = in.readOptionalTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(leaderIndex); + out.writeString(followerIndex); + out.writeVInt(maxBatchOperationCount); + out.writeVInt(maxConcurrentReadBatches); + out.writeVLong(maxOperationSizeInBytes); + out.writeVInt(maxConcurrentWriteBatches); + out.writeVInt(maxWriteBufferSize); + out.writeOptionalTimeValue(retryTimeout); + out.writeOptionalTimeValue(idleShardRetryDelay); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex); + builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); + builder.field(ShardFollowTask.MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); + builder.field(ShardFollowTask.MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes); + builder.field(ShardFollowTask.MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + builder.field(ShardFollowTask.MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); + builder.field(ShardFollowTask.MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); + builder.field(ShardFollowTask.RETRY_TIMEOUT.getPreferredName(), retryTimeout.getStringRep()); + builder.field(ShardFollowTask.IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); + } + builder.endObject(); + return builder; + } + + @Override + public boolean 
equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return maxBatchOperationCount == request.maxBatchOperationCount && + maxConcurrentReadBatches == request.maxConcurrentReadBatches && + maxOperationSizeInBytes == request.maxOperationSizeInBytes && + maxConcurrentWriteBatches == request.maxConcurrentWriteBatches && + maxWriteBufferSize == request.maxWriteBufferSize && + Objects.equals(retryTimeout, request.retryTimeout) && + Objects.equals(idleShardRetryDelay, request.idleShardRetryDelay) && + Objects.equals(leaderIndex, request.leaderIndex) && + Objects.equals(followerIndex, request.followerIndex); + } + + @Override + public int hashCode() { + return Objects.hash( + leaderIndex, + followerIndex, + maxBatchOperationCount, + maxConcurrentReadBatches, + maxOperationSizeInBytes, + maxConcurrentWriteBatches, + maxWriteBufferSize, + retryTimeout, + idleShardRetryDelay + ); + } + } + + public static class TransportAction extends HandledTransportAction { + + private final Client client; + private final ThreadPool threadPool; + private final ClusterService clusterService; + private final RemoteClusterService remoteClusterService; + private final PersistentTasksService persistentTasksService; + private final IndicesService indicesService; + private final CcrLicenseChecker ccrLicenseChecker; + + @Inject + public TransportAction( + final Settings settings, + final ThreadPool threadPool, + final TransportService transportService, + final ActionFilters actionFilters, + final Client client, + final ClusterService clusterService, + final PersistentTasksService persistentTasksService, + final IndicesService indicesService, + final CcrLicenseChecker ccrLicenseChecker) { + super(settings, NAME, transportService, actionFilters, Request::new); + this.client = client; + this.threadPool = threadPool; + this.clusterService = clusterService; + this.remoteClusterService = transportService.getRemoteClusterService(); + this.persistentTasksService = persistentTasksService; + this.indicesService = indicesService; + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + protected void doExecute(final Task task, + final Request request, + final ActionListener listener) { + if (ccrLicenseChecker.isCcrAllowed() == false) { + listener.onFailure(LicenseUtils.newComplianceException("ccr")); + return; + } + final String[] indices = new String[]{request.leaderIndex}; + final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); + if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + followLocalIndex(request, listener); + } else { + assert remoteClusterIndices.size() == 1; + final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); + assert entry.getValue().size() == 1; + final String clusterAlias = entry.getKey(); + final String leaderIndex = entry.getValue().get(0); + followRemoteIndex(request, clusterAlias, leaderIndex, listener); + } + } + + private void followLocalIndex(final Request request, + final ActionListener listener) { + final ClusterState state = clusterService.state(); + final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); + // following an index in local cluster, so use local cluster state to fetch leader index metadata + final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getLeaderIndex()); + try { + start(request, null, 
leaderIndexMetadata, followerIndexMetadata, listener); + } catch (final IOException e) { + listener.onFailure(e); + } + } + + private void followRemoteIndex( + final Request request, + final String clusterAlias, + final String leaderIndex, + final ActionListener<AcknowledgedResponse> listener) { + final ClusterState state = clusterService.state(); + final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); + ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( + client, + clusterAlias, + leaderIndex, + listener::onFailure, + leaderIndexMetadata -> { + try { + start(request, clusterAlias, leaderIndexMetadata, followerIndexMetadata, listener); + } catch (final IOException e) { + listener.onFailure(e); + } + }); + } + + /** + * Performs validation on the provided leader and follow {@link IndexMetaData} instances and then + * creates a persistent task for each leader primary shard. These persistent tasks track changes in the leader + * shard and replicate these changes to a follower shard. + * + * Currently the following validation is performed: + * <ul>
+ *     <li>The leader index and follow index need to have the same number of primary shards</li>
+ * </ul>
    + */ + void start( + Request request, + String clusterNameAlias, + IndexMetaData leaderIndexMetadata, + IndexMetaData followIndexMetadata, + ActionListener handler) throws IOException { + + MapperService mapperService = followIndexMetadata != null ? indicesService.createIndexMapperService(followIndexMetadata) : null; + validate(request, leaderIndexMetadata, followIndexMetadata, mapperService); + final int numShards = followIndexMetadata.getNumberOfShards(); + final AtomicInteger counter = new AtomicInteger(numShards); + final AtomicReferenceArray responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards()); + Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() + .filter(e -> ShardFollowTask.HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));for (int i = 0; i < numShards; i++) { + final int shardId = i; + String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId; + + ShardFollowTask shardFollowTask = new ShardFollowTask(clusterNameAlias, + new ShardId(followIndexMetadata.getIndex(), shardId), + new ShardId(leaderIndexMetadata.getIndex(), shardId), + request.maxBatchOperationCount, request.maxConcurrentReadBatches, request.maxOperationSizeInBytes, + request.maxConcurrentWriteBatches, request.maxWriteBufferSize, request.retryTimeout, + request.idleShardRetryDelay, filteredHeaders); + persistentTasksService.sendStartRequest(taskId, ShardFollowTask.NAME, shardFollowTask, + new ActionListener>() { + @Override + public void onResponse(PersistentTasksCustomMetaData.PersistentTask task) { + responses.set(shardId, task); + finalizeResponse(); + } + + @Override + public void onFailure(Exception e) { + responses.set(shardId, e); + finalizeResponse(); + } + + void finalizeResponse() { + Exception error = null; + if (counter.decrementAndGet() == 0) { + for (int j = 0; j < responses.length(); j++) { + Object response = responses.get(j); + if (response instanceof Exception) { + if (error == null) { + error = (Exception) response; + } else { + error.addSuppressed((Throwable) response); + } + } + } + + if (error == null) { + // include task ids? 
+ handler.onResponse(new AcknowledgedResponse(true)); + } else { + // TODO: cancel all started tasks + handler.onFailure(error); + } + } + } + } + ); + } + } + } + + private static final Set> WHITELISTED_SETTINGS; + + static { + Set> whiteListedSettings = new HashSet<>(); + whiteListedSettings.add(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING); + whiteListedSettings.add(IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING); + + whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING); + whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING); + whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING); + whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING); + whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING); + whiteListedSettings.add(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING); + + whiteListedSettings.add(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING); + whiteListedSettings.add(IndexSettings.MAX_RESULT_WINDOW_SETTING); + whiteListedSettings.add(IndexSettings.INDEX_WARMER_ENABLED_SETTING); + whiteListedSettings.add(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING); + whiteListedSettings.add(IndexSettings.MAX_RESCORE_WINDOW_SETTING); + whiteListedSettings.add(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING); + whiteListedSettings.add(IndexSettings.DEFAULT_FIELD_SETTING); + whiteListedSettings.add(IndexSettings.QUERY_STRING_LENIENT_SETTING); + whiteListedSettings.add(IndexSettings.QUERY_STRING_ANALYZE_WILDCARD); + whiteListedSettings.add(IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD); + whiteListedSettings.add(IndexSettings.ALLOW_UNMAPPED); + whiteListedSettings.add(IndexSettings.INDEX_SEARCH_IDLE_AFTER); + whiteListedSettings.add(BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); + + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING); + + whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_SETTING); + whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); + + WHITELISTED_SETTINGS = 
Collections.unmodifiableSet(whiteListedSettings); + } + + static void validate(Request request, + IndexMetaData leaderIndex, + IndexMetaData followIndex, MapperService followerMapperService) { + if (leaderIndex == null) { + throw new IllegalArgumentException("leader index [" + request.leaderIndex + "] does not exist"); + } + if (followIndex == null) { + throw new IllegalArgumentException("follow index [" + request.followerIndex + "] does not exist"); + } + if (leaderIndex.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) == false) { + throw new IllegalArgumentException("leader index [" + request.leaderIndex + "] does not have soft deletes enabled"); + } + if (leaderIndex.getNumberOfShards() != followIndex.getNumberOfShards()) { + throw new IllegalArgumentException("leader index primary shards [" + leaderIndex.getNumberOfShards() + + "] does not match with the number of shards of the follow index [" + followIndex.getNumberOfShards() + "]"); + } + if (leaderIndex.getRoutingNumShards() != followIndex.getRoutingNumShards()) { + throw new IllegalArgumentException("leader index number_of_routing_shards [" + leaderIndex.getRoutingNumShards() + + "] does not match with the number_of_routing_shards of the follow index [" + followIndex.getRoutingNumShards() + "]"); + } + if (leaderIndex.getState() != IndexMetaData.State.OPEN || followIndex.getState() != IndexMetaData.State.OPEN) { + throw new IllegalArgumentException("leader and follow index must be open"); + } + if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(followIndex.getSettings()) == false) { + throw new IllegalArgumentException("the following index [" + request.followerIndex + "] is not ready " + + "to follow; the setting [" + CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey() + "] must be enabled."); + } + // Make a copy, remove settings that are allowed to be different and then compare if the settings are equal. + Settings leaderSettings = filter(leaderIndex.getSettings()); + Settings followerSettings = filter(followIndex.getSettings()); + if (leaderSettings.equals(followerSettings) == false) { + throw new IllegalArgumentException("the leader and follower index settings must be identical"); + } + + // Validates if the current follower mapping is mergable with the leader mapping. 
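validate above rejects a leader/follower pair unless their settings are identical once the always-different bookkeeping settings and the whitelisted dynamic settings are stripped by filter (shown further below). A plain-Java sketch of that filter-then-compare rule follows; the keys listed are illustrative stand-ins, not the actual whitelist:

```java
// Plain-Java sketch of the filter-then-compare rule enforced by validate:
// strip the settings that are allowed to differ, then require the rest to
// be identical between leader and follower.
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

class SettingsComparison {
    // Stand-in for WHITELISTED_SETTINGS plus the always-different settings
    // removed by filter() (index uuid, provided name, creation date, ...).
    static final Set<String> ALLOWED_TO_DIFFER = Set.of(
            "index.number_of_replicas", // whitelisted dynamic setting
            "index.uuid");              // always differs between the two indices

    static boolean settingsMatch(Map<String, String> leader, Map<String, String> follower) {
        Map<String, String> filteredLeader = new HashMap<>(leader);
        Map<String, String> filteredFollower = new HashMap<>(follower);
        ALLOWED_TO_DIFFER.forEach(filteredLeader::remove);
        ALLOWED_TO_DIFFER.forEach(filteredFollower::remove);
        return filteredLeader.equals(filteredFollower);
    }

    public static void main(String[] args) {
        Map<String, String> leader = Map.of("index.number_of_shards", "3", "index.number_of_replicas", "2");
        Map<String, String> follower = Map.of("index.number_of_shards", "3", "index.number_of_replicas", "0");
        System.out.println(settingsMatch(leader, follower)); // true: replica count may differ
    }
}
```

Beyond settings, validate also merges the leader mapping into the follower's MapperService, which is what the surrounding comment refers to.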
+ // This also validates for example whether specific mapper plugins have been installed + followerMapperService.merge(leaderIndex, MapperService.MergeReason.MAPPING_RECOVERY); + } + + private static Settings filter(Settings originalSettings) { + Settings.Builder settings = Settings.builder().put(originalSettings); + // Remove settings that are always going to be different between leader and follow index: + settings.remove(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey()); + settings.remove(IndexMetaData.SETTING_INDEX_UUID); + settings.remove(IndexMetaData.SETTING_INDEX_PROVIDED_NAME); + settings.remove(IndexMetaData.SETTING_CREATION_DATE); + + Iterator iterator = settings.keys().iterator(); + while (iterator.hasNext()) { + String key = iterator.next(); + for (Setting whitelistedSetting : WHITELISTED_SETTINGS) { + if (whitelistedSetting.match(key)) { + iterator.remove(); + break; + } + } + } + return settings.build(); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternAction.java new file mode 100644 index 00000000000..a01fd8e3bc2 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternAction.java @@ -0,0 +1,284 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class PutAutoFollowPatternAction extends Action { + + public static final String NAME = "cluster:admin/xpack/ccr/auto_follow_pattern/put"; + public static final PutAutoFollowPatternAction INSTANCE = new PutAutoFollowPatternAction(); + + private PutAutoFollowPatternAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + public static class Request extends AcknowledgedRequest implements ToXContentObject { + + static final ParseField LEADER_CLUSTER_ALIAS_FIELD = new ParseField("leader_cluster_alias"); + static final ParseField LEADER_INDEX_PATTERNS_FIELD = new ParseField("leader_index_patterns"); + static final ParseField FOLLOW_INDEX_NAME_PATTERN_FIELD = new ParseField("follow_index_name_pattern"); + + private static final ObjectParser PARSER = new ObjectParser<>("put_auto_follow_pattern_request", Request::new); + + static { + PARSER.declareString(Request::setLeaderClusterAlias, 
LEADER_CLUSTER_ALIAS_FIELD); + PARSER.declareStringArray(Request::setLeaderIndexPatterns, LEADER_INDEX_PATTERNS_FIELD); + PARSER.declareString(Request::setFollowIndexNamePattern, FOLLOW_INDEX_NAME_PATTERN_FIELD); + PARSER.declareInt(Request::setMaxBatchOperationCount, AutoFollowPattern.MAX_BATCH_OPERATION_COUNT); + PARSER.declareInt(Request::setMaxConcurrentReadBatches, AutoFollowPattern.MAX_CONCURRENT_READ_BATCHES); + PARSER.declareLong(Request::setMaxOperationSizeInBytes, AutoFollowPattern.MAX_BATCH_SIZE_IN_BYTES); + PARSER.declareInt(Request::setMaxConcurrentWriteBatches, AutoFollowPattern.MAX_CONCURRENT_WRITE_BATCHES); + PARSER.declareInt(Request::setMaxWriteBufferSize, AutoFollowPattern.MAX_WRITE_BUFFER_SIZE); + PARSER.declareField(Request::setRetryTimeout, + (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.RETRY_TIMEOUT.getPreferredName()), + ShardFollowTask.RETRY_TIMEOUT, ObjectParser.ValueType.STRING); + PARSER.declareField(Request::setIdleShardRetryDelay, + (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.IDLE_SHARD_RETRY_DELAY.getPreferredName()), + ShardFollowTask.IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); + } + + public static Request fromXContent(XContentParser parser, String remoteClusterAlias) throws IOException { + Request request = PARSER.parse(parser, null); + if (remoteClusterAlias != null) { + if (request.leaderClusterAlias == null) { + request.leaderClusterAlias = remoteClusterAlias; + } else { + if (request.leaderClusterAlias.equals(remoteClusterAlias) == false) { + throw new IllegalArgumentException("provided leaderClusterAlias is not equal"); + } + } + } + return request; + } + + private String leaderClusterAlias; + private List leaderIndexPatterns; + private String followIndexNamePattern; + + private Integer maxBatchOperationCount; + private Integer maxConcurrentReadBatches; + private Long maxOperationSizeInBytes; + private Integer maxConcurrentWriteBatches; + private Integer maxWriteBufferSize; + private TimeValue retryTimeout; + private TimeValue idleShardRetryDelay; + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (leaderClusterAlias == null) { + validationException = addValidationError("leaderClusterAlias is missing", validationException); + } + if (leaderIndexPatterns == null || leaderIndexPatterns.isEmpty()) { + validationException = addValidationError("leaderIndexPatterns is missing", validationException); + } + return validationException; + } + + public String getLeaderClusterAlias() { + return leaderClusterAlias; + } + + public void setLeaderClusterAlias(String leaderClusterAlias) { + this.leaderClusterAlias = leaderClusterAlias; + } + + public List getLeaderIndexPatterns() { + return leaderIndexPatterns; + } + + public void setLeaderIndexPatterns(List leaderIndexPatterns) { + this.leaderIndexPatterns = leaderIndexPatterns; + } + + public String getFollowIndexNamePattern() { + return followIndexNamePattern; + } + + public void setFollowIndexNamePattern(String followIndexNamePattern) { + this.followIndexNamePattern = followIndexNamePattern; + } + + public Integer getMaxBatchOperationCount() { + return maxBatchOperationCount; + } + + public void setMaxBatchOperationCount(Integer maxBatchOperationCount) { + this.maxBatchOperationCount = maxBatchOperationCount; + } + + public Integer getMaxConcurrentReadBatches() { + return maxConcurrentReadBatches; + } + + public void setMaxConcurrentReadBatches(Integer maxConcurrentReadBatches) { + 
this.maxConcurrentReadBatches = maxConcurrentReadBatches; + } + + public Long getMaxOperationSizeInBytes() { + return maxOperationSizeInBytes; + } + + public void setMaxOperationSizeInBytes(Long maxOperationSizeInBytes) { + this.maxOperationSizeInBytes = maxOperationSizeInBytes; + } + + public Integer getMaxConcurrentWriteBatches() { + return maxConcurrentWriteBatches; + } + + public void setMaxConcurrentWriteBatches(Integer maxConcurrentWriteBatches) { + this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; + } + + public Integer getMaxWriteBufferSize() { + return maxWriteBufferSize; + } + + public void setMaxWriteBufferSize(Integer maxWriteBufferSize) { + this.maxWriteBufferSize = maxWriteBufferSize; + } + + public TimeValue getRetryTimeout() { + return retryTimeout; + } + + public void setRetryTimeout(TimeValue retryTimeout) { + this.retryTimeout = retryTimeout; + } + + public TimeValue getIdleShardRetryDelay() { + return idleShardRetryDelay; + } + + public void setIdleShardRetryDelay(TimeValue idleShardRetryDelay) { + this.idleShardRetryDelay = idleShardRetryDelay; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + leaderClusterAlias = in.readString(); + leaderIndexPatterns = in.readList(StreamInput::readString); + followIndexNamePattern = in.readOptionalString(); + maxBatchOperationCount = in.readOptionalVInt(); + maxConcurrentReadBatches = in.readOptionalVInt(); + maxOperationSizeInBytes = in.readOptionalLong(); + maxConcurrentWriteBatches = in.readOptionalVInt(); + maxWriteBufferSize = in.readOptionalVInt(); + retryTimeout = in.readOptionalTimeValue(); + idleShardRetryDelay = in.readOptionalTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(leaderClusterAlias); + out.writeStringList(leaderIndexPatterns); + out.writeOptionalString(followIndexNamePattern); + out.writeOptionalVInt(maxBatchOperationCount); + out.writeOptionalVInt(maxConcurrentReadBatches); + out.writeOptionalLong(maxOperationSizeInBytes); + out.writeOptionalVInt(maxConcurrentWriteBatches); + out.writeOptionalVInt(maxWriteBufferSize); + out.writeOptionalTimeValue(retryTimeout); + out.writeOptionalTimeValue(idleShardRetryDelay); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(LEADER_CLUSTER_ALIAS_FIELD.getPreferredName(), leaderClusterAlias); + builder.field(LEADER_INDEX_PATTERNS_FIELD.getPreferredName(), leaderIndexPatterns); + if (followIndexNamePattern != null) { + builder.field(FOLLOW_INDEX_NAME_PATTERN_FIELD.getPreferredName(), followIndexNamePattern); + } + if (maxBatchOperationCount != null) { + builder.field(ShardFollowTask.MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); + } + if (maxOperationSizeInBytes != null) { + builder.field(ShardFollowTask.MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes); + } + if (maxWriteBufferSize != null) { + builder.field(ShardFollowTask.MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + } + if (maxConcurrentReadBatches != null) { + builder.field(ShardFollowTask.MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); + } + if (maxConcurrentWriteBatches != null) { + builder.field(ShardFollowTask.MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); + } + if (retryTimeout != null) { + 
builder.field(ShardFollowTask.RETRY_TIMEOUT.getPreferredName(), retryTimeout.getStringRep()); + } + if (idleShardRetryDelay != null) { + builder.field(ShardFollowTask.IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(leaderClusterAlias, request.leaderClusterAlias) && + Objects.equals(leaderIndexPatterns, request.leaderIndexPatterns) && + Objects.equals(followIndexNamePattern, request.followIndexNamePattern) && + Objects.equals(maxBatchOperationCount, request.maxBatchOperationCount) && + Objects.equals(maxConcurrentReadBatches, request.maxConcurrentReadBatches) && + Objects.equals(maxOperationSizeInBytes, request.maxOperationSizeInBytes) && + Objects.equals(maxConcurrentWriteBatches, request.maxConcurrentWriteBatches) && + Objects.equals(maxWriteBufferSize, request.maxWriteBufferSize) && + Objects.equals(retryTimeout, request.retryTimeout) && + Objects.equals(idleShardRetryDelay, request.idleShardRetryDelay); + } + + @Override + public int hashCode() { + return Objects.hash( + leaderClusterAlias, + leaderIndexPatterns, + followIndexNamePattern, + maxBatchOperationCount, + maxConcurrentReadBatches, + maxOperationSizeInBytes, + maxConcurrentWriteBatches, + maxWriteBufferSize, + retryTimeout, + idleShardRetryDelay + ); + } + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java new file mode 100644 index 00000000000..d102c6b5b7a --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -0,0 +1,321 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.single.shard.SingleShardRequest; +import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardNotStartedException; +import org.elasticsearch.index.shard.IndexShardState; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class ShardChangesAction extends Action { + + public static final ShardChangesAction INSTANCE = new ShardChangesAction(); + public static final String NAME = "indices:data/read/xpack/ccr/shard_changes"; + + private ShardChangesAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends SingleShardRequest { + + private long fromSeqNo; + private int maxOperationCount; + private ShardId shardId; + private long maxOperationSizeInBytes = ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; + + public Request(ShardId shardId) { + super(shardId.getIndexName()); + this.shardId = shardId; + } + + Request() { + } + + public ShardId getShard() { + return shardId; + } + + public long getFromSeqNo() { + return fromSeqNo; + } + + public void setFromSeqNo(long fromSeqNo) { + this.fromSeqNo = fromSeqNo; + } + + public int getMaxOperationCount() { + return maxOperationCount; + } + + public void setMaxOperationCount(int maxOperationCount) { + this.maxOperationCount = maxOperationCount; + } + + public long getMaxOperationSizeInBytes() { + return maxOperationSizeInBytes; + } + + public void setMaxOperationSizeInBytes(long maxOperationSizeInBytes) { + this.maxOperationSizeInBytes = maxOperationSizeInBytes; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (fromSeqNo < 0) { + validationException = addValidationError("fromSeqNo [" + fromSeqNo + "] cannot be lower than 0", validationException); + } + if (maxOperationCount < 0) { + validationException = addValidationError("maxOperationCount [" + maxOperationCount + + "] cannot be lower than 0", validationException); + } + if (maxOperationSizeInBytes <= 0) { + validationException = addValidationError("maxOperationSizeInBytes [" + maxOperationSizeInBytes + "] must be larger than 0", + validationException); 
+ } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + fromSeqNo = in.readVLong(); + maxOperationCount = in.readVInt(); + shardId = ShardId.readShardId(in); + maxOperationSizeInBytes = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(fromSeqNo); + out.writeVInt(maxOperationCount); + shardId.writeTo(out); + out.writeVLong(maxOperationSizeInBytes); + } + + + @Override + public boolean equals(final Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Request request = (Request) o; + return fromSeqNo == request.fromSeqNo && + maxOperationCount == request.maxOperationCount && + Objects.equals(shardId, request.shardId) && + maxOperationSizeInBytes == request.maxOperationSizeInBytes; + } + + @Override + public int hashCode() { + return Objects.hash(fromSeqNo, maxOperationCount, shardId, maxOperationSizeInBytes); + } + + @Override + public String toString() { + return "Request{" + + "fromSeqNo=" + fromSeqNo + + ", maxOperationCount=" + maxOperationCount + + ", shardId=" + shardId + + ", maxOperationSizeInBytes=" + maxOperationSizeInBytes + + '}'; + } + + } + + public static final class Response extends ActionResponse { + + private long mappingVersion; + + public long getMappingVersion() { + return mappingVersion; + } + + private long globalCheckpoint; + + public long getGlobalCheckpoint() { + return globalCheckpoint; + } + + private long maxSeqNo; + + public long getMaxSeqNo() { + return maxSeqNo; + } + + private Translog.Operation[] operations; + + public Translog.Operation[] getOperations() { + return operations; + } + + Response() { + } + + Response(final long mappingVersion, final long globalCheckpoint, final long maxSeqNo, final Translog.Operation[] operations) { + this.mappingVersion = mappingVersion; + this.globalCheckpoint = globalCheckpoint; + this.maxSeqNo = maxSeqNo; + this.operations = operations; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + mappingVersion = in.readVLong(); + globalCheckpoint = in.readZLong(); + maxSeqNo = in.readZLong(); + operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(mappingVersion); + out.writeZLong(globalCheckpoint); + out.writeZLong(maxSeqNo); + out.writeArray(Translog.Operation::writeOperation, operations); + } + + @Override + public boolean equals(final Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Response that = (Response) o; + return mappingVersion == that.mappingVersion && + globalCheckpoint == that.globalCheckpoint && + maxSeqNo == that.maxSeqNo && + Arrays.equals(operations, that.operations); + } + + @Override + public int hashCode() { + return Objects.hash(mappingVersion, globalCheckpoint, maxSeqNo, Arrays.hashCode(operations)); + } + } + + public static class TransportAction extends TransportSingleShardAction { + + private final IndicesService indicesService; + + @Inject + public TransportAction(Settings settings, + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + IndicesService indicesService) { + 
super(settings, NAME, threadPool, clusterService, transportService, actionFilters, + indexNameExpressionResolver, Request::new, ThreadPool.Names.GET); + this.indicesService = indicesService; + } + + @Override + protected Response shardOperation(Request request, ShardId shardId) throws IOException { + IndexService indexService = indicesService.indexServiceSafe(request.getShard().getIndex()); + IndexShard indexShard = indexService.getShard(request.getShard().id()); + final SeqNoStats seqNoStats = indexShard.seqNoStats(); + final long mappingVersion = clusterService.state().metaData().index(shardId.getIndex()).getMappingVersion(); + + final Translog.Operation[] operations = getOperations( + indexShard, + seqNoStats.getGlobalCheckpoint(), + request.fromSeqNo, + request.maxOperationCount, + request.maxOperationSizeInBytes); + return new Response(mappingVersion, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), operations); + } + + @Override + protected boolean resolveIndex(Request request) { + return false; + } + + @Override + protected ShardsIterator shards(ClusterState state, InternalRequest request) { + return state + .routingTable() + .shardRoutingTable(request.concreteIndex(), request.request().getShard().id()) + .activeInitializingShardsRandomIt(); + } + + @Override + protected Response newResponse() { + return new Response(); + } + + } + + private static final Translog.Operation[] EMPTY_OPERATIONS_ARRAY = new Translog.Operation[0]; + + /** + * Returns at most maxOperationCount operations from the specified from sequence number. + * This method will never return operations above the specified globalCheckpoint. + * + * Also if the sum of collected operations' size is above the specified maxOperationSizeInBytes then this method + * stops collecting more operations and returns what has been collected so far. + */ + static Translog.Operation[] getOperations(IndexShard indexShard, long globalCheckpoint, long fromSeqNo, int maxOperationCount, + long maxOperationSizeInBytes) throws IOException { + if (indexShard.state() != IndexShardState.STARTED) { + throw new IndexShardNotStartedException(indexShard.shardId(), indexShard.state()); + } + if (fromSeqNo > globalCheckpoint) { + return EMPTY_OPERATIONS_ARRAY; + } + int seenBytes = 0; + // - 1 is needed, because toSeqNo is inclusive + long toSeqNo = Math.min(globalCheckpoint, (fromSeqNo + maxOperationCount) - 1); + assert fromSeqNo <= toSeqNo : "invalid range from_seqno[" + fromSeqNo + "] > to_seqno[" + toSeqNo + "]"; + final List operations = new ArrayList<>(); + try (Translog.Snapshot snapshot = indexShard.newChangesSnapshot("ccr", fromSeqNo, toSeqNo, true)) { + Translog.Operation op; + while ((op = snapshot.next()) != null) { + operations.add(op); + seenBytes += op.estimateSize(); + if (seenBytes > maxOperationSizeInBytes) { + break; + } + } + } + return operations.toArray(EMPTY_OPERATIONS_ARRAY); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java new file mode 100644 index 00000000000..00e3aaaae2a --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -0,0 +1,929 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+
+package org.elasticsearch.xpack.ccr.action;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.support.TransportActions;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.transport.NetworkExceptionHelper;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.persistent.AllocatedPersistentTask;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse;
+
+import java.io.IOException;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Objects;
+import java.util.PriorityQueue;
+import java.util.Queue;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.LongConsumer;
+import java.util.function.LongSupplier;
+import java.util.stream.Collectors;
+
+/**
+ * The node task that fetches the write operations from a leader shard and
+ * persists these ops in the follower shard.
+ */
+public abstract class ShardFollowNodeTask extends AllocatedPersistentTask {
+
+    public static final int DEFAULT_MAX_BATCH_OPERATION_COUNT = 1024;
+    public static final int DEFAULT_MAX_CONCURRENT_READ_BATCHES = 1;
+    public static final int DEFAULT_MAX_CONCURRENT_WRITE_BATCHES = 1;
+    public static final int DEFAULT_MAX_WRITE_BUFFER_SIZE = 10240;
+    public static final long DEFAULT_MAX_BATCH_SIZE_IN_BYTES = Long.MAX_VALUE;
+    private static final int RETRY_LIMIT = 10;
+    public static final TimeValue DEFAULT_RETRY_TIMEOUT = new TimeValue(500);
+    public static final TimeValue DEFAULT_IDLE_SHARD_RETRY_DELAY = TimeValue.timeValueSeconds(10);
+
+    private static final Logger LOGGER = Loggers.getLogger(ShardFollowNodeTask.class);
+
+    private final String leaderIndex;
+    private final ShardFollowTask params;
+    private final TimeValue retryTimeout;
+    private final TimeValue idleShardChangesRequestDelay;
+    private final BiConsumer<TimeValue, Runnable> scheduler;
+    private final LongSupplier relativeTimeProvider;
+
+    private long leaderGlobalCheckpoint;
+    private long leaderMaxSeqNo;
+    private long lastRequestedSeqNo;
+    private long followerGlobalCheckpoint = 0;
+    private long followerMaxSeqNo = 0;
+    private int numConcurrentReads = 0;
+    private int numConcurrentWrites = 0;
+    private long currentMappingVersion = 0;
+    private long totalFetchTimeMillis = 0;
+    private long numberOfSuccessfulFetches = 0;
+    private long numberOfFailedFetches = 0;
+    private long operationsReceived = 0;
+    private long totalTransferredBytes = 0;
+    private long totalIndexTimeMillis = 0;
+    private long numberOfSuccessfulBulkOperations = 0;
+    private long numberOfFailedBulkOperations = 0;
+    private long numberOfOperationsIndexed = 0;
+    private long lastFetchTime = -1;
+    private final Queue<Translog.Operation> buffer = new PriorityQueue<>(Comparator.comparing(Translog.Operation::seqNo));
+    private final LinkedHashMap<Long, ElasticsearchException> fetchExceptions;
+
+    ShardFollowNodeTask(long id, String type, String action, String description, TaskId parentTask, Map<String, String> headers,
+                        ShardFollowTask params, BiConsumer<TimeValue, Runnable> scheduler, final LongSupplier relativeTimeProvider) {
+        super(id, type, action, description, parentTask, headers);
+        this.params = params;
+        this.scheduler = scheduler;
+        this.relativeTimeProvider = relativeTimeProvider;
+        this.retryTimeout = params.getRetryTimeout();
+        this.idleShardChangesRequestDelay = params.getIdleShardRetryDelay();
+        /*
+         * We keep track of the most recent fetch exceptions, with the number of exceptions that we track equal to the maximum number of
+         * concurrent fetches. For each failed fetch, we track the from sequence number associated with the request, and we clear the entry
+         * when the fetch task associated with that from sequence number succeeds.
+         */
+        this.fetchExceptions = new LinkedHashMap<Long, ElasticsearchException>() {
+            @Override
+            protected boolean removeEldestEntry(final Map.Entry<Long, ElasticsearchException> eldest) {
+                return size() > params.getMaxConcurrentReadBatches();
+            }
+        };
+
+        if (params.getLeaderClusterAlias() != null) {
+            leaderIndex = params.getLeaderClusterAlias() + ":" + params.getLeaderShardId().getIndexName();
+        } else {
+            leaderIndex = params.getLeaderShardId().getIndexName();
+        }
+    }
+
+    void start(
+            final long leaderGlobalCheckpoint,
+            final long leaderMaxSeqNo,
+            final long followerGlobalCheckpoint,
+            final long followerMaxSeqNo) {
+        /*
+         * While this should only ever be called once and before any other threads can touch these fields, we use synchronization here to
+         * avoid the need to declare these fields as volatile.
+         * That is, we are ensuring these fields are always accessed under the same lock.
+         */
+        synchronized (this) {
+            this.leaderGlobalCheckpoint = leaderGlobalCheckpoint;
+            this.leaderMaxSeqNo = leaderMaxSeqNo;
+            this.followerGlobalCheckpoint = followerGlobalCheckpoint;
+            this.followerMaxSeqNo = followerMaxSeqNo;
+            this.lastRequestedSeqNo = followerGlobalCheckpoint;
+        }
+
+        // Update the follower mapping; this gets us the leader mapping version and makes sure that leader and follower mapping are
+        // identical.
+        updateMapping(mappingVersion -> {
+            synchronized (ShardFollowNodeTask.this) {
+                currentMappingVersion = mappingVersion;
+            }
+            LOGGER.info("{} Started to follow leader shard {}, followGlobalCheckPoint={}, mappingVersion={}",
+                params.getFollowShardId(), params.getLeaderShardId(), followerGlobalCheckpoint, mappingVersion);
+            coordinateReads();
+        });
+    }
+
+    synchronized void coordinateReads() {
+        if (isStopped()) {
+            LOGGER.info("{} shard follow task has been stopped", params.getFollowShardId());
+            return;
+        }
+
+        LOGGER.trace("{} coordinate reads, lastRequestedSeqNo={}, leaderGlobalCheckpoint={}",
+            params.getFollowShardId(), lastRequestedSeqNo, leaderGlobalCheckpoint);
+        final int maxBatchOperationCount = params.getMaxBatchOperationCount();
+        while (hasReadBudget() && lastRequestedSeqNo < leaderGlobalCheckpoint) {
+            final long from = lastRequestedSeqNo + 1;
+            final long maxRequiredSeqNo = Math.min(leaderGlobalCheckpoint, from + maxBatchOperationCount - 1);
+            final int requestBatchCount;
+            if (numConcurrentReads == 0) {
+                // This is the only request; we can optimistically fetch more documents if possible, without enforcing max_required_seqno.
+                requestBatchCount = maxBatchOperationCount;
+            } else {
+                requestBatchCount = Math.toIntExact(maxRequiredSeqNo - from + 1);
+            }
+            assert 0 < requestBatchCount && requestBatchCount <= maxBatchOperationCount : "request_batch_count=" + requestBatchCount;
+            LOGGER.trace("{}[{} ongoing reads] read from_seqno={} max_required_seqno={} batch_count={}",
+                params.getFollowShardId(), numConcurrentReads, from, maxRequiredSeqNo, requestBatchCount);
+            numConcurrentReads++;
+            sendShardChangesRequest(from, requestBatchCount, maxRequiredSeqNo);
+            lastRequestedSeqNo = maxRequiredSeqNo;
+        }
+
+        if (numConcurrentReads == 0 && hasReadBudget()) {
+            assert lastRequestedSeqNo == leaderGlobalCheckpoint;
+            // Sneak a peek to see if there is anything new in the leader.
+            // If there is, we will happily accept it.
+            numConcurrentReads++;
+            long from = lastRequestedSeqNo + 1;
+            LOGGER.trace("{}[{}] peek read [{}]", params.getFollowShardId(), numConcurrentReads, from);
+            sendShardChangesRequest(from, maxBatchOperationCount, lastRequestedSeqNo);
+        }
+    }
+
+    private boolean hasReadBudget() {
+        assert Thread.holdsLock(this);
+        if (numConcurrentReads >= params.getMaxConcurrentReadBatches()) {
+            LOGGER.trace("{} no new reads, maximum number of concurrent reads has been reached [{}]",
+                params.getFollowShardId(), numConcurrentReads);
+            return false;
+        }
+        if (buffer.size() > params.getMaxWriteBufferSize()) {
+            LOGGER.trace("{} no new reads, buffer limit has been reached [{}]", params.getFollowShardId(), buffer.size());
+            return false;
+        }
+        return true;
+    }
+
+    private synchronized void coordinateWrites() {
+        if (isStopped()) {
+            LOGGER.info("{} shard follow task has been stopped", params.getFollowShardId());
+            return;
+        }
+
+        while (hasWriteBudget() && buffer.isEmpty() == false) {
+            long sumEstimatedSize = 0L;
+            int length = Math.min(params.getMaxBatchOperationCount(), buffer.size());
+            List<Translog.Operation> ops = new ArrayList<>(length);
+            for (int i = 0; i < length; i++) {
+                Translog.Operation op = buffer.remove();
+                ops.add(op);
+                sumEstimatedSize += op.estimateSize();
+                if (sumEstimatedSize > params.getMaxBatchSizeInBytes()) {
+                    break;
+                }
+            }
+            numConcurrentWrites++;
+            LOGGER.trace("{}[{}] write [{}/{}] [{}]", params.getFollowShardId(), numConcurrentWrites, ops.get(0).seqNo(),
+                ops.get(ops.size() - 1).seqNo(), ops.size());
+            sendBulkShardOperationsRequest(ops);
+        }
+    }
+
+    private boolean hasWriteBudget() {
+        assert Thread.holdsLock(this);
+        if (numConcurrentWrites >= params.getMaxConcurrentWriteBatches()) {
+            LOGGER.trace("{} maximum number of concurrent writes has been reached [{}]",
+                params.getFollowShardId(), numConcurrentWrites);
+            return false;
+        }
+        return true;
+    }
+
+    private void sendShardChangesRequest(long from, int maxOperationCount, long maxRequiredSeqNo) {
+        sendShardChangesRequest(from, maxOperationCount, maxRequiredSeqNo, new AtomicInteger(0));
+    }
+
+    private void sendShardChangesRequest(long from, int maxOperationCount, long maxRequiredSeqNo, AtomicInteger retryCounter) {
+        final long startTime = relativeTimeProvider.getAsLong();
+        synchronized (this) {
+            lastFetchTime = startTime;
+        }
+        innerSendShardChangesRequest(from, maxOperationCount,
+            response -> {
+                synchronized (ShardFollowNodeTask.this) {
+                    totalFetchTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime);
+                    numberOfSuccessfulFetches++;
+                    fetchExceptions.remove(from);
+                    operationsReceived += response.getOperations().length;
+                    totalTransferredBytes += Arrays.stream(response.getOperations()).mapToLong(Translog.Operation::estimateSize).sum();
+                }
+                handleReadResponse(from, maxRequiredSeqNo, response);
+            },
+            e -> {
+                synchronized (ShardFollowNodeTask.this) {
+                    totalFetchTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime);
+                    numberOfFailedFetches++;
+                    fetchExceptions.put(from, new ElasticsearchException(e));
+                }
+                handleFailure(e, retryCounter, () -> sendShardChangesRequest(from, maxOperationCount, maxRequiredSeqNo, retryCounter));
+            });
+    }
+
+    void handleReadResponse(long from, long maxRequiredSeqNo, ShardChangesAction.Response response) {
+        maybeUpdateMapping(response.getMappingVersion(), () -> innerHandleReadResponse(from, maxRequiredSeqNo, response));
+    }
+
+    /** Called when some operations are fetched from the leader shard. */
+    protected void onOperationsFetched(Translog.Operation[] operations) {
+
+    }
+
+    synchronized void innerHandleReadResponse(long from, long maxRequiredSeqNo, ShardChangesAction.Response response) {
+        onOperationsFetched(response.getOperations());
+        leaderGlobalCheckpoint = Math.max(leaderGlobalCheckpoint, response.getGlobalCheckpoint());
+        leaderMaxSeqNo = Math.max(leaderMaxSeqNo, response.getMaxSeqNo());
+        final long newFromSeqNo;
+        if (response.getOperations().length == 0) {
+            newFromSeqNo = from;
+        } else {
+            assert response.getOperations()[0].seqNo() == from :
+                "first operation is not what we asked for. From is [" + from + "], got " + response.getOperations()[0];
+            buffer.addAll(Arrays.asList(response.getOperations()));
+            final long maxSeqNo = response.getOperations()[response.getOperations().length - 1].seqNo();
+            assert maxSeqNo ==
+                Arrays.stream(response.getOperations()).mapToLong(Translog.Operation::seqNo).max().getAsLong();
+            newFromSeqNo = maxSeqNo + 1;
+            // Update last requested seq no as we may have gotten more than we asked for and we don't want to ask for it again.
+            lastRequestedSeqNo = Math.max(lastRequestedSeqNo, maxSeqNo);
+            assert lastRequestedSeqNo <= leaderGlobalCheckpoint : "lastRequestedSeqNo [" + lastRequestedSeqNo +
+                "] is larger than the global checkpoint [" + leaderGlobalCheckpoint + "]";
+            coordinateWrites();
+        }
+        if (newFromSeqNo <= maxRequiredSeqNo && isStopped() == false) {
+            int newSize = Math.toIntExact(maxRequiredSeqNo - newFromSeqNo + 1);
+            LOGGER.trace("{} received [{}] ops, still missing [{}/{}], continuing to read...",
+                params.getFollowShardId(), response.getOperations().length, newFromSeqNo, maxRequiredSeqNo);
+            sendShardChangesRequest(newFromSeqNo, newSize, maxRequiredSeqNo);
+        } else {
+            // read is completed, decrement
+            numConcurrentReads--;
+            if (response.getOperations().length == 0 && leaderGlobalCheckpoint == lastRequestedSeqNo) {
+                // we got nothing and we have no reason to believe asking again will get us more, treat shard as idle and delay
+                // future requests
+                LOGGER.trace("{} received no ops and no known ops to fetch, scheduling to coordinate reads",
+                    params.getFollowShardId());
+                scheduler.accept(idleShardChangesRequestDelay, this::coordinateReads);
+            } else {
+                coordinateReads();
+            }
+        }
+    }
+
+    private void sendBulkShardOperationsRequest(List<Translog.Operation> operations) {
+        sendBulkShardOperationsRequest(operations, new AtomicInteger(0));
+    }
+
+    private void sendBulkShardOperationsRequest(List<Translog.Operation> operations, AtomicInteger retryCounter) {
+        final long startTime = relativeTimeProvider.getAsLong();
+        innerSendBulkShardOperationsRequest(operations,
+            response -> {
+                synchronized (ShardFollowNodeTask.this) {
+                    totalIndexTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime);
+                    numberOfSuccessfulBulkOperations++;
+                    numberOfOperationsIndexed += operations.size();
+                }
+                handleWriteResponse(response);
+            },
+            e -> {
+                synchronized (ShardFollowNodeTask.this) {
+                    totalIndexTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime);
+                    numberOfFailedBulkOperations++;
+                }
+                handleFailure(e, retryCounter, () -> sendBulkShardOperationsRequest(operations, retryCounter));
+            }
+        );
+    }
+
+    private synchronized void handleWriteResponse(final BulkShardOperationsResponse response) {
+        this.followerGlobalCheckpoint = Math.max(this.followerGlobalCheckpoint, response.getGlobalCheckpoint());
+        this.followerMaxSeqNo = Math.max(this.followerMaxSeqNo, response.getMaxSeqNo());
+        numConcurrentWrites--;
+        assert numConcurrentWrites >= 0;
+        coordinateWrites();
+
+        // In case the buffer has more ops than is allowed, reads may all have been stopped;
+        // this invocation makes sure that we start a read when there is budget, in case no reads are being performed.
+        coordinateReads();
+    }
+
+    private synchronized void maybeUpdateMapping(Long minimumRequiredMappingVersion, Runnable task) {
+        if (currentMappingVersion >= minimumRequiredMappingVersion) {
+            LOGGER.trace("{} mapping version [{}] is higher than or equal to minimum required mapping version [{}]",
+                params.getFollowShardId(), currentMappingVersion, minimumRequiredMappingVersion);
+            task.run();
+        } else {
+            LOGGER.trace("{} updating mapping, mapping version [{}] is lower than minimum required mapping version [{}]",
+                params.getFollowShardId(), currentMappingVersion, minimumRequiredMappingVersion);
+            updateMapping(mappingVersion -> {
+                currentMappingVersion = mappingVersion;
+                task.run();
+            });
+        }
+    }
+
+    private void updateMapping(LongConsumer handler) {
+        updateMapping(handler, new AtomicInteger(0));
+    }
+
+    private void updateMapping(LongConsumer handler, AtomicInteger retryCounter) {
+        innerUpdateMapping(handler, e -> handleFailure(e, retryCounter, () -> updateMapping(handler, retryCounter)));
+    }
+
+    private void handleFailure(Exception e, AtomicInteger retryCounter, Runnable task) {
+        assert e != null;
+        if (shouldRetry(e)) {
+            if (isStopped() == false && retryCounter.incrementAndGet() <= RETRY_LIMIT) {
+                LOGGER.debug(new ParameterizedMessage("{} error during follow shard task, retrying...", params.getFollowShardId()), e);
+                scheduler.accept(retryTimeout, task);
+            } else {
+                markAsFailed(new ElasticsearchException("retrying failed [" + retryCounter.get() +
+                    "] times, aborting...", e));
+            }
+        } else {
+            markAsFailed(e);
+        }
+    }
+
+    private boolean shouldRetry(Exception e) {
+        return NetworkExceptionHelper.isConnectException(e) ||
+            NetworkExceptionHelper.isCloseConnectionException(e) ||
+            TransportActions.isShardNotAvailableException(e);
+    }
+
+    // These methods are protected for testing purposes:
+    protected abstract void innerUpdateMapping(LongConsumer handler, Consumer<Exception> errorHandler);
+
+    protected abstract void innerSendBulkShardOperationsRequest(
+            List<Translog.Operation> operations, Consumer<BulkShardOperationsResponse> handler, Consumer<Exception> errorHandler);
+
+    protected abstract void innerSendShardChangesRequest(long from, int maxOperationCount, Consumer<ShardChangesAction.Response> handler,
+                                                         Consumer<Exception> errorHandler);
+
+    @Override
+    protected void onCancelled() {
+        markAsCompleted();
+    }
+
+    protected boolean isStopped() {
+        return isCancelled() || isCompleted();
+    }
+
+    public ShardId getFollowShardId() {
+        return params.getFollowShardId();
+    }
+
+    @Override
+    public synchronized Status getStatus() {
+        final long timeSinceLastFetchMillis;
+        if (lastFetchTime != -1) {
+            timeSinceLastFetchMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - lastFetchTime);
+        } else {
+            // To avoid confusion when ccr didn't yet execute a fetch:
+            timeSinceLastFetchMillis = -1;
+        }
+        return new Status(
+            leaderIndex,
+            getFollowShardId().getId(),
+            leaderGlobalCheckpoint,
+            leaderMaxSeqNo,
+            followerGlobalCheckpoint,
+            followerMaxSeqNo,
+            lastRequestedSeqNo,
+            numConcurrentReads,
+            numConcurrentWrites,
+            buffer.size(),
+            currentMappingVersion,
+            totalFetchTimeMillis,
+            numberOfSuccessfulFetches,
+            numberOfFailedFetches,
+            operationsReceived,
+            totalTransferredBytes,
+            totalIndexTimeMillis,
+            numberOfSuccessfulBulkOperations,
+            numberOfFailedBulkOperations,
+            numberOfOperationsIndexed,
+            new TreeMap<>(fetchExceptions),
+            timeSinceLastFetchMillis);
+    }
+
+    public static class Status implements Task.Status {
+
+        public static final String STATUS_PARSER_NAME = "shard-follow-node-task-status";
+
+        static final ParseField LEADER_INDEX = new ParseField("leader_index");
+        static final ParseField SHARD_ID = new ParseField("shard_id");
+        static final ParseField LEADER_GLOBAL_CHECKPOINT_FIELD = new ParseField("leader_global_checkpoint");
+        static final ParseField LEADER_MAX_SEQ_NO_FIELD = new ParseField("leader_max_seq_no");
+        static final ParseField FOLLOWER_GLOBAL_CHECKPOINT_FIELD = new ParseField("follower_global_checkpoint");
+        static final ParseField FOLLOWER_MAX_SEQ_NO_FIELD = new ParseField("follower_max_seq_no");
+        static final ParseField LAST_REQUESTED_SEQ_NO_FIELD = new ParseField("last_requested_seq_no");
+        static final ParseField NUMBER_OF_CONCURRENT_READS_FIELD = new ParseField("number_of_concurrent_reads");
+        static final ParseField NUMBER_OF_CONCURRENT_WRITES_FIELD = new ParseField("number_of_concurrent_writes");
+        static final ParseField NUMBER_OF_QUEUED_WRITES_FIELD = new ParseField("number_of_queued_writes");
+        static final ParseField MAPPING_VERSION_FIELD = new ParseField("mapping_version");
+        static final ParseField TOTAL_FETCH_TIME_MILLIS_FIELD = new ParseField("total_fetch_time_millis");
+        static final ParseField NUMBER_OF_SUCCESSFUL_FETCHES_FIELD = new ParseField("number_of_successful_fetches");
+        static final ParseField NUMBER_OF_FAILED_FETCHES_FIELD = new ParseField("number_of_failed_fetches");
+        static final ParseField OPERATIONS_RECEIVED_FIELD = new ParseField("operations_received");
+        static final ParseField TOTAL_TRANSFERRED_BYTES = new ParseField("total_transferred_bytes");
+        static final ParseField TOTAL_INDEX_TIME_MILLIS_FIELD = new ParseField("total_index_time_millis");
+        static final ParseField NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD = new ParseField("number_of_successful_bulk_operations");
+        static final ParseField NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD = new ParseField("number_of_failed_bulk_operations");
+        static final ParseField NUMBER_OF_OPERATIONS_INDEXED_FIELD = new ParseField("number_of_operations_indexed");
+        static final ParseField FETCH_EXCEPTIONS = new ParseField("fetch_exceptions");
+        static final ParseField TIME_SINCE_LAST_FETCH_MILLIS_FIELD = new ParseField("time_since_last_fetch_millis");
+
+        @SuppressWarnings("unchecked")
+        static final ConstructingObjectParser<Status, Void> STATUS_PARSER = new ConstructingObjectParser<>(STATUS_PARSER_NAME,
+            args -> new Status(
+                (String) args[0],
+                (int) args[1],
+                (long) args[2],
+                (long) args[3],
+                (long) args[4],
+                (long) args[5],
+                (long) args[6],
+                (int) args[7],
+                (int) args[8],
+                (int) args[9],
+                (long) args[10],
+                (long) args[11],
+                (long) args[12],
+                (long) args[13],
+                (long) args[14],
+                (long) args[15],
+                (long) args[16],
+                (long) args[17],
+                (long) args[18],
+                (long) args[19],
+                new TreeMap<>(
+                    ((List<Map.Entry<Long, ElasticsearchException>>) args[20])
+                        .stream()
+                        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))),
+                (long) args[21]));
+
+        public static final String FETCH_EXCEPTIONS_ENTRY_PARSER_NAME = "shard-follow-node-task-status-fetch-exceptions-entry";
+
+        static final ConstructingObjectParser<Map.Entry<Long, ElasticsearchException>, Void> FETCH_EXCEPTIONS_ENTRY_PARSER =
+            new ConstructingObjectParser<>(
+                FETCH_EXCEPTIONS_ENTRY_PARSER_NAME,
+                args -> new AbstractMap.SimpleEntry<>((long) args[0], (ElasticsearchException) args[1]));
+
+        static {
+            STATUS_PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_INDEX);
STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), SHARD_ID); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_GLOBAL_CHECKPOINT_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_MAX_SEQ_NO_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_GLOBAL_CHECKPOINT_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_MAX_SEQ_NO_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LAST_REQUESTED_SEQ_NO_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_READS_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_WRITES_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_QUEUED_WRITES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), MAPPING_VERSION_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_FETCH_TIME_MILLIS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_FETCHES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_FETCHES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), OPERATIONS_RECEIVED_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_TRANSFERRED_BYTES); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_INDEX_TIME_MILLIS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_OPERATIONS_INDEXED_FIELD); + STATUS_PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_PARSER, FETCH_EXCEPTIONS); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIME_SINCE_LAST_FETCH_MILLIS_FIELD); + } + + static final ParseField FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO = new ParseField("from_seq_no"); + static final ParseField FETCH_EXCEPTIONS_ENTRY_EXCEPTION = new ParseField("exception"); + + static { + FETCH_EXCEPTIONS_ENTRY_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO); + FETCH_EXCEPTIONS_ENTRY_PARSER.declareObject( + ConstructingObjectParser.constructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + FETCH_EXCEPTIONS_ENTRY_EXCEPTION); + } + + private final String leaderIndex; + + public String leaderIndex() { + return leaderIndex; + } + + private final int shardId; + + public int getShardId() { + return shardId; + } + + private final long leaderGlobalCheckpoint; + + public long leaderGlobalCheckpoint() { + return leaderGlobalCheckpoint; + } + + private final long leaderMaxSeqNo; + + public long leaderMaxSeqNo() { + return leaderMaxSeqNo; + } + + private final long followerGlobalCheckpoint; + + public long followerGlobalCheckpoint() { + return followerGlobalCheckpoint; + } + + private final long followerMaxSeqNo; + + public long followerMaxSeqNo() { + return followerMaxSeqNo; + } + + private final long lastRequestedSeqNo; + + public long lastRequestedSeqNo() { + return lastRequestedSeqNo; + } + + private final int numberOfConcurrentReads; + + public int numberOfConcurrentReads() { + 
return numberOfConcurrentReads; + } + + private final int numberOfConcurrentWrites; + + public int numberOfConcurrentWrites() { + return numberOfConcurrentWrites; + } + + private final int numberOfQueuedWrites; + + public int numberOfQueuedWrites() { + return numberOfQueuedWrites; + } + + private final long mappingVersion; + + public long mappingVersion() { + return mappingVersion; + } + + private final long totalFetchTimeMillis; + + public long totalFetchTimeMillis() { + return totalFetchTimeMillis; + } + + private final long numberOfSuccessfulFetches; + + public long numberOfSuccessfulFetches() { + return numberOfSuccessfulFetches; + } + + private final long numberOfFailedFetches; + + public long numberOfFailedFetches() { + return numberOfFailedFetches; + } + + private final long operationsReceived; + + public long operationsReceived() { + return operationsReceived; + } + + private final long totalTransferredBytes; + + public long totalTransferredBytes() { + return totalTransferredBytes; + } + + private final long totalIndexTimeMillis; + + public long totalIndexTimeMillis() { + return totalIndexTimeMillis; + } + + private final long numberOfSuccessfulBulkOperations; + + public long numberOfSuccessfulBulkOperations() { + return numberOfSuccessfulBulkOperations; + } + + private final long numberOfFailedBulkOperations; + + public long numberOfFailedBulkOperations() { + return numberOfFailedBulkOperations; + } + + private final long numberOfOperationsIndexed; + + public long numberOfOperationsIndexed() { + return numberOfOperationsIndexed; + } + + private final NavigableMap fetchExceptions; + + public NavigableMap fetchExceptions() { + return fetchExceptions; + } + + private final long timeSinceLastFetchMillis; + + public long timeSinceLastFetchMillis() { + return timeSinceLastFetchMillis; + } + + Status( + final String leaderIndex, + final int shardId, + final long leaderGlobalCheckpoint, + final long leaderMaxSeqNo, + final long followerGlobalCheckpoint, + final long followerMaxSeqNo, + final long lastRequestedSeqNo, + final int numberOfConcurrentReads, + final int numberOfConcurrentWrites, + final int numberOfQueuedWrites, + final long mappingVersion, + final long totalFetchTimeMillis, + final long numberOfSuccessfulFetches, + final long numberOfFailedFetches, + final long operationsReceived, + final long totalTransferredBytes, + final long totalIndexTimeMillis, + final long numberOfSuccessfulBulkOperations, + final long numberOfFailedBulkOperations, + final long numberOfOperationsIndexed, + final NavigableMap fetchExceptions, + final long timeSinceLastFetchMillis) { + this.leaderIndex = leaderIndex; + this.shardId = shardId; + this.leaderGlobalCheckpoint = leaderGlobalCheckpoint; + this.leaderMaxSeqNo = leaderMaxSeqNo; + this.followerGlobalCheckpoint = followerGlobalCheckpoint; + this.followerMaxSeqNo = followerMaxSeqNo; + this.lastRequestedSeqNo = lastRequestedSeqNo; + this.numberOfConcurrentReads = numberOfConcurrentReads; + this.numberOfConcurrentWrites = numberOfConcurrentWrites; + this.numberOfQueuedWrites = numberOfQueuedWrites; + this.mappingVersion = mappingVersion; + this.totalFetchTimeMillis = totalFetchTimeMillis; + this.numberOfSuccessfulFetches = numberOfSuccessfulFetches; + this.numberOfFailedFetches = numberOfFailedFetches; + this.operationsReceived = operationsReceived; + this.totalTransferredBytes = totalTransferredBytes; + this.totalIndexTimeMillis = totalIndexTimeMillis; + this.numberOfSuccessfulBulkOperations = numberOfSuccessfulBulkOperations; + 
this.numberOfFailedBulkOperations = numberOfFailedBulkOperations; + this.numberOfOperationsIndexed = numberOfOperationsIndexed; + this.fetchExceptions = Objects.requireNonNull(fetchExceptions); + this.timeSinceLastFetchMillis = timeSinceLastFetchMillis; + } + + public Status(final StreamInput in) throws IOException { + this.leaderIndex = in.readString(); + this.shardId = in.readVInt(); + this.leaderGlobalCheckpoint = in.readZLong(); + this.leaderMaxSeqNo = in.readZLong(); + this.followerGlobalCheckpoint = in.readZLong(); + this.followerMaxSeqNo = in.readZLong(); + this.lastRequestedSeqNo = in.readZLong(); + this.numberOfConcurrentReads = in.readVInt(); + this.numberOfConcurrentWrites = in.readVInt(); + this.numberOfQueuedWrites = in.readVInt(); + this.mappingVersion = in.readVLong(); + this.totalFetchTimeMillis = in.readVLong(); + this.numberOfSuccessfulFetches = in.readVLong(); + this.numberOfFailedFetches = in.readVLong(); + this.operationsReceived = in.readVLong(); + this.totalTransferredBytes = in.readVLong(); + this.totalIndexTimeMillis = in.readVLong(); + this.numberOfSuccessfulBulkOperations = in.readVLong(); + this.numberOfFailedBulkOperations = in.readVLong(); + this.numberOfOperationsIndexed = in.readVLong(); + this.fetchExceptions = new TreeMap<>(in.readMap(StreamInput::readVLong, StreamInput::readException)); + this.timeSinceLastFetchMillis = in.readZLong(); + } + + @Override + public String getWriteableName() { + return STATUS_PARSER_NAME; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(leaderIndex); + out.writeVInt(shardId); + out.writeZLong(leaderGlobalCheckpoint); + out.writeZLong(leaderMaxSeqNo); + out.writeZLong(followerGlobalCheckpoint); + out.writeZLong(followerMaxSeqNo); + out.writeZLong(lastRequestedSeqNo); + out.writeVInt(numberOfConcurrentReads); + out.writeVInt(numberOfConcurrentWrites); + out.writeVInt(numberOfQueuedWrites); + out.writeVLong(mappingVersion); + out.writeVLong(totalFetchTimeMillis); + out.writeVLong(numberOfSuccessfulFetches); + out.writeVLong(numberOfFailedFetches); + out.writeVLong(operationsReceived); + out.writeVLong(totalTransferredBytes); + out.writeVLong(totalIndexTimeMillis); + out.writeVLong(numberOfSuccessfulBulkOperations); + out.writeVLong(numberOfFailedBulkOperations); + out.writeVLong(numberOfOperationsIndexed); + out.writeMap(fetchExceptions, StreamOutput::writeVLong, StreamOutput::writeException); + out.writeZLong(timeSinceLastFetchMillis); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + builder.field(LEADER_INDEX.getPreferredName(), leaderIndex); + builder.field(SHARD_ID.getPreferredName(), shardId); + builder.field(LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), leaderGlobalCheckpoint); + builder.field(LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), leaderMaxSeqNo); + builder.field(FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), followerGlobalCheckpoint); + builder.field(FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), followerMaxSeqNo); + builder.field(LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), lastRequestedSeqNo); + builder.field(NUMBER_OF_CONCURRENT_READS_FIELD.getPreferredName(), numberOfConcurrentReads); + builder.field(NUMBER_OF_CONCURRENT_WRITES_FIELD.getPreferredName(), numberOfConcurrentWrites); + builder.field(NUMBER_OF_QUEUED_WRITES_FIELD.getPreferredName(), numberOfQueuedWrites); + builder.field(MAPPING_VERSION_FIELD.getPreferredName(), 
mappingVersion); + builder.humanReadableField( + TOTAL_FETCH_TIME_MILLIS_FIELD.getPreferredName(), + "total_fetch_time", + new TimeValue(totalFetchTimeMillis, TimeUnit.MILLISECONDS)); + builder.field(NUMBER_OF_SUCCESSFUL_FETCHES_FIELD.getPreferredName(), numberOfSuccessfulFetches); + builder.field(NUMBER_OF_FAILED_FETCHES_FIELD.getPreferredName(), numberOfFailedFetches); + builder.field(OPERATIONS_RECEIVED_FIELD.getPreferredName(), operationsReceived); + builder.humanReadableField( + TOTAL_TRANSFERRED_BYTES.getPreferredName(), + "total_transferred", + new ByteSizeValue(totalTransferredBytes, ByteSizeUnit.BYTES)); + builder.humanReadableField( + TOTAL_INDEX_TIME_MILLIS_FIELD.getPreferredName(), + "total_index_time", + new TimeValue(totalIndexTimeMillis, TimeUnit.MILLISECONDS)); + builder.field(NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfSuccessfulBulkOperations); + builder.field(NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfFailedBulkOperations); + builder.field(NUMBER_OF_OPERATIONS_INDEXED_FIELD.getPreferredName(), numberOfOperationsIndexed); + builder.startArray(FETCH_EXCEPTIONS.getPreferredName()); + { + for (final Map.Entry entry : fetchExceptions.entrySet()) { + builder.startObject(); + { + builder.field(FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), entry.getKey()); + builder.field(FETCH_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); + builder.startObject(); + { + ElasticsearchException.generateThrowableXContent(builder, params, entry.getValue()); + } + builder.endObject(); + } + builder.endObject(); + } + } + builder.endArray(); + builder.humanReadableField( + TIME_SINCE_LAST_FETCH_MILLIS_FIELD.getPreferredName(), + "time_since_last_fetch", + new TimeValue(timeSinceLastFetchMillis, TimeUnit.MILLISECONDS)); + } + builder.endObject(); + return builder; + } + + public static Status fromXContent(final XContentParser parser) { + return STATUS_PARSER.apply(parser, null); + } + + @Override + public boolean equals(final Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Status that = (Status) o; + return leaderIndex.equals(that.leaderIndex) && + shardId == that.shardId && + leaderGlobalCheckpoint == that.leaderGlobalCheckpoint && + leaderMaxSeqNo == that.leaderMaxSeqNo && + followerGlobalCheckpoint == that.followerGlobalCheckpoint && + followerMaxSeqNo == that.followerMaxSeqNo && + lastRequestedSeqNo == that.lastRequestedSeqNo && + numberOfConcurrentReads == that.numberOfConcurrentReads && + numberOfConcurrentWrites == that.numberOfConcurrentWrites && + numberOfQueuedWrites == that.numberOfQueuedWrites && + mappingVersion == that.mappingVersion && + totalFetchTimeMillis == that.totalFetchTimeMillis && + numberOfSuccessfulFetches == that.numberOfSuccessfulFetches && + numberOfFailedFetches == that.numberOfFailedFetches && + operationsReceived == that.operationsReceived && + totalTransferredBytes == that.totalTransferredBytes && + numberOfSuccessfulBulkOperations == that.numberOfSuccessfulBulkOperations && + numberOfFailedBulkOperations == that.numberOfFailedBulkOperations && + numberOfOperationsIndexed == that.numberOfOperationsIndexed && + /* + * ElasticsearchException does not implement equals so we will assume the fetch exceptions are equal if they are equal + * up to the key set and their messages. Note that we are relying on the fact that the fetch exceptions are ordered by + * keys. 
+ */ + fetchExceptions.keySet().equals(that.fetchExceptions.keySet()) && + getFetchExceptionMessages(this).equals(getFetchExceptionMessages(that)) && + timeSinceLastFetchMillis == that.timeSinceLastFetchMillis; + } + + @Override + public int hashCode() { + return Objects.hash( + leaderIndex, + shardId, + leaderGlobalCheckpoint, + leaderMaxSeqNo, + followerGlobalCheckpoint, + followerMaxSeqNo, + lastRequestedSeqNo, + numberOfConcurrentReads, + numberOfConcurrentWrites, + numberOfQueuedWrites, + mappingVersion, + totalFetchTimeMillis, + numberOfSuccessfulFetches, + numberOfFailedFetches, + operationsReceived, + totalTransferredBytes, + numberOfSuccessfulBulkOperations, + numberOfFailedBulkOperations, + numberOfOperationsIndexed, + /* + * ElasticsearchException does not implement hash code so we will compute the hash code based on the key set and the + * messages. Note that we are relying on the fact that the fetch exceptions are ordered by keys. + */ + fetchExceptions.keySet(), + getFetchExceptionMessages(this), + timeSinceLastFetchMillis); + } + + private static List getFetchExceptionMessages(final Status status) { + return status.fetchExceptions().values().stream().map(ElasticsearchException::getMessage).collect(Collectors.toList()); + } + + public String toString() { + return Strings.toString(this); + } + + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java new file mode 100644 index 00000000000..82482792f39 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java @@ -0,0 +1,251 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { + + public static final String NAME = "xpack/ccr/shard_follow_task"; + + // list of headers that will be stored when a job is created + public static final Set HEADER_FILTERS = + Collections.unmodifiableSet(new HashSet<>(Arrays.asList("es-security-runas-user", "_xpack_security_authentication"))); + + static final ParseField LEADER_CLUSTER_ALIAS_FIELD = new ParseField("leader_cluster_alias"); + static final ParseField FOLLOW_SHARD_INDEX_FIELD = new ParseField("follow_shard_index"); + static final ParseField FOLLOW_SHARD_INDEX_UUID_FIELD = new ParseField("follow_shard_index_uuid"); + static final ParseField FOLLOW_SHARD_SHARDID_FIELD = new ParseField("follow_shard_shard"); + static final ParseField LEADER_SHARD_INDEX_FIELD = new ParseField("leader_shard_index"); + static final ParseField LEADER_SHARD_INDEX_UUID_FIELD = new ParseField("leader_shard_index_uuid"); + static final ParseField LEADER_SHARD_SHARDID_FIELD = new ParseField("leader_shard_shard"); + static final ParseField HEADERS = new ParseField("headers"); + public static final ParseField MAX_BATCH_OPERATION_COUNT = new ParseField("max_batch_operation_count"); + public static final ParseField MAX_CONCURRENT_READ_BATCHES = new ParseField("max_concurrent_read_batches"); + public static final ParseField MAX_BATCH_SIZE_IN_BYTES = new ParseField("max_batch_size_in_bytes"); + public static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); + public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); + public static final ParseField RETRY_TIMEOUT = new ParseField("retry_timeout"); + public static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay"); + + @SuppressWarnings("unchecked") + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + (a) -> new ShardFollowTask((String) a[0], new ShardId((String) a[1], (String) a[2], (int) a[3]), + new ShardId((String) a[4], (String) a[5], (int) a[6]), (int) a[7], (int) a[8], (long) a[9], + (int) a[10], (int) a[11], (TimeValue) a[12], (TimeValue) a[13], (Map) a[14])); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), LEADER_CLUSTER_ALIAS_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), FOLLOW_SHARD_INDEX_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), FOLLOW_SHARD_INDEX_UUID_FIELD); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), FOLLOW_SHARD_SHARDID_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), 
LEADER_SHARD_INDEX_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_SHARD_INDEX_UUID_FIELD); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), LEADER_SHARD_SHARDID_FIELD); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_BATCH_OPERATION_COUNT); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_CONCURRENT_READ_BATCHES); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), MAX_BATCH_SIZE_IN_BYTES); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_CONCURRENT_WRITE_BATCHES); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_BUFFER_SIZE); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), RETRY_TIMEOUT.getPreferredName()), + RETRY_TIMEOUT, ObjectParser.ValueType.STRING); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()), + IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.mapStrings(), HEADERS); + } + + private final String leaderClusterAlias; + private final ShardId followShardId; + private final ShardId leaderShardId; + private final int maxBatchOperationCount; + private final int maxConcurrentReadBatches; + private final long maxBatchSizeInBytes; + private final int maxConcurrentWriteBatches; + private final int maxWriteBufferSize; + private final TimeValue retryTimeout; + private final TimeValue idleShardRetryDelay; + private final Map headers; + + ShardFollowTask(String leaderClusterAlias, ShardId followShardId, ShardId leaderShardId, int maxBatchOperationCount, + int maxConcurrentReadBatches, long maxBatchSizeInBytes, int maxConcurrentWriteBatches, + int maxWriteBufferSize, TimeValue retryTimeout, TimeValue idleShardRetryDelay, Map headers) { + this.leaderClusterAlias = leaderClusterAlias; + this.followShardId = followShardId; + this.leaderShardId = leaderShardId; + this.maxBatchOperationCount = maxBatchOperationCount; + this.maxConcurrentReadBatches = maxConcurrentReadBatches; + this.maxBatchSizeInBytes = maxBatchSizeInBytes; + this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; + this.maxWriteBufferSize = maxWriteBufferSize; + this.retryTimeout = retryTimeout; + this.idleShardRetryDelay = idleShardRetryDelay; + this.headers = headers != null ? 
Collections.unmodifiableMap(headers) : Collections.emptyMap();
+    }
+
+    public ShardFollowTask(StreamInput in) throws IOException {
+        this.leaderClusterAlias = in.readOptionalString();
+        this.followShardId = ShardId.readShardId(in);
+        this.leaderShardId = ShardId.readShardId(in);
+        this.maxBatchOperationCount = in.readVInt();
+        this.maxConcurrentReadBatches = in.readVInt();
+        this.maxBatchSizeInBytes = in.readVLong();
+        this.maxConcurrentWriteBatches = in.readVInt();
+        this.maxWriteBufferSize = in.readVInt();
+        this.retryTimeout = in.readTimeValue();
+        this.idleShardRetryDelay = in.readTimeValue();
+        this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString));
+    }
+
+    public String getLeaderClusterAlias() {
+        return leaderClusterAlias;
+    }
+
+    public ShardId getFollowShardId() {
+        return followShardId;
+    }
+
+    public ShardId getLeaderShardId() {
+        return leaderShardId;
+    }
+
+    public int getMaxBatchOperationCount() {
+        return maxBatchOperationCount;
+    }
+
+    public int getMaxConcurrentReadBatches() {
+        return maxConcurrentReadBatches;
+    }
+
+    public int getMaxConcurrentWriteBatches() {
+        return maxConcurrentWriteBatches;
+    }
+
+    public int getMaxWriteBufferSize() {
+        return maxWriteBufferSize;
+    }
+
+    public long getMaxBatchSizeInBytes() {
+        return maxBatchSizeInBytes;
+    }
+
+    public TimeValue getRetryTimeout() {
+        return retryTimeout;
+    }
+
+    public TimeValue getIdleShardRetryDelay() {
+        return idleShardRetryDelay;
+    }
+
+    public String getTaskId() {
+        return followShardId.getIndex().getUUID() + "-" + followShardId.getId();
+    }
+
+    public Map<String, String> getHeaders() {
+        return headers;
+    }
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeOptionalString(leaderClusterAlias);
+        followShardId.writeTo(out);
+        leaderShardId.writeTo(out);
+        out.writeVInt(maxBatchOperationCount);
+        out.writeVInt(maxConcurrentReadBatches);
+        out.writeVLong(maxBatchSizeInBytes);
+        out.writeVInt(maxConcurrentWriteBatches);
+        out.writeVInt(maxWriteBufferSize);
+        out.writeTimeValue(retryTimeout);
+        out.writeTimeValue(idleShardRetryDelay);
+        out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString);
+    }
+
+    public static ShardFollowTask fromXContent(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        if (leaderClusterAlias != null) {
+            builder.field(LEADER_CLUSTER_ALIAS_FIELD.getPreferredName(), leaderClusterAlias);
+        }
+        builder.field(FOLLOW_SHARD_INDEX_FIELD.getPreferredName(), followShardId.getIndex().getName());
+        builder.field(FOLLOW_SHARD_INDEX_UUID_FIELD.getPreferredName(), followShardId.getIndex().getUUID());
+        builder.field(FOLLOW_SHARD_SHARDID_FIELD.getPreferredName(), followShardId.id());
+        builder.field(LEADER_SHARD_INDEX_FIELD.getPreferredName(), leaderShardId.getIndex().getName());
+        builder.field(LEADER_SHARD_INDEX_UUID_FIELD.getPreferredName(), leaderShardId.getIndex().getUUID());
+        builder.field(LEADER_SHARD_SHARDID_FIELD.getPreferredName(), leaderShardId.id());
+        builder.field(MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount);
+        builder.field(MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches);
+        builder.field(MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxBatchSizeInBytes);
+        builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(),
maxConcurrentWriteBatches); + builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + builder.field(RETRY_TIMEOUT.getPreferredName(), retryTimeout.getStringRep()); + builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); + builder.field(HEADERS.getPreferredName(), headers); + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ShardFollowTask that = (ShardFollowTask) o; + return Objects.equals(leaderClusterAlias, that.leaderClusterAlias) && + Objects.equals(followShardId, that.followShardId) && + Objects.equals(leaderShardId, that.leaderShardId) && + maxBatchOperationCount == that.maxBatchOperationCount && + maxConcurrentReadBatches == that.maxConcurrentReadBatches && + maxConcurrentWriteBatches == that.maxConcurrentWriteBatches && + maxBatchSizeInBytes == that.maxBatchSizeInBytes && + maxWriteBufferSize == that.maxWriteBufferSize && + Objects.equals(retryTimeout, that.retryTimeout) && + Objects.equals(idleShardRetryDelay, that.idleShardRetryDelay) && + Objects.equals(headers, that.headers); + } + + @Override + public int hashCode() { + return Objects.hash(leaderClusterAlias, followShardId, leaderShardId, maxBatchOperationCount, maxConcurrentReadBatches, + maxConcurrentWriteBatches, maxBatchSizeInBytes, maxWriteBufferSize, retryTimeout, idleShardRetryDelay, headers); + } + + public String toString() { + return Strings.toString(this); + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_6_4_0; + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java new file mode 100644 index 00000000000..83e3e4806e1 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -0,0 +1,207 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
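The `writeTo` method and the `StreamInput` constructor above must stay symmetric field for field (in particular, `maxBatchOperationCount` is an `int` read with `readVInt`, so it has to be written with `writeVInt`). A minimal round-trip sketch one could drop into a unit-test class to catch such mismatches; the helper name is illustrative and not part of this change:

```java
import java.io.IOException;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

// Hypothetical test helper: serialize a task and read it back.
static ShardFollowTask copyViaWire(ShardFollowTask task) throws IOException {
    try (BytesStreamOutput out = new BytesStreamOutput()) {
        task.writeTo(out);
        try (StreamInput in = out.bytes().streamInput()) {
            // the copy equals(task) only if writeTo mirrors the StreamInput constructor
            return new ShardFollowTask(in);
        }
    }
}
```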
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.FilterClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.persistent.PersistentTaskState; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsAction; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsRequest; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.LongConsumer; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +public class ShardFollowTasksExecutor extends PersistentTasksExecutor { + + private final Client client; + private final ThreadPool threadPool; + + public ShardFollowTasksExecutor(Settings settings, Client client, ThreadPool threadPool) { + super(settings, ShardFollowTask.NAME, Ccr.CCR_THREAD_POOL_NAME); + this.client = client; + this.threadPool = threadPool; + } + + @Override + public void validate(ShardFollowTask params, ClusterState clusterState) { + if (params.getLeaderClusterAlias() == null) { + // We can only validate IndexRoutingTable in local cluster, + // for remote cluster we would need to make a remote call and we cannot do this here. 
+ IndexRoutingTable routingTable = clusterState.getRoutingTable().index(params.getLeaderShardId().getIndex()); + if (routingTable.shard(params.getLeaderShardId().id()).primaryShard().started() == false) { + throw new IllegalArgumentException("Not all copies of leader shard are started"); + } + } + + IndexRoutingTable routingTable = clusterState.getRoutingTable().index(params.getFollowShardId().getIndex()); + if (routingTable.shard(params.getFollowShardId().id()).primaryShard().started() == false) { + throw new IllegalArgumentException("Not all copies of follow shard are started"); + } + } + + @Override + protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId, + PersistentTasksCustomMetaData.PersistentTask taskInProgress, + Map headers) { + ShardFollowTask params = taskInProgress.getParams(); + final Client leaderClient; + if (params.getLeaderClusterAlias() != null) { + leaderClient = wrapClient(client.getRemoteClusterClient(params.getLeaderClusterAlias()), params); + } else { + leaderClient = wrapClient(client, params); + } + Client followerClient = wrapClient(client, params); + BiConsumer scheduler = + (delay, command) -> threadPool.schedule(delay, Ccr.CCR_THREAD_POOL_NAME, command); + return new ShardFollowNodeTask( + id, type, action, getDescription(taskInProgress), parentTaskId, headers, params, scheduler, System::nanoTime) { + + @Override + protected void innerUpdateMapping(LongConsumer handler, Consumer errorHandler) { + Index leaderIndex = params.getLeaderShardId().getIndex(); + Index followIndex = params.getFollowShardId().getIndex(); + + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.clear(); + clusterStateRequest.metaData(true); + clusterStateRequest.indices(leaderIndex.getName()); + + leaderClient.admin().cluster().state(clusterStateRequest, ActionListener.wrap(clusterStateResponse -> { + IndexMetaData indexMetaData = clusterStateResponse.getState().metaData().getIndexSafe(leaderIndex); + assert indexMetaData.getMappings().size() == 1 : "expected exactly one mapping, but got [" + + indexMetaData.getMappings().size() + "]"; + MappingMetaData mappingMetaData = indexMetaData.getMappings().iterator().next().value; + + PutMappingRequest putMappingRequest = new PutMappingRequest(followIndex.getName()); + putMappingRequest.type(mappingMetaData.type()); + putMappingRequest.source(mappingMetaData.source().string(), XContentType.JSON); + followerClient.admin().indices().putMapping(putMappingRequest, ActionListener.wrap( + putMappingResponse -> handler.accept(indexMetaData.getMappingVersion()), + errorHandler)); + }, errorHandler)); + } + + @Override + protected void innerSendBulkShardOperationsRequest( + final List operations, + final Consumer handler, + final Consumer errorHandler) { + final BulkShardOperationsRequest request = new BulkShardOperationsRequest(params.getFollowShardId(), operations); + followerClient.execute(BulkShardOperationsAction.INSTANCE, request, + ActionListener.wrap(response -> handler.accept(response), errorHandler)); + } + + @Override + protected void innerSendShardChangesRequest(long from, int maxOperationCount, Consumer handler, + Consumer errorHandler) { + ShardChangesAction.Request request = new ShardChangesAction.Request(params.getLeaderShardId()); + request.setFromSeqNo(from); + request.setMaxOperationCount(maxOperationCount); + request.setMaxOperationSizeInBytes(params.getMaxBatchSizeInBytes()); + leaderClient.execute(ShardChangesAction.INSTANCE, request, 
ActionListener.wrap(handler::accept, errorHandler)); + } + }; + } + + interface BiLongConsumer { + void accept(long x, long y); + } + + @Override + protected void nodeOperation(final AllocatedPersistentTask task, final ShardFollowTask params, final PersistentTaskState state) { + Client followerClient = wrapClient(client, params); + ShardFollowNodeTask shardFollowNodeTask = (ShardFollowNodeTask) task; + logger.info("{} Started to track leader shard {}", params.getFollowShardId(), params.getLeaderShardId()); + fetchGlobalCheckpoint(followerClient, params.getFollowShardId(), + (followerGCP, maxSeqNo) -> shardFollowNodeTask.start(followerGCP, maxSeqNo, followerGCP, maxSeqNo), task::markAsFailed); + } + + private void fetchGlobalCheckpoint( + final Client client, + final ShardId shardId, + final BiLongConsumer handler, + final Consumer errorHandler) { + client.admin().indices().stats(new IndicesStatsRequest().indices(shardId.getIndexName()), ActionListener.wrap(r -> { + IndexStats indexStats = r.getIndex(shardId.getIndexName()); + Optional filteredShardStats = Arrays.stream(indexStats.getShards()) + .filter(shardStats -> shardStats.getShardRouting().shardId().equals(shardId)) + .filter(shardStats -> shardStats.getShardRouting().primary()) + .findAny(); + if (filteredShardStats.isPresent()) { + final SeqNoStats seqNoStats = filteredShardStats.get().getSeqNoStats(); + final long globalCheckpoint = seqNoStats.getGlobalCheckpoint(); + final long maxSeqNo = seqNoStats.getMaxSeqNo(); + handler.accept(globalCheckpoint, maxSeqNo); + } else { + errorHandler.accept(new IllegalArgumentException("Cannot find shard stats for shard " + shardId)); + } + }, errorHandler)); + } + + private static Client wrapClient(Client client, ShardFollowTask shardFollowTask) { + if (shardFollowTask.getHeaders().isEmpty()) { + return client; + } else { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + Map filteredHeaders = shardFollowTask.getHeaders().entrySet().stream() + .filter(e -> ShardFollowTask.HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + return new FilterClient(client) { + @Override + protected + void doExecute(Action action, Request request, ActionListener listener) { + final Supplier supplier = threadContext.newRestorableContext(false); + try (ThreadContext.StoredContext ignore = stashWithHeaders(threadContext, filteredHeaders)) { + super.doExecute(action, request, new ContextPreservingActionListener<>(supplier, listener)); + } + } + }; + } + } + + private static ThreadContext.StoredContext stashWithHeaders(ThreadContext threadContext, Map headers) { + final ThreadContext.StoredContext storedContext = threadContext.stashContext(); + threadContext.copyHeaders(headers.entrySet()); + return storedContext; + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java new file mode 100644 index 00000000000..3b5d0ac53cf --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
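`wrapClient` above stashes the current thread context, re-applies only the white-listed security headers captured when the follow request was made, and restores the caller's context when the stash closes. A self-contained sketch of that stash-and-restore pattern, with hypothetical header names:

```java
import java.util.Collections;
import java.util.Map;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

public class StashWithHeadersDemo {
    public static void main(String[] args) {
        ThreadContext ctx = new ThreadContext(Settings.EMPTY);
        ctx.putHeader("caller-header", "from-the-current-request");
        Map<String, String> stored = Collections.singletonMap("_security_header", "captured-at-follow-time");

        try (ThreadContext.StoredContext ignore = ctx.stashContext()) {
            ctx.copyHeaders(stored.entrySet());
            // inside the stash only the re-applied headers are visible
            assert ctx.getHeader("caller-header") == null;
            assert "captured-at-follow-time".equals(ctx.getHeader("_security_header"));
        }
        // on close the caller's original context is restored
        assert "from-the-current-request".equals(ctx.getHeader("caller-header"));
    }
}
```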
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.tasks.TransportTasksAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.function.Consumer; + +public class TransportCcrStatsAction extends TransportTasksAction< + ShardFollowNodeTask, + CcrStatsAction.TasksRequest, + CcrStatsAction.TasksResponse, CcrStatsAction.TaskResponse> { + + private final IndexNameExpressionResolver resolver; + private final CcrLicenseChecker ccrLicenseChecker; + + @Inject + public TransportCcrStatsAction( + final Settings settings, + final ClusterService clusterService, + final TransportService transportService, + final ActionFilters actionFilters, + final IndexNameExpressionResolver resolver, + final CcrLicenseChecker ccrLicenseChecker) { + super( + settings, + CcrStatsAction.NAME, + clusterService, + transportService, + actionFilters, + CcrStatsAction.TasksRequest::new, + CcrStatsAction.TasksResponse::new, + Ccr.CCR_THREAD_POOL_NAME); + this.resolver = Objects.requireNonNull(resolver); + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + protected void doExecute( + final Task task, + final CcrStatsAction.TasksRequest request, + final ActionListener listener) { + if (ccrLicenseChecker.isCcrAllowed() == false) { + listener.onFailure(LicenseUtils.newComplianceException("ccr")); + return; + } + super.doExecute(task, request, listener); + } + + @Override + protected CcrStatsAction.TasksResponse newResponse( + final CcrStatsAction.TasksRequest request, + final List taskResponses, + final List taskOperationFailures, + final List failedNodeExceptions) { + return new CcrStatsAction.TasksResponse(taskOperationFailures, failedNodeExceptions, taskResponses); + } + + @Override + protected CcrStatsAction.TaskResponse readTaskResponse(final StreamInput in) throws IOException { + return new CcrStatsAction.TaskResponse(in); + } + + @Override + protected void processTasks(final CcrStatsAction.TasksRequest request, final Consumer operation) { + final ClusterState state = clusterService.state(); + final Set concreteIndices = new HashSet<>(Arrays.asList(resolver.concreteIndexNames(state, request))); + for (final Task task : taskManager.getTasks().values()) { + if (task instanceof ShardFollowNodeTask) { + final ShardFollowNodeTask shardFollowNodeTask = (ShardFollowNodeTask) task; + if (concreteIndices.contains(shardFollowNodeTask.getFollowShardId().getIndexName())) { + operation.accept(shardFollowNodeTask); + } + } + } + } + + @Override + protected void taskOperation( + final CcrStatsAction.TasksRequest request, + final ShardFollowNodeTask task, + final 
ActionListener<CcrStatsAction.TaskResponse> listener) {
+        listener.onResponse(new CcrStatsAction.TaskResponse(task.getFollowShardId(), task.getStatus()));
+    }
+
+}
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java
new file mode 100644
index 00000000000..6c1ca81e7c4
--- /dev/null
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ccr.action;
+
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.action.support.master.TransportMasterNodeAction;
+import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
+import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class TransportDeleteAutoFollowPatternAction extends
+    TransportMasterNodeAction<DeleteAutoFollowPatternAction.Request, AcknowledgedResponse> {
+
+    @Inject
+    public TransportDeleteAutoFollowPatternAction(Settings settings, TransportService transportService, ClusterService clusterService,
+                                                  ThreadPool threadPool, ActionFilters actionFilters,
+                                                  IndexNameExpressionResolver indexNameExpressionResolver) {
+        super(settings, DeleteAutoFollowPatternAction.NAME, transportService, clusterService, threadPool, actionFilters,
+            indexNameExpressionResolver, DeleteAutoFollowPatternAction.Request::new);
+    }
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.SAME;
+    }
+
+    @Override
+    protected AcknowledgedResponse newResponse() {
+        return new AcknowledgedResponse();
+    }
+
+    @Override
+    protected void masterOperation(DeleteAutoFollowPatternAction.Request request,
+                                   ClusterState state,
+                                   ActionListener<AcknowledgedResponse> listener) throws Exception {
+        clusterService.submitStateUpdateTask("delete-auto-follow-pattern-" + request.getLeaderClusterAlias(),
+            new AckedClusterStateUpdateTask<AcknowledgedResponse>(request, listener) {
+
+                @Override
+                protected AcknowledgedResponse newResponse(boolean acknowledged) {
+                    return new AcknowledgedResponse(acknowledged);
+                }
+
+                @Override
+                public ClusterState execute(ClusterState currentState) throws Exception {
+                    return innerDelete(request, currentState);
+                }
+            });
+    }
+
+    static ClusterState innerDelete(DeleteAutoFollowPatternAction.Request request, ClusterState currentState) {
+        AutoFollowMetadata currentAutoFollowMetadata = currentState.metaData().custom(AutoFollowMetadata.TYPE);
+        if (currentAutoFollowMetadata == null) {
+            throw new ResourceNotFoundException("no auto-follow patterns for cluster alias [{}] found",
+                request.getLeaderClusterAlias());
+        }
+        Map<String, AutoFollowPattern> patterns = currentAutoFollowMetadata.getPatterns();
+        AutoFollowPattern autoFollowPatternToRemove = patterns.get(request.getLeaderClusterAlias());
+        if (autoFollowPatternToRemove == null) {
+            throw new ResourceNotFoundException("no auto-follow patterns for cluster alias [{}] found",
+                request.getLeaderClusterAlias());
+        }
+
+        final Map<String, AutoFollowPattern> patternsCopy = new HashMap<>(patterns);
+        final Map<String, List<String>> followedLeaderIndexUUIDSCopy =
+            new HashMap<>(currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs());
+        patternsCopy.remove(request.getLeaderClusterAlias());
+        followedLeaderIndexUUIDSCopy.remove(request.getLeaderClusterAlias());
+
+        AutoFollowMetadata newAutoFollowMetadata = new AutoFollowMetadata(patternsCopy, followedLeaderIndexUUIDSCopy);
+        ClusterState.Builder newState = ClusterState.builder(currentState);
+        newState.metaData(MetaData.builder(currentState.getMetaData())
+            .putCustom(AutoFollowMetadata.TYPE, newAutoFollowMetadata)
+            .build());
+        return newState.build();
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(DeleteAutoFollowPatternAction.Request request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+}
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java
new file mode 100644
index 00000000000..a4ff9511cfb
--- /dev/null
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java
@@ -0,0 +1,192 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
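`innerDelete` never mutates the maps held by the current cluster state: it copies them, removes the alias from the copies, and publishes a fresh `AutoFollowMetadata`. A plain-Java sketch of that copy-on-write discipline (the map contents are illustrative):

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class CopyOnWriteDemo {
    public static void main(String[] args) {
        // the published cluster-state custom is treated as immutable
        Map<String, String> current = new HashMap<>();
        current.put("leader_cluster", "logs-*");
        Map<String, String> published = Collections.unmodifiableMap(current);

        // innerDelete-style update: copy, mutate the copy, publish a new object
        Map<String, String> copy = new HashMap<>(published);
        copy.remove("leader_cluster");
        Map<String, String> next = Collections.unmodifiableMap(copy);

        System.out.println("old state: " + published); // {leader_cluster=logs-*}
        System.out.println("new state: " + next);      // {}
    }
}
```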
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +public class TransportPutAutoFollowPatternAction extends + TransportMasterNodeAction { + + private final Client client; + private final CcrLicenseChecker ccrLicenseChecker; + + @Inject + public TransportPutAutoFollowPatternAction( + final Settings settings, + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final Client client, + final IndexNameExpressionResolver indexNameExpressionResolver, + final CcrLicenseChecker ccrLicenseChecker) { + super(settings, PutAutoFollowPatternAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, PutAutoFollowPatternAction.Request::new); + this.client = client; + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker, "ccrLicenseChecker"); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + @Override + protected void masterOperation(PutAutoFollowPatternAction.Request request, + ClusterState state, + ActionListener listener) throws Exception { + if (ccrLicenseChecker.isCcrAllowed() == false) { + listener.onFailure(LicenseUtils.newComplianceException("ccr")); + return; + } + final Client leaderClient; + if (request.getLeaderClusterAlias().equals("_local_")) { + leaderClient = client; + } else { + leaderClient = client.getRemoteClusterClient(request.getLeaderClusterAlias()); + } + + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.clear(); + clusterStateRequest.metaData(true); + + leaderClient.admin().cluster().state( + clusterStateRequest, + ActionListener.wrap( + clusterStateResponse -> { + final ClusterState leaderClusterState = clusterStateResponse.getState(); + clusterService.submitStateUpdateTask("put-auto-follow-pattern-" + request.getLeaderClusterAlias(), + new AckedClusterStateUpdateTask(request, listener) { + + @Override + 
protected AcknowledgedResponse newResponse(boolean acknowledged) {
+                            return new AcknowledgedResponse(acknowledged);
+                        }
+
+                        @Override
+                        public ClusterState execute(ClusterState currentState) throws Exception {
+                            return innerPut(request, currentState, leaderClusterState);
+                        }
+                    });
+            },
+            listener::onFailure));
+    }
+
+    static ClusterState innerPut(PutAutoFollowPatternAction.Request request,
+                                 ClusterState localState,
+                                 ClusterState leaderClusterState) {
+        // auto patterns are always overwritten
+        // only already followed index uuids are updated
+
+        AutoFollowMetadata currentAutoFollowMetadata = localState.metaData().custom(AutoFollowMetadata.TYPE);
+        Map<String, List<String>> followedLeaderIndices;
+        Map<String, AutoFollowPattern> patterns;
+        if (currentAutoFollowMetadata != null) {
+            patterns = new HashMap<>(currentAutoFollowMetadata.getPatterns());
+            followedLeaderIndices = new HashMap<>(currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs());
+        } else {
+            patterns = new HashMap<>();
+            followedLeaderIndices = new HashMap<>();
+        }
+
+        AutoFollowPattern previousPattern = patterns.get(request.getLeaderClusterAlias());
+        List<String> followedIndexUUIDs = followedLeaderIndices.get(request.getLeaderClusterAlias());
+        if (followedIndexUUIDs == null) {
+            followedIndexUUIDs = new ArrayList<>();
+            followedLeaderIndices.put(request.getLeaderClusterAlias(), followedIndexUUIDs);
+        }
+
+        // Mark existing leader indices as already auto followed:
+        if (previousPattern != null) {
+            markExistingIndicesAsAutoFollowedForNewPatterns(request.getLeaderIndexPatterns(), leaderClusterState.metaData(),
+                previousPattern, followedIndexUUIDs);
+        } else {
+            markExistingIndicesAsAutoFollowed(request.getLeaderIndexPatterns(), leaderClusterState.metaData(),
+                followedIndexUUIDs);
+        }
+
+        AutoFollowPattern autoFollowPattern = new AutoFollowPattern(
+            request.getLeaderIndexPatterns(),
+            request.getFollowIndexNamePattern(),
+            request.getMaxBatchOperationCount(),
+            request.getMaxConcurrentReadBatches(),
+            request.getMaxOperationSizeInBytes(),
+            request.getMaxConcurrentWriteBatches(),
+            request.getMaxWriteBufferSize(),
+            request.getRetryTimeout(),
+            request.getIdleShardRetryDelay()
+        );
+        patterns.put(request.getLeaderClusterAlias(), autoFollowPattern);
+        ClusterState.Builder newState = ClusterState.builder(localState);
+        newState.metaData(MetaData.builder(localState.getMetaData())
+            .putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, followedLeaderIndices))
+            .build());
+        return newState.build();
+    }
+
+    private static void markExistingIndicesAsAutoFollowedForNewPatterns(
+        List<String> leaderIndexPatterns,
+        MetaData leaderMetaData,
+        AutoFollowPattern previousPattern,
+        List<String> followedIndexUUIDS) {
+
+        final List<String> newPatterns = leaderIndexPatterns
+            .stream()
+            .filter(p -> previousPattern.getLeaderIndexPatterns().contains(p) == false)
+            .collect(Collectors.toList());
+        markExistingIndicesAsAutoFollowed(newPatterns, leaderMetaData, followedIndexUUIDS);
+    }
+
+    private static void markExistingIndicesAsAutoFollowed(
+        List<String> patterns,
+        MetaData leaderMetaData,
+        List<String> followedIndexUUIDS) {
+
+        for (final IndexMetaData indexMetaData : leaderMetaData) {
+            if (AutoFollowPattern.match(patterns, indexMetaData.getIndex().getName())) {
+                followedIndexUUIDS.add(indexMetaData.getIndexUUID());
+            }
+        }
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(PutAutoFollowPatternAction.Request request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+}
diff --git
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java new file mode 100644 index 00000000000..93b2bcc3e40 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java @@ -0,0 +1,152 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; + +public class UnfollowIndexAction extends Action { + + public static final UnfollowIndexAction INSTANCE = new UnfollowIndexAction(); + public static final String NAME = "cluster:admin/xpack/ccr/unfollow_index"; + + private UnfollowIndexAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + public static class Request extends ActionRequest { + + private String followIndex; + + public String getFollowIndex() { + return followIndex; + } + + public void setFollowIndex(String followIndex) { + this.followIndex = followIndex; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + followIndex = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(followIndex); + } + } + + public static class TransportAction extends HandledTransportAction { + + private final Client client; + private final PersistentTasksService persistentTasksService; + + @Inject + public TransportAction(Settings settings, + TransportService transportService, + ActionFilters actionFilters, + Client client, + PersistentTasksService persistentTasksService) { + super(settings, NAME, transportService, actionFilters, Request::new); + this.client = client; + this.persistentTasksService = persistentTasksService; + } + + @Override + protected void doExecute(Task task, + Request request, + ActionListener listener) { + + client.admin().cluster().state(new ClusterStateRequest(), ActionListener.wrap(r -> { + IndexMetaData followIndexMetadata = 
r.getState().getMetaData().index(request.followIndex);
+                if (followIndexMetadata == null) {
+                    listener.onFailure(new IllegalArgumentException("follow index [" + request.followIndex + "] does not exist"));
+                    return;
+                }
+
+                final int numShards = followIndexMetadata.getNumberOfShards();
+                final AtomicInteger counter = new AtomicInteger(numShards);
+                final AtomicReferenceArray<Object> responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards());
+                for (int i = 0; i < numShards; i++) {
+                    final int shardId = i;
+                    String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId;
+                    persistentTasksService.sendRemoveRequest(taskId,
+                        new ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>>() {
+                            @Override
+                            public void onResponse(PersistentTasksCustomMetaData.PersistentTask<?> task) {
+                                responses.set(shardId, task);
+                                finalizeResponse();
+                            }
+
+                            @Override
+                            public void onFailure(Exception e) {
+                                responses.set(shardId, e);
+                                finalizeResponse();
+                            }
+
+                            void finalizeResponse() {
+                                Exception error = null;
+                                if (counter.decrementAndGet() == 0) {
+                                    for (int j = 0; j < responses.length(); j++) {
+                                        Object response = responses.get(j);
+                                        if (response instanceof Exception) {
+                                            if (error == null) {
+                                                error = (Exception) response;
+                                            } else {
+                                                error.addSuppressed((Throwable) response);
+                                            }
+                                        }
+                                    }
+
+                                    if (error == null) {
+                                        // include task ids?
+                                        listener.onResponse(new AcknowledgedResponse(true));
+                                    } else {
+                                        // TODO: cancel all started tasks
+                                        listener.onFailure(error);
+                                    }
+                                }
+                            }
+                        });
+                }
+            }, listener::onFailure));
+        }
+    }
+
+}
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java
new file mode 100644
index 00000000000..a85e5c50e84
--- /dev/null
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ccr.action.bulk;
+
+import org.elasticsearch.action.Action;
+
+public class BulkShardOperationsAction extends Action<BulkShardOperationsResponse> {
+
+    public static final BulkShardOperationsAction INSTANCE = new BulkShardOperationsAction();
+    public static final String NAME = "indices:data/write/bulk_shard_operations[s]";
+
+    private BulkShardOperationsAction() {
+        super(NAME);
+    }
+
+    @Override
+    public BulkShardOperationsResponse newResponse() {
+        return new BulkShardOperationsResponse();
+    }
+
+}
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java
new file mode 100644
index 00000000000..c28789fb580
--- /dev/null
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
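`UnfollowIndexAction` fans out one remove request per shard-follow task and fans the callbacks back in through a shared countdown, folding any failures into one exception via `addSuppressed`. A runnable sketch of the same fan-out/fan-in pattern with the async call stubbed out:

```java
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;

public class FanInDemo {
    public static void main(String[] args) {
        final int numShards = 3;
        final AtomicInteger counter = new AtomicInteger(numShards);
        final AtomicReferenceArray<Object> responses = new AtomicReferenceArray<>(numShards);
        for (int i = 0; i < numShards; i++) {
            // stand-in for persistentTasksService.sendRemoveRequest(taskId, listener):
            // each callback records its result, the last one to arrive reduces them.
            Object result = (i == 1) ? new RuntimeException("remove failed") : "removed";
            responses.set(i, result);
            if (counter.decrementAndGet() == 0) {
                Exception error = null;
                for (int j = 0; j < responses.length(); j++) {
                    Object response = responses.get(j);
                    if (response instanceof Exception) {
                        if (error == null) {
                            error = (Exception) response;
                        } else {
                            error.addSuppressed((Exception) response);
                        }
                    }
                }
                System.out.println(error == null ? "acknowledged" : "failed: " + error.getMessage());
            }
        }
    }
}
```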
+ */ +package org.elasticsearch.xpack.ccr.action.bulk; + +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; + +import java.io.IOException; +import java.util.List; + +public final class BulkShardOperationsRequest extends ReplicatedWriteRequest { + + private List operations; + + public BulkShardOperationsRequest() { + } + + public BulkShardOperationsRequest(final ShardId shardId, final List operations) { + super(shardId); + setRefreshPolicy(RefreshPolicy.NONE); + this.operations = operations; + } + + public List getOperations() { + return operations; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + operations = in.readList(Translog.Operation::readOperation); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(operations.size()); + for (Translog.Operation operation : operations) { + Translog.Operation.writeOperation(out, operation); + } + } + + @Override + public String toString() { + return "BulkShardOperationsRequest{" + + "operations=" + operations.size()+ + ", shardId=" + shardId + + ", timeout=" + timeout + + ", index='" + index + '\'' + + ", waitForActiveShards=" + waitForActiveShards + + '}'; + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsResponse.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsResponse.java new file mode 100644 index 00000000000..0c72f02fde1 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsResponse.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
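`BulkShardOperationsRequest#writeTo` frames the operations by hand, a vint count followed by one entry per element, which is exactly the shape `StreamInput#readList` consumes in `readFrom`; presumably the manual loop is needed because `Translog.Operation` is serialized through its static helpers rather than as a plain `Writeable`. The general symmetric pattern, shown with strings (these helpers are illustrative, not part of this change):

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

final class ListFraming {

    static void writeStrings(StreamOutput out, List<String> values) throws IOException {
        out.writeVInt(values.size());       // vint length prefix ...
        for (String value : values) {
            out.writeString(value);         // ... then one entry per element
        }
    }

    static List<String> readStrings(StreamInput in) throws IOException {
        int size = in.readVInt();
        List<String> values = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            values.add(in.readString());
        }
        return values;
    }
}
```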
+ */ +package org.elasticsearch.xpack.ccr.action.bulk; + +import org.elasticsearch.action.support.WriteResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public final class BulkShardOperationsResponse extends ReplicationResponse implements WriteResponse { + + private long globalCheckpoint; + + public long getGlobalCheckpoint() { + return globalCheckpoint; + } + + public void setGlobalCheckpoint(final long globalCheckpoint) { + this.globalCheckpoint = globalCheckpoint; + } + + private long maxSeqNo; + + public long getMaxSeqNo() { + return maxSeqNo; + } + + public void setMaxSeqNo(final long maxSeqNo) { + this.maxSeqNo = maxSeqNo; + } + + public BulkShardOperationsResponse() { + } + + @Override + public void setForcedRefresh(final boolean forcedRefresh) { + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + globalCheckpoint = in.readZLong(); + maxSeqNo = in.readZLong(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeZLong(globalCheckpoint); + out.writeZLong(maxSeqNo); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java new file mode 100644 index 00000000000..4b4bce1fcce --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
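The response carries the global checkpoint and max sequence number as ZLongs because both can legitimately be negative (for example, a shard that has processed no operations reports -1), and `writeVLong` rejects negative values while zig-zag encoding keeps small negatives compact. A minimal sketch:

```java
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

public class ZLongDemo {
    public static void main(String[] args) throws Exception {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // a pre-ops checkpoint; writeVLong would reject a negative value
            out.writeZLong(-1L);
            try (StreamInput in = out.bytes().streamInput()) {
                System.out.println(in.readZLong()); // -1
            }
        }
    }
}
```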
+ */ + +package org.elasticsearch.xpack.ccr.action.bulk; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.TransportWriteAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; +import java.util.stream.Collectors; + +public class TransportBulkShardOperationsAction + extends TransportWriteAction { + + @Inject + public TransportBulkShardOperationsAction( + final Settings settings, + final TransportService transportService, + final ClusterService clusterService, + final IndicesService indicesService, + final ThreadPool threadPool, + final ShardStateAction shardStateAction, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver) { + super( + settings, + BulkShardOperationsAction.NAME, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + indexNameExpressionResolver, + BulkShardOperationsRequest::new, + BulkShardOperationsRequest::new, + ThreadPool.Names.WRITE); + } + + @Override + protected WritePrimaryResult shardOperationOnPrimary( + final BulkShardOperationsRequest request, final IndexShard primary) throws Exception { + return shardOperationOnPrimary(request.shardId(), request.getOperations(), primary, logger); + } + + // public for testing purposes only + public static WritePrimaryResult shardOperationOnPrimary( + final ShardId shardId, + final List sourceOperations, + final IndexShard primary, + final Logger logger) throws IOException { + final List targetOperations = sourceOperations.stream().map(operation -> { + final Translog.Operation operationWithPrimaryTerm; + switch (operation.opType()) { + case INDEX: + final Translog.Index index = (Translog.Index) operation; + operationWithPrimaryTerm = new Translog.Index( + index.type(), + index.id(), + index.seqNo(), + primary.getOperationPrimaryTerm(), + index.version(), + BytesReference.toBytes(index.source()), + index.routing(), + index.getAutoGeneratedIdTimestamp()); + break; + case DELETE: + final Translog.Delete delete = (Translog.Delete) operation; + operationWithPrimaryTerm = new Translog.Delete( + delete.type(), + delete.id(), + delete.uid(), + delete.seqNo(), + primary.getOperationPrimaryTerm(), + delete.version()); + break; + case NO_OP: + final Translog.NoOp noOp = (Translog.NoOp) operation; + operationWithPrimaryTerm = new Translog.NoOp(noOp.seqNo(), primary.getOperationPrimaryTerm(), noOp.reason()); + break; + default: + throw new IllegalStateException("unexpected operation type [" + operation.opType() + "]"); + } + return operationWithPrimaryTerm; + }).collect(Collectors.toList()); + final Translog.Location location = 
applyTranslogOperations(targetOperations, primary, Engine.Operation.Origin.PRIMARY); + final BulkShardOperationsRequest replicaRequest = new BulkShardOperationsRequest(shardId, targetOperations); + return new CcrWritePrimaryResult(replicaRequest, location, primary, logger); + } + + @Override + protected WriteReplicaResult shardOperationOnReplica( + final BulkShardOperationsRequest request, final IndexShard replica) throws Exception { + final Translog.Location location = applyTranslogOperations(request.getOperations(), replica, Engine.Operation.Origin.REPLICA); + return new WriteReplicaResult<>(request, location, null, replica, logger); + } + + // public for testing purposes only + public static Translog.Location applyTranslogOperations( + final List operations, final IndexShard shard, final Engine.Operation.Origin origin) throws IOException { + Translog.Location location = null; + for (final Translog.Operation operation : operations) { + final Engine.Result result = shard.applyTranslogOperation(operation, origin); + assert result.getSeqNo() == operation.seqNo(); + assert result.getResultType() == Engine.Result.Type.SUCCESS; + location = locationToSync(location, result.getTranslogLocation()); + } + assert operations.size() == 0 || location != null; + return location; + } + + @Override + protected BulkShardOperationsResponse newResponseInstance() { + return new BulkShardOperationsResponse(); + } + + /** + * Custom write result to include global checkpoint after ops have been replicated. + */ + static class CcrWritePrimaryResult extends WritePrimaryResult { + + CcrWritePrimaryResult(BulkShardOperationsRequest request, Translog.Location location, IndexShard primary, Logger logger) { + super(request, new BulkShardOperationsResponse(), location, null, primary, logger); + } + + @Override + public synchronized void respond(ActionListener listener) { + final BulkShardOperationsResponse response = finalResponseIfSuccessful; + final SeqNoStats seqNoStats = primary.seqNoStats(); + // return a fresh global checkpoint after the operations have been replicated for the shard follow task + response.setGlobalCheckpoint(seqNoStats.getGlobalCheckpoint()); + response.setMaxSeqNo(seqNoStats.getMaxSeqNo()); + listener.onResponse(response); + } + + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java new file mode 100644 index 00000000000..24ada3755cb --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.index.engine; + +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.xpack.ccr.CcrSettings; + +import java.io.IOException; + +/** + * An engine implementation for following shards. + */ +public final class FollowingEngine extends InternalEngine { + + /** + * Construct a new following engine with the specified engine configuration. 
+ * + * @param engineConfig the engine configuration + */ + FollowingEngine(final EngineConfig engineConfig) { + super(validateEngineConfig(engineConfig)); + } + + private static EngineConfig validateEngineConfig(final EngineConfig engineConfig) { + if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(engineConfig.getIndexSettings().getSettings()) == false) { + throw new IllegalArgumentException("a following engine can not be constructed for a non-following index"); + } + return engineConfig; + } + + private void preFlight(final Operation operation) { + /* + * We assert here so that this goes uncaught in unit tests and fails nodes in standalone tests (we want a harsh failure so that we + * do not have a situation where a shard fails and is recovered elsewhere and a test subsequently passes). We throw an exception so + * that we also prevent issues in production code. + */ + assert operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO; + if (operation.seqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO) { + throw new IllegalStateException("a following engine does not accept operations without an assigned sequence number"); + } + assert (operation.origin() == Operation.Origin.PRIMARY) == (operation.versionType() == VersionType.EXTERNAL) : + "invalid version_type in a following engine; version_type=" + operation.versionType() + "origin=" + operation.origin(); + } + + @Override + protected InternalEngine.IndexingStrategy indexingStrategyForOperation(final Index index) throws IOException { + preFlight(index); + return planIndexingAsNonPrimary(index); + } + + @Override + protected InternalEngine.DeletionStrategy deletionStrategyForOperation(final Delete delete) throws IOException { + preFlight(delete); + return planDeletionAsNonPrimary(delete); + } + + @Override + public int fillSeqNoGaps(long primaryTerm) throws IOException { + // a noop implementation, because follow shard does not own the history but the leader shard does. + return 0; + } + + @Override + protected boolean assertPrimaryIncomingSequenceNumber(final Operation.Origin origin, final long seqNo) { + // sequence number should be set when operation origin is primary + assert seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO : "primary operations on a following index must have an assigned sequence number"; + return true; + } + + @Override + protected boolean assertNonPrimaryOrigin(final Operation operation) { + return true; + } + + @Override + protected boolean assertPrimaryCanOptimizeAddDocument(final Index index) { + assert index.version() == 1 && index.versionType() == VersionType.EXTERNAL + : "version [" + index.version() + "], type [" + index.versionType() + "]"; + return true; + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineFactory.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineFactory.java new file mode 100644 index 00000000000..ab76d02c66e --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineFactory.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
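`FollowingEngine` refuses to be constructed for an index that is not marked as a following index; the guard boils down to reading an index-scoped boolean setting. A stand-in sketch: the `Setting` here mimics `CcrSettings.CCR_FOLLOWING_INDEX_SETTING` but is declared locally for illustration, and the key name is an assumption:

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class FollowingIndexGateDemo {
    // local stand-in for CcrSettings.CCR_FOLLOWING_INDEX_SETTING (assumed key name)
    static final Setting<Boolean> CCR_FOLLOWING_INDEX_SETTING =
        Setting.boolSetting("index.xpack.ccr.following_index", false, Setting.Property.IndexScope);

    public static void main(String[] args) {
        Settings indexSettings = Settings.builder().put("index.xpack.ccr.following_index", true).build();
        // the factory-level guard in validateEngineConfig reduces to this check
        if (CCR_FOLLOWING_INDEX_SETTING.get(indexSettings) == false) {
            throw new IllegalArgumentException("a following engine can not be constructed for a non-following index");
        }
        System.out.println("engine construction allowed");
    }
}
```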
+ */ +package org.elasticsearch.xpack.ccr.index.engine; + +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineFactory; + +/** + * An engine factory for following engines. + */ +public final class FollowingEngineFactory implements EngineFactory { + + @Override + public Engine newReadWriteEngine(final EngineConfig config) { + return new FollowingEngine(config); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java new file mode 100644 index 00000000000..df34fd6cd45 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr.rest; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.ccr.action.CcrStatsAction; + +import java.io.IOException; + +public class RestCcrStatsAction extends BaseRestHandler { + + public RestCcrStatsAction(final Settings settings, final RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.GET, "/_ccr/stats", this); + controller.registerHandler(RestRequest.Method.GET, "/_ccr/stats/{index}", this); + } + + @Override + public String getName() { + return "ccr_stats"; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) throws IOException { + final CcrStatsAction.TasksRequest request = new CcrStatsAction.TasksRequest(); + request.setIndices(Strings.splitStringByCommaToArray(restRequest.param("index"))); + request.setIndicesOptions(IndicesOptions.fromRequest(restRequest, request.indicesOptions())); + return channel -> client.execute(CcrStatsAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java new file mode 100644 index 00000000000..4d9079b36c9 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
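`RestCcrStatsAction` exposes the stats under `GET /_ccr/stats` and `GET /_ccr/stats/{index}`. A low-level REST client call against those endpoints might look like the following sketch, assuming a node listening on localhost:9200:

```java
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class CcrStatsExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // fetch shard-follow task stats for all indices
            Response response = client.performRequest(new Request("GET", "/_ccr/stats"));
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
```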
+ */ +package org.elasticsearch.xpack.ccr.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; + +import static org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction.INSTANCE; +import static org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction.Request; + +public class RestCreateAndFollowIndexAction extends BaseRestHandler { + + public RestCreateAndFollowIndexAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_ccr/create_and_follow", this); + } + + @Override + public String getName() { + return "ccr_create_and_follow_index_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + Request request = new Request(RestFollowIndexAction.createRequest(restRequest)); + return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java new file mode 100644 index 00000000000..d25e9bf65fd --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.ccr.action.DeleteAutoFollowPatternAction.Request; + +import java.io.IOException; + +import static org.elasticsearch.xpack.ccr.action.DeleteAutoFollowPatternAction.INSTANCE; + +public class RestDeleteAutoFollowPatternAction extends BaseRestHandler { + + public RestDeleteAutoFollowPatternAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.DELETE, "/_ccr/auto_follow/{leader_cluster_alias}", this); + } + + @Override + public String getName() { + return "ccr_delete_auto_follow_pattern_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + Request request = new Request(); + request.setLeaderClusterAlias(restRequest.param("leader_cluster_alias")); + return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java new file mode 100644 index 00000000000..88f5b74f4b1 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; + +import static org.elasticsearch.xpack.ccr.action.FollowIndexAction.INSTANCE; +import static org.elasticsearch.xpack.ccr.action.FollowIndexAction.Request; + +public class RestFollowIndexAction extends BaseRestHandler { + + public RestFollowIndexAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_ccr/follow", this); + } + + @Override + public String getName() { + return "ccr_follow_index_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + Request request = createRequest(restRequest); + return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); + } + + static Request createRequest(RestRequest restRequest) throws IOException { + try (XContentParser parser = restRequest.contentOrSourceParamParser()) { + return Request.fromXContent(parser, restRequest.param("index")); + } + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java new file mode 100644 index 00000000000..9b3aac3bbb5 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction.Request; + +import java.io.IOException; + +import static org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction.INSTANCE; + +public class RestPutAutoFollowPatternAction extends BaseRestHandler { + + public RestPutAutoFollowPatternAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.PUT, "/_ccr/auto_follow/{leader_cluster_alias}", this); + } + + @Override + public String getName() { + return "ccr_put_auto_follow_pattern_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + Request request = createRequest(restRequest); + return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); + } + + static Request createRequest(RestRequest restRequest) throws IOException { + try (XContentParser parser = restRequest.contentOrSourceParamParser()) { + return Request.fromXContent(parser, restRequest.param("leader_cluster_alias")); + } + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java new file mode 100644 index 00000000000..2df6c77379b --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; + +import static org.elasticsearch.xpack.ccr.action.UnfollowIndexAction.INSTANCE; +import static org.elasticsearch.xpack.ccr.action.UnfollowIndexAction.Request; + +public class RestUnfollowIndexAction extends BaseRestHandler { + + public RestUnfollowIndexAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_ccr/unfollow", this); + } + + @Override + public String getName() { + return "ccr_unfollow_index_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + Request request = new Request(); + request.setFollowIndex(restRequest.param("index")); + return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 00000000000..45d92fd2b8a --- /dev/null +++ b/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,50 @@ +grant { + // needed because of problems in unbound LDAP library + permission java.util.PropertyPermission "*", "read,write"; + + // required to configure the custom mailcap for watcher + permission java.lang.RuntimePermission "setFactory"; + + // needed when sending emails for javax.activation + // otherwise a classnotfound exception is thrown due to trying + // to load the class with the application class loader + permission java.lang.RuntimePermission "setContextClassLoader"; + permission java.lang.RuntimePermission "getClassLoader"; + // TODO: remove use of this jar as soon as possible!!!! + permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries"; + + // bouncy castle + permission java.security.SecurityPermission "putProviderProperty.BC"; + + // needed for x-pack security extension + permission java.security.SecurityPermission "createPolicy.JavaPolicy"; + permission java.security.SecurityPermission "getPolicy"; + permission java.security.SecurityPermission "setPolicy"; + + // needed for multiple server implementations used in tests + permission java.net.SocketPermission "*", "accept,connect"; + + // needed for Windows named pipes in machine learning + permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write"; +}; + +grant codeBase "${codebase.netty-common}" { + // for reading the system-wide configuration for the backlog of established sockets + permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; +}; + +grant codeBase "${codebase.netty-transport}" { + // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 + // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! 
+  permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write";
+};
+
+grant codeBase "${codebase.elasticsearch-rest-client}" {
+  // the REST client uses system properties to obtain the default proxy
+  permission java.net.NetPermission "getProxySelector";
+};
+
+grant codeBase "${codebase.httpasyncclient}" {
+  // the REST client uses system properties to obtain the default proxy
+  permission java.net.NetPermission "getProxySelector";
+};
\ No newline at end of file
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java
new file mode 100644
index 00000000000..2d58358d11f
--- /dev/null
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java
@@ -0,0 +1,208 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.ccr;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchSecurityException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator;
+import org.elasticsearch.xpack.ccr.action.CcrStatsAction;
+import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction;
+import org.elasticsearch.xpack.ccr.action.FollowIndexAction;
+import org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction;
+import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask;
+import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
+import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.concurrent.CountDownLatch;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class CcrLicenseIT extends ESSingleNodeTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return Collections.singletonList(NonCompliantLicenseLocalStateCcr.class);
+    }
+
+    public void testThatFollowingIndexIsUnavailableWithNonCompliantLicense() throws InterruptedException {
+        final FollowIndexAction.Request followRequest = getFollowRequest();
+        final CountDownLatch latch = new CountDownLatch(1);
+        client().execute(
+                FollowIndexAction.INSTANCE,
+                followRequest,
+                new ActionListener<AcknowledgedResponse>() {
+                    @Override
+                    public void onResponse(final AcknowledgedResponse response) {
+                        latch.countDown();
+                        fail();
+                    }
+
+                    @Override
+                    public void onFailure(final Exception e) {
+                        assertNonCompliantLicense(e);
+                        latch.countDown();
+                    }
+                });
+        latch.await();
+    }
+
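+    // Each licensed CCR action is tested with the same pattern: invoke the action, expect the listener's
+    // onFailure to report a non-compliant license, and block on a latch until the async callback has run.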
+    public void testThatCreateAndFollowingIndexIsUnavailableWithNonCompliantLicense() throws InterruptedException {
+        final FollowIndexAction.Request followRequest = getFollowRequest();
+        final CreateAndFollowIndexAction.Request createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest);
+        final CountDownLatch latch = new CountDownLatch(1);
+        client().execute(
+                CreateAndFollowIndexAction.INSTANCE,
+                createAndFollowRequest,
+                new ActionListener<CreateAndFollowIndexAction.Response>() {
+                    @Override
+                    public void onResponse(final CreateAndFollowIndexAction.Response response) {
+                        latch.countDown();
+                        fail();
+                    }
+
+                    @Override
+                    public void onFailure(final Exception e) {
+                        assertNonCompliantLicense(e);
+                        latch.countDown();
+                    }
+                });
+        latch.await();
+    }
+
+    public void testThatCcrStatsAreUnavailableWithNonCompliantLicense() throws InterruptedException {
+        final CountDownLatch latch = new CountDownLatch(1);
+        client().execute(CcrStatsAction.INSTANCE, new CcrStatsAction.TasksRequest(), new ActionListener<CcrStatsAction.TasksResponse>() {
+            @Override
+            public void onResponse(final CcrStatsAction.TasksResponse tasksResponse) {
+                latch.countDown();
+                fail();
+            }
+
+            @Override
+            public void onFailure(final Exception e) {
+                assertNonCompliantLicense(e);
+                latch.countDown();
+            }
+        });
+
+        latch.await();
+    }
+
+    public void testThatPutAutoFollowPatternsIsUnavailableWithNonCompliantLicense() throws InterruptedException {
+        final CountDownLatch latch = new CountDownLatch(1);
+        final PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
+        request.setLeaderClusterAlias("leader");
+        request.setLeaderIndexPatterns(Collections.singletonList("*"));
+        client().execute(
+                PutAutoFollowPatternAction.INSTANCE,
+                request,
+                new ActionListener<AcknowledgedResponse>() {
+                    @Override
+                    public void onResponse(final AcknowledgedResponse response) {
+                        latch.countDown();
+                        fail();
+                    }
+
+                    @Override
+                    public void onFailure(final Exception e) {
+                        assertNonCompliantLicense(e);
+                        latch.countDown();
+                    }
+                });
+        latch.await();
+    }
+
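+    // Auto-follow coordination runs in the background, so a non-compliant license cannot fail a request here;
+    // instead the coordinator is expected to log a warning, which the MockLogAppender below asserts.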
auto-follower coordination", + ElasticsearchSecurityException.class, + "current license is non-compliant for [ccr]")); + Loggers.addAppender(logger, appender); + try { + assertBusy(appender::assertAllExpectationsMatched); + } finally { + Loggers.removeAppender(logger, appender); + appender.stop(); + } + } + + private void assertNonCompliantLicense(final Exception e) { + assertThat(e, instanceOf(ElasticsearchSecurityException.class)); + assertThat(e.getMessage(), equalTo("current license is non-compliant for [ccr]")); + } + + private FollowIndexAction.Request getFollowRequest() { + return new FollowIndexAction.Request( + "leader", + "follower", + ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT, + ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES, + ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, + ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, + ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, + TimeValue.timeValueMillis(10), + TimeValue.timeValueMillis(10)); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrTests.java new file mode 100644 index 00000000000..0a9ca00590b --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrTests.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory; + +import java.io.IOException; +import java.util.Optional; + +import static org.hamcrest.Matchers.instanceOf; + +public class CcrTests extends ESTestCase { + + public void testGetEngineFactory() throws IOException { + final Boolean[] values = new Boolean[] { true, false, null }; + for (final Boolean value : values) { + final String indexName = "following-" + value; + final Index index = new Index(indexName, UUIDs.randomBase64UUID()); + final Settings.Builder builder = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + if (value != null) { + builder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), value); + } + + final IndexMetaData indexMetaData = new IndexMetaData.Builder(index.getName()) + .settings(builder.build()) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + final Ccr ccr = new Ccr(Settings.EMPTY, new CcrLicenseChecker(() -> true)); + final Optional engineFactory = ccr.getEngineFactory(new IndexSettings(indexMetaData, Settings.EMPTY)); + if (value != null && value) { + assertTrue(engineFactory.isPresent()); + assertThat(engineFactory.get(), instanceOf(FollowingEngineFactory.class)); + } else { + assertFalse(engineFactory.isPresent()); + } + } + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalStateCcr.java 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalStateCcr.java new file mode 100644 index 00000000000..cfc30b8dfac --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalStateCcr.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + +import java.nio.file.Path; + +public class LocalStateCcr extends LocalStateCompositeXPackPlugin { + + public LocalStateCcr(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + + plugins.add(new Ccr(settings, new CcrLicenseChecker(() -> true)) { + + @Override + protected XPackLicenseState getLicenseState() { + return LocalStateCcr.this.getLicenseState(); + } + + }); + } + +} + diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/NonCompliantLicenseLocalStateCcr.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/NonCompliantLicenseLocalStateCcr.java new file mode 100644 index 00000000000..f960668a7df --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/NonCompliantLicenseLocalStateCcr.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + +import java.nio.file.Path; + +public class NonCompliantLicenseLocalStateCcr extends LocalStateCompositeXPackPlugin { + + public NonCompliantLicenseLocalStateCcr(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + + plugins.add(new Ccr(settings, new CcrLicenseChecker(() -> false)) { + + @Override + protected XPackLicenseState getLicenseState() { + return NonCompliantLicenseLocalStateCcr.this.getLicenseState(); + } + + }); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java new file mode 100644 index 00000000000..7980e128140 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java @@ -0,0 +1,673 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.bulk.BulkProcessor; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.CheckedRunnable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.MockHttpTransport; +import org.elasticsearch.test.discovery.TestZenDiscovery; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.ccr.action.ShardChangesAction; +import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask; +import org.elasticsearch.xpack.ccr.action.ShardFollowTask; +import org.elasticsearch.xpack.ccr.action.UnfollowIndexAction; +import org.elasticsearch.xpack.core.XPackSettings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, transportClientRatio = 0) +public class ShardChangesIT extends ESIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder newSettings = Settings.builder(); + newSettings.put(super.nodeSettings(nodeOrdinal)); + newSettings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); + newSettings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); + 
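+        // Disabling the remaining X-Pack features keeps this test cluster lightweight; only CCR is under test.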
+        newSettings.put(XPackSettings.WATCHER_ENABLED.getKey(), false);
+        newSettings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false);
+        newSettings.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false);
+        return newSettings.build();
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getMockPlugins() {
+        return Arrays.asList(TestSeedPlugin.class, TestZenDiscovery.TestPlugin.class, MockHttpTransport.TestPlugin.class);
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Arrays.asList(LocalStateCcr.class, CommonAnalysisPlugin.class);
+    }
+
+    @Override
+    protected boolean ignoreExternalCluster() {
+        return true;
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> transportClientPlugins() {
+        return nodePlugins();
+    }
+
+    // this emulates what the CCR persistent task will do for pulling
+    public void testGetOperationsBasedOnGlobalSequenceId() throws Exception {
+        client().admin().indices().prepareCreate("index")
+            .setSettings(Settings.builder().put("index.number_of_shards", 1))
+            .get();
+
+        client().prepareIndex("index", "doc", "1").setSource("{}", XContentType.JSON).get();
+        client().prepareIndex("index", "doc", "2").setSource("{}", XContentType.JSON).get();
+        client().prepareIndex("index", "doc", "3").setSource("{}", XContentType.JSON).get();
+
+        ShardStats shardStats = client().admin().indices().prepareStats("index").get().getIndex("index").getShards()[0];
+        long globalCheckPoint = shardStats.getSeqNoStats().getGlobalCheckpoint();
+        assertThat(globalCheckPoint, equalTo(2L));
+
+        ShardChangesAction.Request request = new ShardChangesAction.Request(shardStats.getShardRouting().shardId());
+        request.setFromSeqNo(0L);
+        request.setMaxOperationCount(3);
+        ShardChangesAction.Response response = client().execute(ShardChangesAction.INSTANCE, request).get();
+        assertThat(response.getOperations().length, equalTo(3));
+        Translog.Index operation = (Translog.Index) response.getOperations()[0];
+        assertThat(operation.seqNo(), equalTo(0L));
+        assertThat(operation.id(), equalTo("1"));
+
+        operation = (Translog.Index) response.getOperations()[1];
+        assertThat(operation.seqNo(), equalTo(1L));
+        assertThat(operation.id(), equalTo("2"));
+
+        operation = (Translog.Index) response.getOperations()[2];
+        assertThat(operation.seqNo(), equalTo(2L));
+        assertThat(operation.id(), equalTo("3"));
+
+        client().prepareIndex("index", "doc", "3").setSource("{}", XContentType.JSON).get();
+        client().prepareIndex("index", "doc", "4").setSource("{}", XContentType.JSON).get();
+        client().prepareIndex("index", "doc", "5").setSource("{}", XContentType.JSON).get();
+
+        shardStats = client().admin().indices().prepareStats("index").get().getIndex("index").getShards()[0];
+        globalCheckPoint = shardStats.getSeqNoStats().getGlobalCheckpoint();
+        assertThat(globalCheckPoint, equalTo(5L));
+
+        request = new ShardChangesAction.Request(shardStats.getShardRouting().shardId());
+        request.setFromSeqNo(3L);
+        request.setMaxOperationCount(3);
+        response = client().execute(ShardChangesAction.INSTANCE, request).get();
+        assertThat(response.getOperations().length, equalTo(3));
+        operation = (Translog.Index) response.getOperations()[0];
+        assertThat(operation.seqNo(), equalTo(3L));
+        assertThat(operation.id(), equalTo("3"));
+
+        operation = (Translog.Index) response.getOperations()[1];
+        assertThat(operation.seqNo(), equalTo(4L));
+        assertThat(operation.id(), equalTo("4"));
+
+        operation = (Translog.Index) response.getOperations()[2];
+        assertThat(operation.seqNo(), equalTo(5L));
+        assertThat(operation.id(), equalTo("5"));
+    }
+
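+    // End-to-end follow test: create a leader index, follow it, and verify that each batch of writes is
+    // replicated to the follower before unfollowing.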
+    public void testFollowIndex() throws Exception {
+        final int numberOfPrimaryShards = randomIntBetween(1, 3);
+        final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1),
+            singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
+        assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON));
+        ensureYellow("index1");
+
+        final FollowIndexAction.Request followRequest = createFollowRequest("index1", "index2");
+        final CreateAndFollowIndexAction.Request createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest);
+        client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get();
+
+        final int firstBatchNumDocs = randomIntBetween(2, 64);
+        logger.info("Indexing [{}] docs as first batch", firstBatchNumDocs);
+        for (int i = 0; i < firstBatchNumDocs; i++) {
+            final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
+            client().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
+        }
+
+        final Map<ShardId, Long> firstBatchNumDocsPerShard = new HashMap<>();
+        final ShardStats[] firstBatchShardStats = client().admin().indices().prepareStats("index1").get().getIndex("index1").getShards();
+        for (final ShardStats shardStats : firstBatchShardStats) {
+            if (shardStats.getShardRouting().primary()) {
+                long value = shardStats.getStats().getIndexing().getTotal().getIndexCount() - 1;
+                firstBatchNumDocsPerShard.put(shardStats.getShardRouting().shardId(), value);
+            }
+        }
+
+        assertBusy(assertTask(numberOfPrimaryShards, firstBatchNumDocsPerShard));
+
+        for (int i = 0; i < firstBatchNumDocs; i++) {
+            assertBusy(assertExpectedDocumentRunnable(i));
+        }
+
+        unfollowIndex("index2");
+        client().execute(FollowIndexAction.INSTANCE, followRequest).get();
+        final int secondBatchNumDocs = randomIntBetween(2, 64);
+        logger.info("Indexing [{}] docs as second batch", secondBatchNumDocs);
+        for (int i = firstBatchNumDocs; i < firstBatchNumDocs + secondBatchNumDocs; i++) {
+            final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
+            client().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
+        }
+
+        final Map<ShardId, Long> secondBatchNumDocsPerShard = new HashMap<>();
+        final ShardStats[] secondBatchShardStats = client().admin().indices().prepareStats("index1").get().getIndex("index1").getShards();
+        for (final ShardStats shardStats : secondBatchShardStats) {
+            if (shardStats.getShardRouting().primary()) {
+                final long value = shardStats.getStats().getIndexing().getTotal().getIndexCount() - 1;
+                secondBatchNumDocsPerShard.put(shardStats.getShardRouting().shardId(), value);
+            }
+        }
+
+        assertBusy(assertTask(numberOfPrimaryShards, secondBatchNumDocsPerShard));
+
+        for (int i = firstBatchNumDocs; i < firstBatchNumDocs + secondBatchNumDocs; i++) {
+            assertBusy(assertExpectedDocumentRunnable(i));
+        }
+        unfollowIndex("index2");
+    }
+
+    public void testSyncMappings() throws Exception {
+        final String leaderIndexSettings = getIndexSettings(2, between(0, 1),
+            singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
+        assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON));
+        ensureYellow("index1");
+
+        final FollowIndexAction.Request followRequest = createFollowRequest("index1", "index2");
+        final CreateAndFollowIndexAction.Request createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest);
+
client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get();
+
+        final long firstBatchNumDocs = randomIntBetween(2, 64);
+        for (long i = 0; i < firstBatchNumDocs; i++) {
+            final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
+            client().prepareIndex("index1", "doc", Long.toString(i)).setSource(source, XContentType.JSON).get();
+        }
+
+        assertBusy(() -> assertThat(client().prepareSearch("index2").get().getHits().totalHits, equalTo(firstBatchNumDocs)));
+        MappingMetaData mappingMetaData = client().admin().indices().prepareGetMappings("index2").get().getMappings()
+            .get("index2").get("doc");
+        assertThat(XContentMapValues.extractValue("properties.f.type", mappingMetaData.sourceAsMap()), equalTo("integer"));
+        assertThat(XContentMapValues.extractValue("properties.k", mappingMetaData.sourceAsMap()), nullValue());
+
+        final int secondBatchNumDocs = randomIntBetween(2, 64);
+        for (long i = firstBatchNumDocs; i < firstBatchNumDocs + secondBatchNumDocs; i++) {
+            final String source = String.format(Locale.ROOT, "{\"k\":%d}", i);
+            client().prepareIndex("index1", "doc", Long.toString(i)).setSource(source, XContentType.JSON).get();
+        }
+
+        assertBusy(() -> assertThat(client().prepareSearch("index2").get().getHits().totalHits,
+            equalTo(firstBatchNumDocs + secondBatchNumDocs)));
+        mappingMetaData = client().admin().indices().prepareGetMappings("index2").get().getMappings()
+            .get("index2").get("doc");
+        assertThat(XContentMapValues.extractValue("properties.f.type", mappingMetaData.sourceAsMap()), equalTo("integer"));
+        assertThat(XContentMapValues.extractValue("properties.k.type", mappingMetaData.sourceAsMap()), equalTo("long"));
+        unfollowIndex("index2");
+    }
+
+    public void testFollowIndex_backlog() throws Exception {
+        String leaderIndexSettings = getIndexSettings(between(1, 5), between(0, 1),
+            singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
+        assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON));
+        BulkProcessor.Listener listener = new BulkProcessor.Listener() {
+            @Override
+            public void beforeBulk(long executionId, BulkRequest request) {}
+
+            @Override
+            public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {}
+
+            @Override
+            public void afterBulk(long executionId, BulkRequest request, Throwable failure) {}
+        };
+        BulkProcessor bulkProcessor = BulkProcessor.builder(client(), listener)
+            .setBulkActions(100)
+            .setConcurrentRequests(4)
+            .build();
+        AtomicBoolean run = new AtomicBoolean(true);
+        Thread thread = new Thread(() -> {
+            int counter = 0;
+            while (run.get()) {
+                final String source = String.format(Locale.ROOT, "{\"f\":%d}", counter++);
+                IndexRequest indexRequest = new IndexRequest("index1", "doc")
+                    .source(source, XContentType.JSON)
+                    .timeout(TimeValue.timeValueSeconds(1));
+                bulkProcessor.add(indexRequest);
+            }
+        });
+        thread.start();
+
+        // Wait for some documents to be indexed before following the index:
+        int maxReadSize = randomIntBetween(128, 2048);
+        long numDocsIndexed = Math.min(3000 * 2, randomLongBetween(maxReadSize, maxReadSize * 10));
+        atLeastDocsIndexed("index1", numDocsIndexed / 3);
+
+        final FollowIndexAction.Request followRequest = new FollowIndexAction.Request("index1", "index2", maxReadSize,
+            randomIntBetween(2, 10), Long.MAX_VALUE, randomIntBetween(2, 10),
+            randomIntBetween(1024, 10240), TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10));
+        CreateAndFollowIndexAction.Request createAndFollowRequest =
new CreateAndFollowIndexAction.Request(followRequest); + client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get(); + + atLeastDocsIndexed("index1", numDocsIndexed); + run.set(false); + thread.join(); + assertThat(bulkProcessor.awaitClose(1L, TimeUnit.MINUTES), is(true)); + + assertSameDocCount("index1", "index2"); + unfollowIndex("index2"); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33337") + public void testFollowIndexAndCloseNode() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + String leaderIndexSettings = getIndexSettings(3, 1, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + + String followerIndexSettings = getIndexSettings(3, 1, singletonMap(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("index2").setSource(followerIndexSettings, XContentType.JSON)); + ensureGreen("index1", "index2"); + + AtomicBoolean run = new AtomicBoolean(true); + Thread thread = new Thread(() -> { + int counter = 0; + while (run.get()) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", counter++); + try { + client().prepareIndex("index1", "doc") + .setSource(source, XContentType.JSON) + .setTimeout(TimeValue.timeValueSeconds(1)) + .get(); + } catch (Exception e) { + logger.error("Error while indexing into leader index", e); + } + } + }); + thread.start(); + + final FollowIndexAction.Request followRequest = new FollowIndexAction.Request("index1", "index2", randomIntBetween(32, 2048), + randomIntBetween(2, 10), Long.MAX_VALUE, randomIntBetween(2, 10), + ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + client().execute(FollowIndexAction.INSTANCE, followRequest).get(); + + long maxNumDocsReplicated = Math.min(1000, randomLongBetween(followRequest.getMaxBatchOperationCount(), + followRequest.getMaxBatchOperationCount() * 10)); + long minNumDocsReplicated = maxNumDocsReplicated / 3L; + logger.info("waiting for at least [{}] documents to be indexed and then stop a random data node", minNumDocsReplicated); + atLeastDocsIndexed("index2", minNumDocsReplicated); + internalCluster().stopRandomNonMasterNode(); + logger.info("waiting for at least [{}] documents to be indexed", maxNumDocsReplicated); + atLeastDocsIndexed("index2", maxNumDocsReplicated); + run.set(false); + thread.join(); + + assertSameDocCount("index1", "index2"); + unfollowIndex("index2"); + } + + public void testFollowIndexWithNestedField() throws Exception { + final String leaderIndexSettings = + getIndexSettingsWithNestedMapping(1, between(0, 1), singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + + final String followerIndexSettings = + getIndexSettingsWithNestedMapping(1, between(0, 1), singletonMap(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("index2").setSource(followerIndexSettings, XContentType.JSON)); + + internalCluster().ensureAtLeastNumDataNodes(2); + ensureGreen("index1", "index2"); + + final FollowIndexAction.Request followRequest = createFollowRequest("index1", "index2"); + client().execute(FollowIndexAction.INSTANCE, followRequest).get(); + + final int numDocs = 
randomIntBetween(2, 64);
+        for (int i = 0; i < numDocs; i++) {
+            try (XContentBuilder builder = jsonBuilder()) {
+                builder.startObject();
+                builder.field("field", "value");
+                builder.startArray("objects");
+                {
+                    builder.startObject();
+                    builder.field("field", i);
+                    builder.endObject();
+                }
+                builder.endArray();
+                builder.endObject();
+                client().prepareIndex("index1", "doc", Integer.toString(i)).setSource(builder).get();
+            }
+        }
+
+        for (int i = 0; i < numDocs; i++) {
+            int value = i;
+            assertBusy(() -> {
+                final GetResponse getResponse = client().prepareGet("index2", "doc", Integer.toString(value)).get();
+                assertTrue(getResponse.isExists());
+                assertTrue(getResponse.getSource().containsKey("field"));
+                assertThat(XContentMapValues.extractValue("objects.field", getResponse.getSource()),
+                    equalTo(Collections.singletonList(value)));
+            });
+        }
+        unfollowIndex("index2");
+    }
+
+    public void testUnfollowNonExistingIndex() {
+        UnfollowIndexAction.Request unfollowRequest = new UnfollowIndexAction.Request();
+        unfollowRequest.setFollowIndex("non-existing-index");
+        expectThrows(IllegalArgumentException.class, () -> client().execute(UnfollowIndexAction.INSTANCE, unfollowRequest).actionGet());
+    }
+
+    public void testFollowNonExistentIndex() throws Exception {
+        assertAcked(client().admin().indices().prepareCreate("test-leader").get());
+        assertAcked(client().admin().indices().prepareCreate("test-follower").get());
+        // Leader index does not exist.
+        FollowIndexAction.Request followRequest1 = createFollowRequest("non-existent-leader", "test-follower");
+        expectThrows(IllegalArgumentException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest1).actionGet());
+        // Follower index does not exist.
+        FollowIndexAction.Request followRequest2 = createFollowRequest("non-test-leader", "non-existent-follower");
+        expectThrows(IllegalArgumentException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest2).actionGet());
+        // Both indices do not exist.
+        FollowIndexAction.Request followRequest3 = createFollowRequest("non-existent-leader", "non-existent-follower");
+        expectThrows(IllegalArgumentException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest3).actionGet());
+    }
+
+    @TestLogging("_root:DEBUG")
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33379")
+    public void testValidateFollowingIndexSettings() throws Exception {
+        assertAcked(client().admin().indices().prepareCreate("test-leader")
+            .setSettings(Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)));
+        // TODO: indexing should be optional but the current mapping logic requires it for now.
+        client().prepareIndex("test-leader", "doc", "id").setSource("{\"f\": \"v\"}", XContentType.JSON).get();
+        assertAcked(client().admin().indices().prepareCreate("test-follower").get());
+        IllegalArgumentException followError = expectThrows(IllegalArgumentException.class, () -> client().execute(
+            FollowIndexAction.INSTANCE, createFollowRequest("test-leader", "test-follower")).actionGet());
+        assertThat(followError.getMessage(), equalTo("the following index [test-follower] is not ready to follow;"
+            + " the setting [index.xpack.ccr.following_index] must be enabled."));
+        // Updating the `following_index` setting while the index is open must not be allowed.
+        IllegalArgumentException updateError = expectThrows(IllegalArgumentException.class, () -> {
+            client().admin().indices().prepareUpdateSettings("test-follower")
+                .setSettings(Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)).get();
+        });
+        assertThat(updateError.getMessage(), containsString("Can't update non dynamic settings "
+            + "[[index.xpack.ccr.following_index]] for open indices [[test-follower/"));
+        assertAcked(client().admin().indices().prepareClose("test-follower"));
+        assertAcked(client().admin().indices().prepareUpdateSettings("test-follower")
+            .setSettings(Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)));
+        assertAcked(client().admin().indices().prepareOpen("test-follower"));
+        assertAcked(client().execute(FollowIndexAction.INSTANCE,
+            createFollowRequest("test-leader", "test-follower")).actionGet());
+        unfollowIndex("test-follower");
+    }
+
+    public void testFollowIndex_lowMaxTranslogBytes() throws Exception {
+        final String leaderIndexSettings = getIndexSettings(1, between(0, 1),
+            singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
+        assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON));
+        ensureYellow("index1");
+
+        final int numDocs = 1024;
+        logger.info("Indexing [{}] docs", numDocs);
+        for (int i = 0; i < numDocs; i++) {
+            final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
+            client().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
+        }
+
+        final FollowIndexAction.Request followRequest = new FollowIndexAction.Request("index1", "index2", 1024, 1, 1024L,
+            1, 10240, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10));
+        final CreateAndFollowIndexAction.Request createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest);
+        client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get();
+
+        final Map<ShardId, Long> firstBatchNumDocsPerShard = new HashMap<>();
+        final ShardStats[] firstBatchShardStats = client().admin().indices().prepareStats("index1").get().getIndex("index1").getShards();
+        for (final ShardStats shardStats : firstBatchShardStats) {
+            if (shardStats.getShardRouting().primary()) {
+                long value = shardStats.getStats().getIndexing().getTotal().getIndexCount() - 1;
+                firstBatchNumDocsPerShard.put(shardStats.getShardRouting().shardId(), value);
+            }
+        }
+
+        assertBusy(assertTask(1, firstBatchNumDocsPerShard));
+        for (int i = 0; i < numDocs; i++) {
+            assertBusy(assertExpectedDocumentRunnable(i));
+        }
+        unfollowIndex("index2");
+    }
+
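+    // Helpers for the tests above: assertTask verifies that one shard-follow task per primary shard reports the
+    // expected follower global checkpoint, and unfollowIndex waits until all related tasks have been cleaned up.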
+    private CheckedRunnable<Exception> assertTask(final int numberOfPrimaryShards, final Map<ShardId, Long> numDocsPerShard) {
+        return () -> {
+            final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+            final PersistentTasksCustomMetaData taskMetadata = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
+
+            ListTasksRequest listTasksRequest = new ListTasksRequest();
+            listTasksRequest.setDetailed(true);
+            listTasksRequest.setActions(ShardFollowTask.NAME + "[c]");
+            ListTasksResponse listTasksResponse = client().admin().cluster().listTasks(listTasksRequest).actionGet();
+            assertThat(listTasksResponse.getNodeFailures().size(), equalTo(0));
+            assertThat(listTasksResponse.getTaskFailures().size(), equalTo(0));
+
+            List<TaskInfo> taskInfos = listTasksResponse.getTasks();
+            assertThat(taskInfos.size(), equalTo(numberOfPrimaryShards));
+            Collection<PersistentTasksCustomMetaData.PersistentTask<?>> shardFollowTasks =
+                taskMetadata.findTasks(ShardFollowTask.NAME, Objects::nonNull);
+            for (PersistentTasksCustomMetaData.PersistentTask<?> shardFollowTask : shardFollowTasks) {
+                final ShardFollowTask shardFollowTaskParams = (ShardFollowTask) shardFollowTask.getParams();
+                TaskInfo taskInfo = null;
+                String expectedId = "id=" + shardFollowTask.getId();
+                for (TaskInfo info : taskInfos) {
+                    if (expectedId.equals(info.getDescription())) {
+                        taskInfo = info;
+                        break;
+                    }
+                }
+                assertThat(taskInfo, notNullValue());
+                ShardFollowNodeTask.Status status = (ShardFollowNodeTask.Status) taskInfo.getStatus();
+                assertThat(status, notNullValue());
+                assertThat("incorrect global checkpoint " + shardFollowTaskParams,
+                    status.followerGlobalCheckpoint(),
+                    equalTo(numDocsPerShard.get(shardFollowTaskParams.getLeaderShardId())));
+            }
+        };
+    }
+
+    private void unfollowIndex(String index) throws Exception {
+        final UnfollowIndexAction.Request unfollowRequest = new UnfollowIndexAction.Request();
+        unfollowRequest.setFollowIndex(index);
+        client().execute(UnfollowIndexAction.INSTANCE, unfollowRequest).get();
+        assertBusy(() -> {
+            final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+            final PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
+            assertThat(tasks.tasks().size(), equalTo(0));
+
+            ListTasksRequest listTasksRequest = new ListTasksRequest();
+            listTasksRequest.setDetailed(true);
+            ListTasksResponse listTasksResponse = client().admin().cluster().listTasks(listTasksRequest).get();
+            int numNodeTasks = 0;
+            for (TaskInfo taskInfo : listTasksResponse.getTasks()) {
+                if (taskInfo.getAction().startsWith(ListTasksAction.NAME) == false) {
+                    numNodeTasks++;
+                }
+            }
+            assertThat(numNodeTasks, equalTo(0));
+        }, 30, TimeUnit.SECONDS);
+    }
+
+    private CheckedRunnable<Exception> assertExpectedDocumentRunnable(final int value) {
+        return () -> {
+            final GetResponse getResponse = client().prepareGet("index2", "doc", Integer.toString(value)).get();
+            assertTrue("Doc with id [" + value + "] is missing", getResponse.isExists());
+            assertTrue(getResponse.getSource().containsKey("f"));
+            assertThat(getResponse.getSource().get("f"), equalTo(value));
+        };
+    }
+
+    private String getIndexSettings(final int numberOfShards, final int numberOfReplicas,
+                                    final Map<String, String> additionalIndexSettings) throws IOException {
+        final String settings;
+        try (XContentBuilder builder = jsonBuilder()) {
+            builder.startObject();
+            {
+                builder.startObject("settings");
+                {
+                    builder.field("index.number_of_shards", numberOfShards);
+                    builder.field("index.number_of_replicas", numberOfReplicas);
+                    for (final Map.Entry<String, String> additionalSetting : additionalIndexSettings.entrySet()) {
+                        builder.field(additionalSetting.getKey(), additionalSetting.getValue());
+                    }
+                }
+                builder.endObject();
+                builder.startObject("mappings");
+                {
+                    builder.startObject("doc");
+                    {
+                        builder.startObject("properties");
+                        {
+                            builder.startObject("f");
+                            {
+                                builder.field("type", "integer");
+                            }
+                            builder.endObject();
+                        }
+                        builder.endObject();
+                    }
+                    builder.endObject();
+                }
+                builder.endObject();
+            }
+            builder.endObject();
+            settings = BytesReference.bytes(builder).utf8ToString();
+        }
+        return settings;
+    }
+
builder.field("index.number_of_shards", numberOfShards); + builder.field("index.number_of_replicas", numberOfReplicas); + for (final Map.Entry additionalSetting : additionalIndexSettings.entrySet()) { + builder.field(additionalSetting.getKey(), additionalSetting.getValue()); + } + } + builder.endObject(); + builder.startObject("mappings"); + { + builder.startObject("doc"); + { + builder.startObject("properties"); + { + builder.startObject("objects"); + { + builder.field("type", "nested"); + builder.startObject("properties"); + { + builder.startObject("field"); + { + builder.field("type", "long"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + builder.startObject("field"); + { + builder.field("type", "keyword"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + settings = BytesReference.bytes(builder).utf8ToString(); + } + return settings; + } + + private void atLeastDocsIndexed(String index, long numDocsReplicated) throws InterruptedException { + logger.info("waiting for at least [{}] documents to be indexed into index [{}]", numDocsReplicated, index); + awaitBusy(() -> { + refresh(index); + SearchRequest request = new SearchRequest(index); + request.source(new SearchSourceBuilder().size(0)); + SearchResponse response = client().search(request).actionGet(); + return response.getHits().getTotalHits() >= numDocsReplicated; + }, 60, TimeUnit.SECONDS); + } + + private void assertSameDocCount(String index1, String index2) throws Exception { + refresh(index1); + SearchRequest request1 = new SearchRequest(index1); + request1.source(new SearchSourceBuilder().size(0)); + SearchResponse response1 = client().search(request1).actionGet(); + assertBusy(() -> { + refresh(index2); + SearchRequest request2 = new SearchRequest(index2); + request2.source(new SearchSourceBuilder().size(0)); + SearchResponse response2 = client().search(request2).actionGet(); + assertThat(response2.getHits().getTotalHits(), equalTo(response1.getHits().getTotalHits())); + }, 60, TimeUnit.SECONDS); + } + + public static FollowIndexAction.Request createFollowRequest(String leaderIndex, String followIndex) { + return new FollowIndexAction.Request(leaderIndex, followIndex, ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT, + ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES, ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, + ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, + TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10)); + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java new file mode 100644 index 00000000000..2ef84129232 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -0,0 +1,296 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.ccr.action;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollower;
+import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
+import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.sameInstance;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class AutoFollowCoordinatorTests extends ESTestCase {
+
+    public void testAutoFollower() {
+        Client client = mock(Client.class);
+        when(client.getRemoteClusterClient(anyString())).thenReturn(client);
+
+        ClusterState leaderState = ClusterState.builder(new ClusterName("remote"))
+            .metaData(MetaData.builder().put(IndexMetaData.builder("logs-20190101")
+                .settings(settings(Version.CURRENT))
+                .numberOfShards(1)
+                .numberOfReplicas(0)))
+            .build();
+
+        AutoFollowPattern autoFollowPattern =
+            new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
+        Map<String, AutoFollowPattern> patterns = new HashMap<>();
+        patterns.put("remote", autoFollowPattern);
+        Map<String, List<String>> followedLeaderIndexUUIDS = new HashMap<>();
+        followedLeaderIndexUUIDS.put("remote", new ArrayList<>());
+        AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS);
+
+        ClusterState currentState = ClusterState.builder(new ClusterName("name"))
+            .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata))
+            .build();
+
+        boolean[] invoked = new boolean[]{false};
+        Consumer<Exception> handler = e -> {
+            invoked[0] = true;
+            assertThat(e, nullValue());
+        };
+        AutoFollower autoFollower = new AutoFollower(handler, currentState) {
+            @Override
+            void getLeaderClusterState(String leaderClusterAlias, BiConsumer<ClusterState, Exception> handler) {
+                handler.accept(leaderState, null);
+            }
+
+            @Override
+            void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer<Exception> failureHandler) {
+                assertThat(followRequest.getLeaderIndex(), equalTo("remote:logs-20190101"));
+                assertThat(followRequest.getFollowerIndex(), equalTo("logs-20190101"));
+                successHandler.run();
+            }
+
+            @Override
+            void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction, Consumer<Exception> handler) {
+                ClusterState resultCs = updateFunction.apply(currentState);
+                AutoFollowMetadata result = resultCs.metaData().custom(AutoFollowMetadata.TYPE);
+                assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1));
+                assertThat(result.getFollowedLeaderIndexUUIDs().get("remote").size(), equalTo(1));
+                handler.accept(null);
+            }
+        };
+        autoFollower.autoFollowIndices();
+        assertThat(invoked[0], is(true));
+    }
+
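+    // The following tests drive the same AutoFollower with each collaborator failing in turn and assert that the
+    // error is surfaced through the completion handler.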
+    public void testAutoFollowerClusterStateApiFailure() {
+        Client client = mock(Client.class);
+        when(client.getRemoteClusterClient(anyString())).thenReturn(client);
+
+        AutoFollowPattern autoFollowPattern =
+            new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
+        Map<String, AutoFollowPattern> patterns = new HashMap<>();
+        patterns.put("remote", autoFollowPattern);
+        Map<String, List<String>> followedLeaderIndexUUIDS = new HashMap<>();
+        followedLeaderIndexUUIDS.put("remote", new ArrayList<>());
+        AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS);
+        ClusterState followerState = ClusterState.builder(new ClusterName("remote"))
+            .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata))
+            .build();
+
+        Exception failure = new RuntimeException("failure");
+        boolean[] invoked = new boolean[]{false};
+        Consumer<Exception> handler = e -> {
+            invoked[0] = true;
+            assertThat(e, sameInstance(failure));
+        };
+        AutoFollower autoFollower = new AutoFollower(handler, followerState) {
+            @Override
+            void getLeaderClusterState(String leaderClusterAlias, BiConsumer<ClusterState, Exception> handler) {
+                handler.accept(null, failure);
+            }
+
+            @Override
+            void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer<Exception> failureHandler) {
+                fail("should not get here");
+            }
+
+            @Override
+            void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction, Consumer<Exception> handler) {
+                fail("should not get here");
+            }
+        };
+        autoFollower.autoFollowIndices();
+        assertThat(invoked[0], is(true));
+    }
+
+    public void testAutoFollowerUpdateClusterStateFailure() {
+        Client client = mock(Client.class);
+        when(client.getRemoteClusterClient(anyString())).thenReturn(client);
+
+        ClusterState leaderState = ClusterState.builder(new ClusterName("remote"))
+            .metaData(MetaData.builder().put(IndexMetaData.builder("logs-20190101")
+                .settings(settings(Version.CURRENT))
+                .numberOfShards(1)
+                .numberOfReplicas(0)))
+            .build();
+
+        AutoFollowPattern autoFollowPattern =
+            new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
+        Map<String, AutoFollowPattern> patterns = new HashMap<>();
+        patterns.put("remote", autoFollowPattern);
+        Map<String, List<String>> followedLeaderIndexUUIDS = new HashMap<>();
+        followedLeaderIndexUUIDS.put("remote", new ArrayList<>());
+        AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS);
+        ClusterState followerState = ClusterState.builder(new ClusterName("remote"))
+            .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata))
+            .build();
+
+        Exception failure = new RuntimeException("failure");
+        boolean[] invoked = new boolean[]{false};
+        Consumer<Exception> handler = e -> {
+            invoked[0] = true;
+            assertThat(e, sameInstance(failure));
+        };
+        AutoFollower autoFollower = new AutoFollower(handler, followerState) {
+            @Override
+            void getLeaderClusterState(String leaderClusterAlias, BiConsumer<ClusterState, Exception> handler) {
+                handler.accept(leaderState, null);
+            }
+
+            @Override
+            void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer<Exception> failureHandler) {
+                assertThat(followRequest.getLeaderIndex(), equalTo("remote:logs-20190101"));
+                assertThat(followRequest.getFollowerIndex(), equalTo("logs-20190101"));
+                successHandler.run();
+            }
+
+            @Override
+            void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction, Consumer<Exception> handler) {
+                handler.accept(failure);
+            }
+        };
+        autoFollower.autoFollowIndices();
+        assertThat(invoked[0], is(true));
+    }
+
testAutoFollowerCreateAndFollowApiCallFailure() { + Client client = mock(Client.class); + when(client.getRemoteClusterClient(anyString())).thenReturn(client); + + ClusterState leaderState = ClusterState.builder(new ClusterName("remote")) + .metaData(MetaData.builder().put(IndexMetaData.builder("logs-20190101") + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0))) + .build(); + + AutoFollowPattern autoFollowPattern = + new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null); + Map patterns = new HashMap<>(); + patterns.put("remote", autoFollowPattern); + Map> followedLeaderIndexUUIDS = new HashMap<>(); + followedLeaderIndexUUIDS.put("remote", new ArrayList<>()); + AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS); + ClusterState followerState = ClusterState.builder(new ClusterName("remote")) + .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) + .build(); + + Exception failure = new RuntimeException("failure"); + boolean[] invoked = new boolean[]{false}; + Consumer handler = e -> { + invoked[0] = true; + assertThat(e, sameInstance(failure)); + }; + AutoFollower autoFollower = new AutoFollower(handler, followerState) { + @Override + void getLeaderClusterState(String leaderClusterAlias, BiConsumer handler) { + handler.accept(leaderState, null); + } + + @Override + void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer failureHandler) { + assertThat(followRequest.getLeaderIndex(), equalTo("remote:logs-20190101")); + assertThat(followRequest.getFollowerIndex(), equalTo("logs-20190101")); + failureHandler.accept(failure); + } + + @Override + void updateAutoFollowMetadata(Function updateFunction, Consumer handler) { + fail("should not get here"); + } + }; + autoFollower.autoFollowIndices(); + assertThat(invoked[0], is(true)); + } + + public void testGetLeaderIndicesToFollow() { + AutoFollowPattern autoFollowPattern = + new AutoFollowPattern(Collections.singletonList("metrics-*"), null, null, null, null, null, null, null, null); + ClusterState followerState = ClusterState.builder(new ClusterName("remote")) + .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, + new AutoFollowMetadata(Collections.singletonMap("remote", autoFollowPattern), Collections.emptyMap()))) + .build(); + + MetaData.Builder imdBuilder = MetaData.builder(); + for (int i = 0; i < 5; i++) { + Settings.Builder builder = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, "metrics-" + i); + imdBuilder.put(IndexMetaData.builder("metrics-" + i) + .settings(builder) + .numberOfShards(1) + .numberOfReplicas(0)); + } + imdBuilder.put(IndexMetaData.builder("logs-0") + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0)); + + ClusterState leaderState = ClusterState.builder(new ClusterName("remote")) + .metaData(imdBuilder) + .build(); + + List result = AutoFollower.getLeaderIndicesToFollow(autoFollowPattern, leaderState, followerState, Collections.emptyList()); + result.sort(Comparator.comparing(Index::getName)); + assertThat(result.size(), equalTo(5)); + assertThat(result.get(0).getName(), equalTo("metrics-0")); + assertThat(result.get(1).getName(), equalTo("metrics-1")); + assertThat(result.get(2).getName(), equalTo("metrics-2")); + assertThat(result.get(3).getName(), equalTo("metrics-3")); + 
assertThat(result.get(4).getName(), equalTo("metrics-4")); + + List followedIndexUUIDs = Collections.singletonList(leaderState.metaData().index("metrics-2").getIndexUUID()); + result = AutoFollower.getLeaderIndicesToFollow(autoFollowPattern, leaderState, followerState, followedIndexUUIDs); + result.sort(Comparator.comparing(Index::getName)); + assertThat(result.size(), equalTo(4)); + assertThat(result.get(0).getName(), equalTo("metrics-0")); + assertThat(result.get(1).getName(), equalTo("metrics-1")); + assertThat(result.get(2).getName(), equalTo("metrics-3")); + assertThat(result.get(3).getName(), equalTo("metrics-4")); + } + + public void testGetFollowerIndexName() { + AutoFollowPattern autoFollowPattern = new AutoFollowPattern(Collections.singletonList("metrics-*"), null, null, + null, null, null, null, null, null); + assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("metrics-0")); + + autoFollowPattern = new AutoFollowPattern(Collections.singletonList("metrics-*"), "eu-metrics-0", null, null, + null, null, null, null, null); + assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0")); + + autoFollowPattern = new AutoFollowPattern(Collections.singletonList("metrics-*"), "eu-{{leader_index}}", null, + null, null, null, null, null, null); + assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0")); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java new file mode 100644 index 00000000000..a4808e428fe --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java @@ -0,0 +1,189 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.xpack.ccr.LocalStateCcr; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class AutoFollowTests extends ESSingleNodeTestCase { + + @Override + protected Collection> getPlugins() { + return Collections.singleton(LocalStateCcr.class); + } + + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + public void testAutoFollow() throws Exception { + Settings leaderIndexSettings = Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .build(); + + createIndex("logs-201812", leaderIndexSettings, "_doc"); + + // Enabling auto following: + putAutoFollowPatterns("logs-*", "transactions-*"); + + createIndex("metrics-201901", leaderIndexSettings, "_doc"); + + createIndex("logs-201901", leaderIndexSettings, "_doc"); + assertBusy(() -> { + IndicesExistsRequest request = new IndicesExistsRequest("copy-logs-201901"); + assertTrue(client().admin().indices().exists(request).actionGet().isExists()); + }); + createIndex("transactions-201901", leaderIndexSettings, "_doc"); + assertBusy(() -> { + IndicesExistsRequest request = new IndicesExistsRequest("copy-transactions-201901"); + assertTrue(client().admin().indices().exists(request).actionGet().isExists()); + }); + + IndicesExistsRequest request = new IndicesExistsRequest("copy-metrics-201901"); + assertFalse(client().admin().indices().exists(request).actionGet().isExists()); + request = new IndicesExistsRequest("copy-logs-201812"); + assertFalse(client().admin().indices().exists(request).actionGet().isExists()); + } + + public void testAutoFollowManyIndices() throws Exception { + Settings leaderIndexSettings = Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .build(); + + putAutoFollowPatterns("logs-*"); + int numIndices = randomIntBetween(4, 32); + for (int i = 0; i < numIndices; i++) { + createIndex("logs-" + i, leaderIndexSettings, "_doc"); + } + int expectedVal1 = numIndices; + assertBusy(() -> { + MetaData metaData = client().admin().cluster().prepareState().get().getState().metaData(); + int count = (int) Arrays.stream(metaData.getConcreteAllIndices()).filter(s -> s.startsWith("copy-")).count(); + assertThat(count, equalTo(expectedVal1)); + }); + + deleteAutoFollowPatternSetting(); + createIndex("logs-does-not-count", leaderIndexSettings, "_doc"); + + putAutoFollowPatterns("logs-*"); + int i = numIndices; + numIndices = numIndices + randomIntBetween(4, 32); + for (; i < numIndices; i++) { + createIndex("logs-" + i, leaderIndexSettings, "_doc"); + } + int expectedVal2 = numIndices; + 
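+        // Indices created after the pattern was added back should be auto-followed again, while "logs-does-not-count"
+        // (created while no pattern existed) must never get a copy: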
assertBusy(() -> { + MetaData metaData = client().admin().cluster().prepareState().get().getState().metaData(); + int count = (int) Arrays.stream(metaData.getConcreteAllIndices()).filter(s -> s.startsWith("copy-")).count(); + assertThat(count, equalTo(expectedVal2)); + }); + } + + public void testAutoFollowParameterAreDelegated() throws Exception { + Settings leaderIndexSettings = Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .build(); + + // Enabling auto following: + PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); + request.setLeaderClusterAlias("_local_"); + request.setLeaderIndexPatterns(Collections.singletonList("logs-*")); + // Need to set this, because following an index in the same cluster + request.setFollowIndexNamePattern("copy-{{leader_index}}"); + if (randomBoolean()) { + request.setMaxWriteBufferSize(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxConcurrentReadBatches(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxConcurrentWriteBatches(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxBatchOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxOperationSizeInBytes(randomNonNegativeLong()); + } + if (randomBoolean()) { + request.setRetryTimeout(TimeValue.timeValueMillis(500)); + } + if (randomBoolean()) { + request.setIdleShardRetryDelay(TimeValue.timeValueMillis(500)); + } + assertTrue(client().execute(PutAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged()); + + createIndex("logs-201901", leaderIndexSettings, "_doc"); + assertBusy(() -> { + PersistentTasksCustomMetaData persistentTasksMetaData = + client().admin().cluster().prepareState().get().getState().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertThat(persistentTasksMetaData, notNullValue()); + assertThat(persistentTasksMetaData.tasks().size(), equalTo(1)); + ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTasksMetaData.tasks().iterator().next().getParams(); + assertThat(shardFollowTask.getLeaderShardId().getIndexName(), equalTo("logs-201901")); + assertThat(shardFollowTask.getFollowShardId().getIndexName(), equalTo("copy-logs-201901")); + if (request.getMaxWriteBufferSize() != null) { + assertThat(shardFollowTask.getMaxWriteBufferSize(), equalTo(request.getMaxWriteBufferSize())); + } + if (request.getMaxConcurrentReadBatches() != null) { + assertThat(shardFollowTask.getMaxConcurrentReadBatches(), equalTo(request.getMaxConcurrentReadBatches())); + } + if (request.getMaxConcurrentWriteBatches() != null) { + assertThat(shardFollowTask.getMaxConcurrentWriteBatches(), equalTo(request.getMaxConcurrentWriteBatches())); + } + if (request.getMaxBatchOperationCount() != null) { + assertThat(shardFollowTask.getMaxBatchOperationCount(), equalTo(request.getMaxBatchOperationCount())); + } + if (request.getMaxOperationSizeInBytes() != null) { + assertThat(shardFollowTask.getMaxBatchSizeInBytes(), equalTo(request.getMaxOperationSizeInBytes())); + } + if (request.getRetryTimeout() != null) { + assertThat(shardFollowTask.getRetryTimeout(), equalTo(request.getRetryTimeout())); + } + if (request.getIdleShardRetryDelay() != null) { + assertThat(shardFollowTask.getIdleShardRetryDelay(), 
equalTo(request.getIdleShardRetryDelay())); + } + }); + } + + private void putAutoFollowPatterns(String... patterns) { + PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); + request.setLeaderClusterAlias("_local_"); + request.setLeaderIndexPatterns(Arrays.asList(patterns)); + // Need to set this, because following an index in the same cluster + request.setFollowIndexNamePattern("copy-{{leader_index}}"); + assertTrue(client().execute(PutAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged()); + } + + private void deleteAutoFollowPatternSetting() { + DeleteAutoFollowPatternAction.Request request = new DeleteAutoFollowPatternAction.Request(); + request.setLeaderClusterAlias("_local_"); + assertTrue(client().execute(DeleteAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged()); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java new file mode 100644 index 00000000000..c68d1849965 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class CreateAndFollowIndexRequestTests extends AbstractStreamableTestCase { + + @Override + protected CreateAndFollowIndexAction.Request createBlankInstance() { + return new CreateAndFollowIndexAction.Request(); + } + + @Override + protected CreateAndFollowIndexAction.Request createTestInstance() { + return new CreateAndFollowIndexAction.Request(FollowIndexRequestTests.createTestRequest()); + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java new file mode 100644 index 00000000000..11a518ef067 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class CreateAndFollowIndexResponseTests extends AbstractStreamableTestCase { + + @Override + protected CreateAndFollowIndexAction.Response createBlankInstance() { + return new CreateAndFollowIndexAction.Response(); + } + + @Override + protected CreateAndFollowIndexAction.Response createTestInstance() { + return new CreateAndFollowIndexAction.Response(randomBoolean(), randomBoolean(), randomBoolean()); + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternRequestTests.java new file mode 100644 index 00000000000..0ca1b3d1278 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternRequestTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class DeleteAutoFollowPatternRequestTests extends AbstractStreamableTestCase { + + @Override + protected DeleteAutoFollowPatternAction.Request createBlankInstance() { + return new DeleteAutoFollowPatternAction.Request(); + } + + @Override + protected DeleteAutoFollowPatternAction.Request createTestInstance() { + DeleteAutoFollowPatternAction.Request request = new DeleteAutoFollowPatternAction.Request(); + request.setLeaderClusterAlias(randomAlphaOfLength(4)); + return request; + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java new file mode 100644 index 00000000000..5b52700f557 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData.State; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MapperTestUtils; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ccr.CcrSettings; +import org.elasticsearch.xpack.ccr.ShardChangesIT; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class FollowIndexActionTests extends ESTestCase { + + public void testValidation() throws IOException { + FollowIndexAction.Request request = ShardChangesIT.createFollowRequest("index1", "index2"); + { + // should fail, because leader index does not exist + Exception e = expectThrows(IllegalArgumentException.class, () -> FollowIndexAction.validate(request, null, null, null)); + assertThat(e.getMessage(), equalTo("leader index [index1] does not exist")); + } + { + // should fail, because follow index does not exist + IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY); + Exception e = expectThrows(IllegalArgumentException.class, () -> FollowIndexAction.validate(request, leaderIMD, null, null)); + assertThat(e.getMessage(), equalTo("follow index [index2] does not exist")); + } + { + // should fail because leader index does not have soft deletes enabled + IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY); + IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY); + Exception e = expectThrows(IllegalArgumentException.class, + () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + assertThat(e.getMessage(), equalTo("leader index [index1] does not have soft deletes enabled")); + } + { + // should fail because the number of primary shards between leader and follow index are not equal + IndexMetaData leaderIMD = createIMD("index1", 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + IndexMetaData followIMD = createIMD("index2", 4, Settings.EMPTY); + Exception e = expectThrows(IllegalArgumentException.class, + () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + assertThat(e.getMessage(), + equalTo("leader index primary shards [5] does not match with the number of shards of the follow index [4]")); + } + { + // should fail, because leader index is closed + IndexMetaData leaderIMD = createIMD("index1", State.CLOSE, "{}", 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + IndexMetaData followIMD = createIMD("index2", State.OPEN, "{}", 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + Exception e = expectThrows(IllegalArgumentException.class, + () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + assertThat(e.getMessage(), equalTo("leader and follow index must be open")); + } + { + // should fail, because leader has a field with the same name mapped as keyword and follower as text + IndexMetaData leaderIMD = createIMD("index1", State.OPEN, "{\"properties\": {\"field\": {\"type\": \"keyword\"}}}", 5, + Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + IndexMetaData followIMD = createIMD("index2", State.OPEN, "{\"properties\": {\"field\": {\"type\": \"text\"}}}", 5, + 
Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build());
+            MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2");
+            mapperService.updateMapping(null, followIMD);
+            Exception e = expectThrows(IllegalArgumentException.class,
+                () -> FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService));
+            assertThat(e.getMessage(), equalTo("mapper [field] of different type, current_type [text], merged_type [keyword]"));
+        }
+        {
+            // should fail because non-whitelisted settings are not the same between the leader and follow index
+            String mapping = "{\"properties\": {\"field\": {\"type\": \"text\", \"analyzer\": \"my_analyzer\"}}}";
+            IndexMetaData leaderIMD = createIMD("index1", State.OPEN, mapping, 5, Settings.builder()
+                .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")
+                .put("index.analysis.analyzer.my_analyzer.type", "custom")
+                .put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace").build());
+            IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder()
+                .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)
+                .put("index.analysis.analyzer.my_analyzer.type", "custom")
+                .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build());
+            Exception e = expectThrows(IllegalArgumentException.class,
+                () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null));
+            assertThat(e.getMessage(), equalTo("the leader and follower index settings must be identical"));
+        }
+        {
+            // should fail because the following index does not have the following_index setting enabled
+            IndexMetaData leaderIMD = createIMD("index1", 5,
+                Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build());
+            Settings followingIndexSettings = randomBoolean() ? 
Settings.EMPTY : + Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), false).build(); + IndexMetaData followIMD = createIMD("index2", 5, followingIndexSettings); + MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), + followingIndexSettings, "index2"); + mapperService.updateMapping(null, followIMD); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, + () -> FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService)); + assertThat(error.getMessage(), equalTo("the following index [index2] is not ready to follow; " + + "the setting [index.xpack.ccr.following_index] must be enabled.")); + } + { + // should succeed + IndexMetaData leaderIMD = createIMD("index1", 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + IndexMetaData followIMD = createIMD("index2", 5, Settings.builder() + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build()); + MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2"); + mapperService.updateMapping(null, followIMD); + FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); + } + { + // should succeed, index settings are identical + String mapping = "{\"properties\": {\"field\": {\"type\": \"text\", \"analyzer\": \"my_analyzer\"}}}"; + IndexMetaData leaderIMD = createIMD("index1", State.OPEN, mapping, 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true") + .put("index.analysis.analyzer.my_analyzer.type", "custom") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder() + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) + .put("index.analysis.analyzer.my_analyzer.type", "custom") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), + followIMD.getSettings(), "index2"); + mapperService.updateMapping(null, followIMD); + FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); + } + { + // should succeed despite whitelisted settings being different + String mapping = "{\"properties\": {\"field\": {\"type\": \"text\", \"analyzer\": \"my_analyzer\"}}}"; + IndexMetaData leaderIMD = createIMD("index1", State.OPEN, mapping, 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true") + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + .put("index.analysis.analyzer.my_analyzer.type", "custom") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder() + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "10s") + .put("index.analysis.analyzer.my_analyzer.type", "custom") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), + followIMD.getSettings(), "index2"); + mapperService.updateMapping(null, followIMD); + FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); + } + } + + private static IndexMetaData createIMD(String index, int numberOfShards, Settings 
settings) throws IOException { + return createIMD(index, State.OPEN, "{\"properties\": {}}", numberOfShards, settings); + } + + private static IndexMetaData createIMD(String index, State state, String mapping, int numberOfShards, + Settings settings) throws IOException { + return IndexMetaData.builder(index) + .settings(settings(Version.CURRENT).put(settings)) + .numberOfShards(numberOfShards) + .state(state) + .numberOfReplicas(0) + .setRoutingNumShards(numberOfShards) + .putMapping("_doc", mapping) + .build(); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java new file mode 100644 index 00000000000..7202f7202c6 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; + +public class FollowIndexRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected FollowIndexAction.Request createBlankInstance() { + return new FollowIndexAction.Request(); + } + + @Override + protected FollowIndexAction.Request createTestInstance() { + return createTestRequest(); + } + + @Override + protected FollowIndexAction.Request doParseInstance(XContentParser parser) throws IOException { + return FollowIndexAction.Request.fromXContent(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + static FollowIndexAction.Request createTestRequest() { + return new FollowIndexAction.Request(randomAlphaOfLength(4), randomAlphaOfLength(4), randomIntBetween(1, Integer.MAX_VALUE), + randomIntBetween(1, Integer.MAX_VALUE), randomNonNegativeLong(), randomIntBetween(1, Integer.MAX_VALUE), + randomIntBetween(1, Integer.MAX_VALUE), TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(500)); + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java new file mode 100644 index 00000000000..27760578db9 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; +import java.util.Arrays; + +public class PutAutoFollowPatternRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected PutAutoFollowPatternAction.Request doParseInstance(XContentParser parser) throws IOException { + return PutAutoFollowPatternAction.Request.fromXContent(parser, null); + } + + @Override + protected PutAutoFollowPatternAction.Request createBlankInstance() { + return new PutAutoFollowPatternAction.Request(); + } + + @Override + protected PutAutoFollowPatternAction.Request createTestInstance() { + PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); + request.setLeaderClusterAlias(randomAlphaOfLength(4)); + request.setLeaderIndexPatterns(Arrays.asList(generateRandomStringArray(4, 4, false))); + if (randomBoolean()) { + request.setFollowIndexNamePattern(randomAlphaOfLength(4)); + } + if (randomBoolean()) { + request.setIdleShardRetryDelay(TimeValue.timeValueMillis(500)); + } + if (randomBoolean()) { + request.setRetryTimeout(TimeValue.timeValueMillis(500)); + } + if (randomBoolean()) { + request.setMaxBatchOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxConcurrentReadBatches(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxConcurrentWriteBatches(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxOperationSizeInBytes(randomNonNegativeLong()); + } + if (randomBoolean()) { + request.setMaxWriteBufferSize(randomIntBetween(0, Integer.MAX_VALUE)); + } + return request; + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java new file mode 100644 index 00000000000..430e9cb48b1 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.ccr.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.TestShardRouting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardNotStartedException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardNotFoundException;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.mockito.Mockito;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class ShardChangesActionTests extends ESSingleNodeTestCase {
+
+    @Override
+    protected boolean resetNodeAfterTest() {
+        return true;
+    }
+
+    public void testGetOperations() throws Exception {
+        final Settings settings = Settings.builder()
+            .put("index.number_of_shards", 1)
+            .put("index.number_of_replicas", 0)
+            .build();
+        final IndexService indexService = createIndex("index", settings);
+
+        final int numWrites = randomIntBetween(2, 4096);
+        for (int i = 0; i < numWrites; i++) {
+            client().prepareIndex("index", "doc", Integer.toString(i)).setSource("{}", XContentType.JSON).get();
+        }
+
+        // A number of times, get operations within a range that exists:
+        int iters = randomIntBetween(8, 32);
+        IndexShard indexShard = indexService.getShard(0);
+        for (int iter = 0; iter < iters; iter++) {
+            int min = randomIntBetween(0, numWrites - 1);
+            int max = randomIntBetween(min, numWrites - 1);
+            int size = max - min + 1;
+            final Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard,
+                indexShard.getGlobalCheckpoint(), min, size, Long.MAX_VALUE);
+            final List<Long> seenSeqNos = Arrays.stream(operations).map(Translog.Operation::seqNo).collect(Collectors.toList());
+            final List<Long> expectedSeqNos = LongStream.rangeClosed(min, max).boxed().collect(Collectors.toList());
+            assertThat(seenSeqNos, equalTo(expectedSeqNos));
+        }
+
+        // get operations for a range where no operations exist:
+        Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(),
+            numWrites, numWrites + 1, Long.MAX_VALUE);
+        assertThat(operations.length, equalTo(0));
+
+        // get operations for a range where some operations do not exist:
+        operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(),
+            numWrites - 10, numWrites + 10, Long.MAX_VALUE);
+        assertThat(operations.length, equalTo(10));
+    }
+
+    public void testGetOperationsWhenShardNotStarted() throws Exception {
+        IndexShard indexShard = Mockito.mock(IndexShard.class);
+
+        ShardRouting shardRouting = TestShardRouting.newShardRouting("index", 0, "_node_id", true, ShardRoutingState.INITIALIZING);
+        Mockito.when(indexShard.routingEntry()).thenReturn(shardRouting);
+        expectThrows(IndexShardNotStartedException.class, () -> ShardChangesAction.getOperations(indexShard,
+            indexShard.getGlobalCheckpoint(), 0, 
1, Long.MAX_VALUE)); + } + + public void testGetOperationsExceedByteLimit() throws Exception { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .build(); + final IndexService indexService = createIndex("index", settings); + + final long numWrites = 32; + for (int i = 0; i < numWrites; i++) { + client().prepareIndex("index", "doc", Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + } + + final IndexShard indexShard = indexService.getShard(0); + final Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), + 0, 12, 256); + assertThat(operations.length, equalTo(12)); + assertThat(operations[0].seqNo(), equalTo(0L)); + assertThat(operations[1].seqNo(), equalTo(1L)); + assertThat(operations[2].seqNo(), equalTo(2L)); + assertThat(operations[3].seqNo(), equalTo(3L)); + assertThat(operations[4].seqNo(), equalTo(4L)); + assertThat(operations[5].seqNo(), equalTo(5L)); + assertThat(operations[6].seqNo(), equalTo(6L)); + assertThat(operations[7].seqNo(), equalTo(7L)); + assertThat(operations[8].seqNo(), equalTo(8L)); + assertThat(operations[9].seqNo(), equalTo(9L)); + assertThat(operations[10].seqNo(), equalTo(10L)); + assertThat(operations[11].seqNo(), equalTo(11L)); + } + + public void testGetOperationsAlwaysReturnAtLeastOneOp() throws Exception { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .build(); + final IndexService indexService = createIndex("index", settings); + + client().prepareIndex("index", "doc", "0").setSource("{}", XContentType.JSON).get(); + + final IndexShard indexShard = indexService.getShard(0); + final Translog.Operation[] operations = + ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), 0, 1, 0); + assertThat(operations.length, equalTo(1)); + assertThat(operations[0].seqNo(), equalTo(0L)); + } + + public void testIndexNotFound() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference reference = new AtomicReference<>(); + final ShardChangesAction.TransportAction transportAction = node().injector().getInstance(ShardChangesAction.TransportAction.class); + transportAction.execute( + new ShardChangesAction.Request(new ShardId(new Index("non-existent", "uuid"), 0)), + new ActionListener() { + @Override + public void onResponse(final ShardChangesAction.Response response) { + fail(); + } + + @Override + public void onFailure(final Exception e) { + reference.set(e); + latch.countDown(); + } + }); + latch.await(); + assertNotNull(reference.get()); + assertThat(reference.get(), instanceOf(IndexNotFoundException.class)); + } + + public void testShardNotFound() throws InterruptedException { + final int numberOfShards = randomIntBetween(1, 5); + final IndexService indexService = createIndex("index", Settings.builder().put("index.number_of_shards", numberOfShards).build()); + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference reference = new AtomicReference<>(); + final ShardChangesAction.TransportAction transportAction = node().injector().getInstance(ShardChangesAction.TransportAction.class); + transportAction.execute( + new ShardChangesAction.Request(new ShardId(indexService.getMetaData().getIndex(), numberOfShards)), + new ActionListener() { + @Override + public void onResponse(final ShardChangesAction.Response response) { + fail(); + } + + @Override + public void 
onFailure(final Exception e) { + reference.set(e); + latch.countDown(); + } + }); + latch.await(); + assertNotNull(reference.get()); + assertThat(reference.get(), instanceOf(ShardNotFoundException.class)); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesRequestTests.java new file mode 100644 index 00000000000..19585da8851 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesRequestTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.AbstractStreamableTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.nullValue; + +public class ShardChangesRequestTests extends AbstractStreamableTestCase { + + @Override + protected ShardChangesAction.Request createTestInstance() { + ShardChangesAction.Request request = new ShardChangesAction.Request(new ShardId("_index", "_indexUUID", 0)); + request.setMaxOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + request.setFromSeqNo(randomNonNegativeLong()); + return request; + } + + @Override + protected ShardChangesAction.Request createBlankInstance() { + return new ShardChangesAction.Request(); + } + + public void testValidate() { + ShardChangesAction.Request request = new ShardChangesAction.Request(new ShardId("_index", "_indexUUID", 0)); + request.setFromSeqNo(-1); + assertThat(request.validate().getMessage(), containsString("fromSeqNo [-1] cannot be lower than 0")); + + request.setFromSeqNo(0); + request.setMaxOperationCount(-1); + assertThat(request.validate().getMessage(), containsString("maxOperationCount [-1] cannot be lower than 0")); + + request.setMaxOperationCount(8); + assertThat(request.validate(), nullValue()); + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java new file mode 100644 index 00000000000..e9c67097d72 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class ShardChangesResponseTests extends AbstractStreamableTestCase { + + @Override + protected ShardChangesAction.Response createTestInstance() { + final long mappingVersion = randomNonNegativeLong(); + final long leaderGlobalCheckpoint = randomNonNegativeLong(); + final long leaderMaxSeqNo = randomLongBetween(leaderGlobalCheckpoint, Long.MAX_VALUE); + final int numOps = randomInt(8); + final Translog.Operation[] operations = new Translog.Operation[numOps]; + for (int i = 0; i < numOps; i++) { + operations[i] = new Translog.NoOp(i, 0, "test"); + } + return new ShardChangesAction.Response(mappingVersion, leaderGlobalCheckpoint, leaderMaxSeqNo, operations); + } + + @Override + protected ShardChangesAction.Response createBlankInstance() { + return new ShardChangesAction.Response(); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java new file mode 100644 index 00000000000..9bfd6b9d6ef --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java @@ -0,0 +1,298 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.seqno.LocalCheckpointTracker; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.LongConsumer; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class ShardFollowNodeTaskRandomTests extends ESTestCase { + + public void testSingleReaderWriter() throws Exception { + TestRun testRun = createTestRun(randomNonNegativeLong(), randomNonNegativeLong(), randomIntBetween(1, 2048)); + ShardFollowNodeTask task = createShardFollowTask(1, testRun); + startAndAssertAndStopTask(task, testRun); + } + + public void testMultipleReaderWriter() throws Exception { + int concurrency = randomIntBetween(2, 8); + TestRun testRun = createTestRun(0, 0, between(1, 1024)); + ShardFollowNodeTask task = createShardFollowTask(concurrency, testRun); + startAndAssertAndStopTask(task, testRun); + } + + private void startAndAssertAndStopTask(ShardFollowNodeTask task, TestRun testRun) throws Exception { + task.start(testRun.startSeqNo - 1, 
testRun.startSeqNo - 1, testRun.startSeqNo - 1, testRun.startSeqNo - 1);
+        assertBusy(() -> {
+            ShardFollowNodeTask.Status status = task.getStatus();
+            assertThat(status.leaderGlobalCheckpoint(), equalTo(testRun.finalExpectedGlobalCheckpoint));
+            assertThat(status.followerGlobalCheckpoint(), equalTo(testRun.finalExpectedGlobalCheckpoint));
+            final long numberOfFailedFetches =
+                testRun.responses.values().stream().flatMap(List::stream).filter(f -> f.exception != null).count();
+            assertThat(status.numberOfFailedFetches(), equalTo(numberOfFailedFetches));
+            // the failures were able to be retried so fetch failures should have cleared
+            assertThat(status.fetchExceptions().entrySet(), hasSize(0));
+            assertThat(status.mappingVersion(), equalTo(testRun.finalMappingVersion));
+        });
+
+        task.markAsCompleted();
+        assertBusy(() -> {
+            ShardFollowNodeTask.Status status = task.getStatus();
+            assertThat(status.numberOfConcurrentReads(), equalTo(0));
+            assertThat(status.numberOfConcurrentWrites(), equalTo(0));
+        });
+    }
+
+    private ShardFollowNodeTask createShardFollowTask(int concurrency, TestRun testRun) {
+        AtomicBoolean stopped = new AtomicBoolean(false);
+        ShardFollowTask params = new ShardFollowTask(null, new ShardId("follow_index", "", 0),
+            new ShardId("leader_index", "", 0), testRun.maxOperationCount, concurrency,
+            ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, concurrency, 10240,
+            TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap());
+
+        ThreadPool threadPool = new TestThreadPool(getClass().getSimpleName());
+        BiConsumer<TimeValue, Runnable> scheduler = (delay, task) -> {
+            assert delay.millis() < 100 : "The delay should be kept to a minimum, so that this test does not take too long to run";
+            if (stopped.get() == false) {
+                threadPool.schedule(delay, ThreadPool.Names.GENERIC, task);
+            }
+        };
+        List<Translog.Operation> receivedOperations = Collections.synchronizedList(new ArrayList<>());
+        LocalCheckpointTracker tracker = new LocalCheckpointTracker(testRun.startSeqNo - 1, testRun.startSeqNo - 1);
+        return new ShardFollowNodeTask(
+            1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), params, scheduler, System::nanoTime) {
+
+            private volatile long mappingVersion = 0L;
+            private final Map<Long, Integer> fromToSlot = new HashMap<>();
+
+            @Override
+            protected void innerUpdateMapping(LongConsumer handler, Consumer<Exception> errorHandler) {
+                handler.accept(mappingVersion);
+            }
+
+            @Override
+            protected void innerSendBulkShardOperationsRequest(
+                List<Translog.Operation> operations,
+                Consumer<BulkShardOperationsResponse> handler,
+                Consumer<Exception> errorHandler) {
+                for (Translog.Operation op : operations) {
+                    tracker.markSeqNoAsCompleted(op.seqNo());
+                }
+                receivedOperations.addAll(operations);
+
+                // Emulate network thread and avoid SO:
+                final BulkShardOperationsResponse response = new BulkShardOperationsResponse();
+                response.setGlobalCheckpoint(tracker.getCheckpoint());
+                response.setMaxSeqNo(tracker.getMaxSeqNo());
+                threadPool.generic().execute(() -> handler.accept(response));
+            }
+
+            @Override
+            protected void innerSendShardChangesRequest(long from, int maxOperationCount, Consumer<ShardChangesAction.Response> handler,
+                                                        Consumer<Exception> errorHandler) {
+
+                // Emulate network thread and avoid SO:
+                Runnable task = () -> {
+                    List<TestResponse> items = testRun.responses.get(from);
+                    if (items != null) {
+                        final TestResponse testResponse;
+                        synchronized (fromToSlot) {
+                            int slot;
+                            if (fromToSlot.get(from) == null) {
+                                slot = fromToSlot.getOrDefault(from, 0);
+                                fromToSlot.put(from, slot);
+                            } else {
+                                slot = fromToSlot.get(from);
+                            }
+                            testResponse = items.get(slot);
+                            fromToSlot.put(from, ++slot);
+                            // if too many invocations occur with the same from then an AOBE occurs; that is ok, because it means
+                            // something is wrong.
+                        }
+                        mappingVersion = testResponse.mappingVersion;
+                        if (testResponse.exception != null) {
+                            errorHandler.accept(testResponse.exception);
+                        } else {
+                            handler.accept(testResponse.response);
+                        }
+                    } else {
+                        assert from >= testRun.finalExpectedGlobalCheckpoint;
+                        final long globalCheckpoint = tracker.getCheckpoint();
+                        final long maxSeqNo = tracker.getMaxSeqNo();
+                        handler.accept(new ShardChangesAction.Response(0L, globalCheckpoint, maxSeqNo, new Translog.Operation[0]));
+                    }
+                };
+                threadPool.generic().execute(task);
+            }
+
+            @Override
+            protected boolean isStopped() {
+                return stopped.get();
+            }
+
+            @Override
+            public void markAsCompleted() {
+                stopped.set(true);
+                tearDown();
+            }
+
+            @Override
+            public void markAsFailed(Exception e) {
+                stopped.set(true);
+                tearDown();
+            }
+
+            private void tearDown() {
+                threadPool.shutdown();
+                List<Translog.Operation> expectedOperations = testRun.responses.values().stream()
+                    .flatMap(List::stream)
+                    .map(testResponse -> testResponse.response)
+                    .filter(Objects::nonNull)
+                    .flatMap(response -> Arrays.stream(response.getOperations()))
+                    .sorted(Comparator.comparingLong(Translog.Operation::seqNo))
+                    .collect(Collectors.toList());
+                assertThat(receivedOperations.size(), equalTo(expectedOperations.size()));
+                receivedOperations.sort(Comparator.comparingLong(Translog.Operation::seqNo));
+                for (int i = 0; i < receivedOperations.size(); i++) {
+                    Translog.Operation actual = receivedOperations.get(i);
+                    Translog.Operation expected = expectedOperations.get(i);
+                    assertThat(actual, equalTo(expected));
+                }
+            }
+        };
+    }
+
+    private static TestRun createTestRun(long startSeqNo, long startMappingVersion, int maxOperationCount) {
+        long prevGlobalCheckpoint = startSeqNo;
+        long mappingVersion = startMappingVersion;
+        int numResponses = randomIntBetween(16, 256);
+        Map<Long, List<TestResponse>> responses = new HashMap<>(numResponses);
+        for (int i = 0; i < numResponses; i++) {
+            long nextGlobalCheckPoint = prevGlobalCheckpoint + maxOperationCount;
+            if (sometimes()) {
+                mappingVersion++;
+            }
+
+            if (sometimes()) {
+                List<TestResponse> item = new ArrayList<>();
+                // Sometimes add a random retryable error
+                if (sometimes()) {
+                    Exception error = new UnavailableShardsException(new ShardId("test", "test", 0), "");
+                    item.add(new TestResponse(error, mappingVersion, null));
+                }
+                List<Translog.Operation> ops = new ArrayList<>();
+                for (long seqNo = prevGlobalCheckpoint; seqNo <= nextGlobalCheckPoint; seqNo++) {
+                    String id = UUIDs.randomBase64UUID();
+                    byte[] source = "{}".getBytes(StandardCharsets.UTF_8);
+                    ops.add(new Translog.Index("doc", id, seqNo, 0, source));
+                }
+                item.add(new TestResponse(null, mappingVersion,
+                    new ShardChangesAction.Response(mappingVersion, nextGlobalCheckPoint, nextGlobalCheckPoint, ops.toArray(EMPTY))));
+                responses.put(prevGlobalCheckpoint, item);
+            } else {
+                // Simulates a leader shard copy not having all the operations the shard follow task thinks it has by
+                // splitting up a response into multiple responses AND simulates maxBatchSizeInBytes limit being reached:
+                long toSeqNo;
+                for (long fromSeqNo = prevGlobalCheckpoint; fromSeqNo <= nextGlobalCheckPoint; fromSeqNo = toSeqNo + 1) {
+                    toSeqNo = randomLongBetween(fromSeqNo, nextGlobalCheckPoint);
+                    List<TestResponse> item = new ArrayList<>();
+                    // Sometimes add a random retryable error
+                    if (sometimes()) {
+                        Exception error = new UnavailableShardsException(new ShardId("test", "test", 0), "");
+                        item.add(new TestResponse(error, mappingVersion, null));
+                    }
+                    // Sometimes add an empty shard changes response to also simulate a leader shard lagging behind
+                    if (sometimes()) {
+                        ShardChangesAction.Response response =
+                            new ShardChangesAction.Response(mappingVersion, prevGlobalCheckpoint, prevGlobalCheckpoint, EMPTY);
+                        item.add(new TestResponse(null, mappingVersion, response));
+                    }
+                    List<Translog.Operation> ops = new ArrayList<>();
+                    for (long seqNo = fromSeqNo; seqNo <= toSeqNo; seqNo++) {
+                        String id = UUIDs.randomBase64UUID();
+                        byte[] source = "{}".getBytes(StandardCharsets.UTF_8);
+                        ops.add(new Translog.Index("doc", id, seqNo, 0, source));
+                    }
+                    // Report toSeqNo to simulate maxBatchSizeInBytes limit being met or last op to simulate a shard lagging behind:
+                    long localLeaderGCP = randomBoolean() ? ops.get(ops.size() - 1).seqNo() : toSeqNo;
+                    ShardChangesAction.Response response =
+                        new ShardChangesAction.Response(mappingVersion, localLeaderGCP, localLeaderGCP, ops.toArray(EMPTY));
+                    item.add(new TestResponse(null, mappingVersion, response));
+                    responses.put(fromSeqNo, Collections.unmodifiableList(item));
+                }
+            }
+            prevGlobalCheckpoint = nextGlobalCheckPoint + 1;
+        }
+        return new TestRun(maxOperationCount, startSeqNo, startMappingVersion, mappingVersion,
+            prevGlobalCheckpoint - 1, responses);
+    }
+
+    // Instead of rarely(), which returns true very rarely, especially when not running in nightly mode or when no multiplier has been set
+    private static boolean sometimes() {
+        return randomIntBetween(0, 10) == 5;
+    }
+
+    private static class TestRun {
+
+        final int maxOperationCount;
+        final long startSeqNo;
+        final long startMappingVersion;
+
+        final long finalMappingVersion;
+        final long finalExpectedGlobalCheckpoint;
+        final Map<Long, List<TestResponse>> responses;
+
+        private TestRun(int maxOperationCount, long startSeqNo, long startMappingVersion, long finalMappingVersion,
+                        long finalExpectedGlobalCheckpoint, Map<Long, List<TestResponse>> responses) {
+            this.maxOperationCount = maxOperationCount;
+            this.startSeqNo = startSeqNo;
+            this.startMappingVersion = startMappingVersion;
+            this.finalMappingVersion = finalMappingVersion;
+            this.finalExpectedGlobalCheckpoint = finalExpectedGlobalCheckpoint;
+            this.responses = Collections.unmodifiableMap(responses);
+        }
+    }
+
+    private static class TestResponse {
+
+        final Exception exception;
+        final long mappingVersion;
+        final ShardChangesAction.Response response;
+
+        private TestResponse(Exception exception, long mappingVersion, ShardChangesAction.Response response) {
+            this.exception = exception;
+            this.mappingVersion = mappingVersion;
+            this.response = response;
+        }
+    }
+
+    private static final Translog.Operation[] EMPTY = new Translog.Operation[0];
+
+}
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java
new file mode 100644
index 00000000000..8368a818e00
--- /dev/null
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.Map; +import java.util.NavigableMap; +import java.util.TreeMap; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class ShardFollowNodeTaskStatusTests extends AbstractSerializingTestCase { + + @Override + protected ShardFollowNodeTask.Status doParseInstance(XContentParser parser) throws IOException { + return ShardFollowNodeTask.Status.fromXContent(parser); + } + + @Override + protected ShardFollowNodeTask.Status createTestInstance() { + // if you change this constructor, reflect the changes in the hand-written assertions below + return new ShardFollowNodeTask.Status( + randomAlphaOfLength(4), + randomInt(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomReadExceptions(), + randomLong()); + } + + @Override + protected void assertEqualInstances(final ShardFollowNodeTask.Status expectedInstance, final ShardFollowNodeTask.Status newInstance) { + assertNotSame(expectedInstance, newInstance); + assertThat(newInstance.leaderIndex(), equalTo(expectedInstance.leaderIndex())); + assertThat(newInstance.getShardId(), equalTo(expectedInstance.getShardId())); + assertThat(newInstance.leaderGlobalCheckpoint(), equalTo(expectedInstance.leaderGlobalCheckpoint())); + assertThat(newInstance.leaderMaxSeqNo(), equalTo(expectedInstance.leaderMaxSeqNo())); + assertThat(newInstance.followerGlobalCheckpoint(), equalTo(expectedInstance.followerGlobalCheckpoint())); + assertThat(newInstance.lastRequestedSeqNo(), equalTo(expectedInstance.lastRequestedSeqNo())); + assertThat(newInstance.numberOfConcurrentReads(), equalTo(expectedInstance.numberOfConcurrentReads())); + assertThat(newInstance.numberOfConcurrentWrites(), equalTo(expectedInstance.numberOfConcurrentWrites())); + assertThat(newInstance.numberOfQueuedWrites(), equalTo(expectedInstance.numberOfQueuedWrites())); + assertThat(newInstance.mappingVersion(), equalTo(expectedInstance.mappingVersion())); + assertThat(newInstance.totalFetchTimeMillis(), equalTo(expectedInstance.totalFetchTimeMillis())); + assertThat(newInstance.numberOfSuccessfulFetches(), equalTo(expectedInstance.numberOfSuccessfulFetches())); + assertThat(newInstance.numberOfFailedFetches(), equalTo(expectedInstance.numberOfFailedFetches())); + assertThat(newInstance.operationsReceived(), equalTo(expectedInstance.operationsReceived())); + assertThat(newInstance.totalTransferredBytes(), equalTo(expectedInstance.totalTransferredBytes())); + assertThat(newInstance.totalIndexTimeMillis(), equalTo(expectedInstance.totalIndexTimeMillis())); + assertThat(newInstance.numberOfSuccessfulBulkOperations(), 
+            equalTo(expectedInstance.numberOfSuccessfulBulkOperations()));
+        assertThat(newInstance.numberOfFailedBulkOperations(), equalTo(expectedInstance.numberOfFailedBulkOperations()));
+        assertThat(newInstance.numberOfOperationsIndexed(), equalTo(expectedInstance.numberOfOperationsIndexed()));
+        assertThat(newInstance.fetchExceptions().size(), equalTo(expectedInstance.fetchExceptions().size()));
+        assertThat(newInstance.fetchExceptions().keySet(), equalTo(expectedInstance.fetchExceptions().keySet()));
+        for (final Map.Entry<Long, ElasticsearchException> entry : newInstance.fetchExceptions().entrySet()) {
+            // x-content loses the exception
+            final ElasticsearchException expected = expectedInstance.fetchExceptions().get(entry.getKey());
+            assertThat(entry.getValue().getMessage(), containsString(expected.getMessage()));
+            assertNotNull(entry.getValue().getCause());
+            assertThat(
+                entry.getValue().getCause(),
+                anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class)));
+            assertThat(entry.getValue().getCause().getMessage(), containsString(expected.getCause().getMessage()));
+        }
+        assertThat(newInstance.timeSinceLastFetchMillis(), equalTo(expectedInstance.timeSinceLastFetchMillis()));
+    }
+
+    @Override
+    protected boolean assertToXContentEquivalence() {
+        return false;
+    }
+
+    private NavigableMap<Long, ElasticsearchException> randomReadExceptions() {
+        final int count = randomIntBetween(0, 16);
+        final NavigableMap<Long, ElasticsearchException> readExceptions = new TreeMap<>();
+        for (int i = 0; i < count; i++) {
+            readExceptions.put(randomNonNegativeLong(), new ElasticsearchException(new IllegalStateException("index [" + i + "]")));
+        }
+        return readExceptions;
+    }
+
+    @Override
+    protected Writeable.Reader<ShardFollowNodeTask.Status> instanceReader() {
+        return ShardFollowNodeTask.Status::new;
+    }
+
+}
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java
new file mode 100644
index 00000000000..4f7c0bf1664
--- /dev/null
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java
@@ -0,0 +1,826 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ccr.action;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardNotFoundException;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse;
+
+import java.net.ConnectException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.LongConsumer;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.sameInstance;
+
+public class ShardFollowNodeTaskTests extends ESTestCase {
+
+    private Exception fatalError;
+    private List<long[]> shardChangesRequests;
+    private List<List<Translog.Operation>> bulkShardOperationRequests;
+    private BiConsumer<TimeValue, Runnable> scheduler = (delay, task) -> task.run();
+
+    private Consumer<ShardFollowNodeTask.Status> beforeSendShardChangesRequest = status -> {};
+
+    private AtomicBoolean simulateResponse = new AtomicBoolean();
+
+    private Queue<Exception> readFailures;
+    private Queue<Exception> writeFailures;
+    private Queue<Exception> mappingUpdateFailures;
+    private Queue<Long> mappingVersions;
+    private Queue<Long> leaderGlobalCheckpoints;
+    private Queue<Long> followerGlobalCheckpoints;
+    private Queue<Long> maxSeqNos;
+
+    public void testCoordinateReads() {
+        ShardFollowNodeTask task = createShardFollowTask(8, between(8, 20), between(1, 20), Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 3, -1);
+        task.coordinateReads();
+        assertThat(shardChangesRequests, contains(new long[]{0L, 8L})); // treat this as a peek request
+        shardChangesRequests.clear();
+        task.innerHandleReadResponse(0, 5L, generateShardChangesResponse(0, 5L, 0L, 60L));
+        assertThat(shardChangesRequests, contains(new long[][]{
+            {6L, 8L}, {14L, 8L}, {22L, 8L}, {30L, 8L}, {38L, 8L}, {46L, 8L}, {54L, 7L}}
+        ));
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(7));
+        assertThat(status.lastRequestedSeqNo(), equalTo(60L));
+    }
+
+    public void testWriteBuffer() {
+        // Need to set concurrentWrites to 0, otherwise the write buffer gets flushed immediately:
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 0, 32, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        shardChangesRequests.clear();
+        // Also invokes the coordinateReads() method:
+        task.innerHandleReadResponse(0L, 63L, generateShardChangesResponse(0, 63, 0L, 128L));
+        assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because the write buffer is full
+
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(0));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(0));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(128L));
+    }
+
+    public void testMaxConcurrentReads() {
+        ShardFollowNodeTask task = createShardFollowTask(8, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 64, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(8L));
+
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.lastRequestedSeqNo(), equalTo(7L));
+    }
+
+    public void testTaskCancelled() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 64, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        shardChangesRequests.clear();
+        // The call to updateMapping is a noop, so nothing happens.
+        task.start(128L, 128L, task.getStatus().followerGlobalCheckpoint(), task.getStatus().followerMaxSeqNo());
+        task.markAsCompleted();
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(0));
+    }
+
+    public void testTaskCancelledAfterReadLimitHasBeenReached() {
+        ShardFollowNodeTask task = createShardFollowTask(16, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 31, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(16L));
+
+        task.markAsCompleted();
+        shardChangesRequests.clear();
+        // Also invokes the coordinateReads() method:
+        task.innerHandleReadResponse(0L, 15L, generateShardChangesResponse(0, 15, 0L, 31L));
+        assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because the task has been cancelled
+        assertThat(bulkShardOperationRequests.size(), equalTo(0)); // no more writes, because the task has been cancelled
+
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(0));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(0));
+        assertThat(status.lastRequestedSeqNo(), equalTo(15L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(31L));
+        assertThat(status.followerGlobalCheckpoint(), equalTo(-1L));
+    }
+
+    public void testTaskCancelledAfterWriteBufferLimitHasBeenReached() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, 32, Long.MAX_VALUE);
+        startTask(task, 64, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        task.markAsCompleted();
+        shardChangesRequests.clear();
+        // Also invokes the coordinateReads() method:
+        task.innerHandleReadResponse(0L, 63L, generateShardChangesResponse(0, 63, 0L, 128L));
+        assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because the task has been cancelled
+        assertThat(bulkShardOperationRequests.size(), equalTo(0)); // no more writes, because the task has been cancelled
+
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(0));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(0));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(128L));
+        assertThat(status.followerGlobalCheckpoint(), equalTo(-1L));
+    }
+
+    public void testReceiveRetryableError() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        int max = randomIntBetween(1, 10);
+        for (int i = 0; i < max; i++) {
+            readFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0)));
+        }
+        mappingVersions.add(1L);
+        leaderGlobalCheckpoints.add(63L);
+        maxSeqNos.add(63L);
+        simulateResponse.set(true);
+        final AtomicLong retryCounter = new AtomicLong();
+        // before each retry, we assert the fetch failures; after the last retry, the fetch failure should clear
+        beforeSendShardChangesRequest = status -> {
+            assertThat(status.numberOfFailedFetches(), equalTo(retryCounter.get()));
+            if (retryCounter.get() > 0) {
+                assertThat(status.fetchExceptions().entrySet(), hasSize(1));
+                final Map.Entry<Long, ElasticsearchException> entry = status.fetchExceptions().entrySet().iterator().next();
+                assertThat(entry.getKey(), equalTo(0L));
+                assertThat(entry.getValue(), instanceOf(ElasticsearchException.class));
+                assertNotNull(entry.getValue().getCause());
+                assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class));
+                final ShardNotFoundException cause = (ShardNotFoundException) entry.getValue().getCause();
+                assertThat(cause.getShardId().getIndexName(), equalTo("leader_index"));
+                assertThat(cause.getShardId().getId(), equalTo(0));
+            }
+            retryCounter.incrementAndGet();
+        };
+        task.coordinateReads();
+
+        // Number of requests is equal to the initial request + retried attempts
+        assertThat(shardChangesRequests.size(), equalTo(max + 1));
+        for (long[] shardChangesRequest : shardChangesRequests) {
+            assertThat(shardChangesRequest[0], equalTo(0L));
+            assertThat(shardChangesRequest[1], equalTo(64L));
+        }
+
+        assertFalse("task is not stopped", task.isStopped());
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(0));
+        assertThat(status.numberOfFailedFetches(), equalTo((long) max));
+        assertThat(status.numberOfSuccessfulFetches(), equalTo(1L));
+        // the fetch failure has cleared
+        assertThat(status.fetchExceptions().entrySet(), hasSize(0));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+    }
+
+    public void testReceiveRetryableErrorRetriedTooManyTimes() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        int max = randomIntBetween(11, 32);
+        for (int i = 0; i < max; i++) {
+            readFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0)));
+        }
+        final AtomicLong retryCounter = new AtomicLong();
+        // before each retry, we assert the fetch failures; after the last retry, the fetch failure should persist
+        beforeSendShardChangesRequest = status -> {
+            assertThat(status.numberOfFailedFetches(), equalTo(retryCounter.get()));
+            if (retryCounter.get() > 0) {
+                assertThat(status.fetchExceptions().entrySet(), hasSize(1));
+                final Map.Entry<Long, ElasticsearchException> entry = status.fetchExceptions().entrySet().iterator().next();
+                assertThat(entry.getKey(), equalTo(0L));
+                assertThat(entry.getValue(), instanceOf(ElasticsearchException.class));
+                assertNotNull(entry.getValue().getCause());
+                assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class));
+                final ShardNotFoundException cause =
+                    (ShardNotFoundException) entry.getValue().getCause();
+                assertThat(cause.getShardId().getIndexName(), equalTo("leader_index"));
+                assertThat(cause.getShardId().getId(), equalTo(0));
+            }
+            retryCounter.incrementAndGet();
+        };
+        task.coordinateReads();
+
+        assertThat(shardChangesRequests.size(), equalTo(11));
+        for (long[] shardChangesRequest : shardChangesRequests) {
+            assertThat(shardChangesRequest[0], equalTo(0L));
+            assertThat(shardChangesRequest[1], equalTo(64L));
+        }
+
+        assertTrue("task is stopped", task.isStopped());
+        assertThat(fatalError, notNullValue());
+        assertThat(fatalError.getMessage(), containsString("retrying failed ["));
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(0));
+        assertThat(status.numberOfFailedFetches(), equalTo(11L));
+        assertThat(status.fetchExceptions().entrySet(), hasSize(1));
+        final Map.Entry<Long, ElasticsearchException> entry = status.fetchExceptions().entrySet().iterator().next();
+        assertThat(entry.getKey(), equalTo(0L));
+        assertThat(entry.getValue(), instanceOf(ElasticsearchException.class));
+        assertNotNull(entry.getValue().getCause());
+        assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class));
+        final ShardNotFoundException cause = (ShardNotFoundException) entry.getValue().getCause();
+        assertThat(cause.getShardId().getIndexName(), equalTo("leader_index"));
+        assertThat(cause.getShardId().getId(), equalTo(0));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+    }
+
+    public void testReceiveNonRetryableError() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        Exception failure = new RuntimeException("replication failed");
+        readFailures.add(failure);
+        final AtomicBoolean invoked = new AtomicBoolean();
+        // since there will be only one failure, this should only be invoked once and there should not be a fetch failure
+        beforeSendShardChangesRequest = status -> {
+            if (invoked.compareAndSet(false, true)) {
+                assertThat(status.numberOfFailedFetches(), equalTo(0L));
+                assertThat(status.fetchExceptions().entrySet(), hasSize(0));
+            } else {
+                fail("invoked twice");
+            }
+        };
+        task.coordinateReads();
+
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        assertTrue("task is stopped", task.isStopped());
+        assertThat(fatalError, sameInstance(failure));
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(0));
+        assertThat(status.numberOfFailedFetches(), equalTo(1L));
+        assertThat(status.fetchExceptions().entrySet(), hasSize(1));
+        final Map.Entry<Long, ElasticsearchException> entry = status.fetchExceptions().entrySet().iterator().next();
+        assertThat(entry.getKey(), equalTo(0L));
+        assertThat(entry.getValue(), instanceOf(ElasticsearchException.class));
+        assertNotNull(entry.getValue().getCause());
+        assertThat(entry.getValue().getCause(), instanceOf(RuntimeException.class));
+        final RuntimeException cause = (RuntimeException) entry.getValue().getCause();
+        assertThat(cause.getMessage(), equalTo("replication failed"));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+    }
+
+    public void testHandleReadResponse() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        task.coordinateReads();
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L);
+        task.innerHandleReadResponse(0L, 63L, response);
+
+        assertThat(bulkShardOperationRequests.size(), equalTo(1));
+        assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations())));
+
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.mappingVersion(), equalTo(0L));
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(1));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+        assertThat(status.followerGlobalCheckpoint(), equalTo(-1L));
+    }
+
+    public void testReceiveLessThanRequested() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        shardChangesRequests.clear();
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 20, 0L, 31L);
+        task.innerHandleReadResponse(0L, 63L, response);
+
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(21L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(43L));
+
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(1));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+    }
+
+    public void testCancelAndReceiveLessThanRequested() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        shardChangesRequests.clear();
+        task.markAsCompleted();
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 31, 0L, 31L);
+        task.innerHandleReadResponse(0L, 64L, response);
+
+        assertThat(shardChangesRequests.size(), equalTo(0));
+        assertThat(bulkShardOperationRequests.size(), equalTo(0));
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(0));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(0));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+    }
+
+    public void testReceiveNothingExpectedSomething() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        shardChangesRequests.clear();
+        task.innerHandleReadResponse(0L, 63L, new ShardChangesAction.Response(0, 0, 0, new Translog.Operation[0]));
+
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(0));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+    }
+
+    public void testDelayCoordinatesRead() {
+        int[] counter = new int[]{0};
+        scheduler = (delay, task) -> {
+            counter[0]++;
+            task.run();
+        };
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        shardChangesRequests.clear();
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L);
+        // Also invokes coordinateReads()
+        task.innerHandleReadResponse(0L, 63L, response);
+        task.innerHandleReadResponse(64L, 63L,
+            new ShardChangesAction.Response(0, 63L, 63L, new Translog.Operation[0]));
+        assertThat(counter[0], equalTo(1));
+    }
+
+    public void testMappingUpdate() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        mappingVersions.add(1L);
+        task.coordinateReads();
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 1L, 63L);
+        task.handleReadResponse(0L, 63L, response);
+
+        assertThat(bulkShardOperationRequests.size(), equalTo(1));
+        assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations())));
+
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.mappingVersion(), equalTo(1L));
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(1));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+        assertThat(status.followerGlobalCheckpoint(), equalTo(-1L));
+    }
+
+    public void testMappingUpdateRetryableError() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        int max = randomIntBetween(1, 10);
+        for (int i = 0; i < max; i++) {
+            mappingUpdateFailures.add(new ConnectException());
+        }
+        mappingVersions.add(1L);
+        task.coordinateReads();
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 1L, 63L);
+        task.handleReadResponse(0L, 63L, response);
+
+        assertThat(mappingUpdateFailures.size(), equalTo(0));
+        assertThat(bulkShardOperationRequests.size(), equalTo(1));
+        assertThat(task.isStopped(), equalTo(false));
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.mappingVersion(), equalTo(1L));
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(1));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+    }
+
+    public void testMappingUpdateRetryableErrorRetriedTooManyTimes() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        int max = randomIntBetween(11, 20);
+        for (int i = 0; i < max; i++) {
+            mappingUpdateFailures.add(new ConnectException());
+        }
+        mappingVersions.add(1L);
+        task.coordinateReads();
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 64, 1L, 64L);
+        task.handleReadResponse(0L, 64L, response);
+
+        assertThat(mappingUpdateFailures.size(), equalTo(max - 11));
+        assertThat(mappingVersions.size(), equalTo(1));
+        assertThat(bulkShardOperationRequests.size(), equalTo(0));
+        assertThat(task.isStopped(), equalTo(true));
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.mappingVersion(), equalTo(0L));
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(0));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+    }
+
+    public void testMappingUpdateNonRetryableError() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        mappingUpdateFailures.add(new RuntimeException());
+        task.coordinateReads();
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 64, 1L, 64L);
+        task.handleReadResponse(0L, 64L, response);
+
+        assertThat(bulkShardOperationRequests.size(), equalTo(0));
+        assertThat(task.isStopped(), equalTo(true));
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.mappingVersion(), equalTo(0L));
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(0));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+    }
+
+    public void testCoordinateWrites() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L);
+        // Also invokes coordinateWrites()
+        task.innerHandleReadResponse(0L, 63L, response);
+
+        assertThat(bulkShardOperationRequests.size(), equalTo(1));
+        assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations())));
+
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.numberOfConcurrentWrites(), equalTo(1));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+        assertThat(status.followerGlobalCheckpoint(), equalTo(-1L));
+    }
+
+    public void testMaxConcurrentWrites() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 2, Integer.MAX_VALUE, Long.MAX_VALUE);
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 256, 0L, 256L);
+        // Also invokes coordinateWrites()
+        task.innerHandleReadResponse(0L, 64L, response);
+
+        assertThat(bulkShardOperationRequests.size(), equalTo(2));
+        assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()).subList(0, 64)));
+        assertThat(bulkShardOperationRequests.get(1), equalTo(Arrays.asList(response.getOperations()).subList(64, 128)));
+
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentWrites(), equalTo(2));
+
+        task = createShardFollowTask(64, 1, 4, Integer.MAX_VALUE, Long.MAX_VALUE);
+        response = generateShardChangesResponse(0, 256, 0L, 256L);
+        // Also invokes coordinateWrites()
+        task.innerHandleReadResponse(0L, 64L, response);
+
+        assertThat(bulkShardOperationRequests.size(), equalTo(4));
+        assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()).subList(0, 64)));
+        assertThat(bulkShardOperationRequests.get(1), equalTo(Arrays.asList(response.getOperations()).subList(64, 128)));
+        assertThat(bulkShardOperationRequests.get(2), equalTo(Arrays.asList(response.getOperations()).subList(128, 192)));
+        assertThat(bulkShardOperationRequests.get(3), equalTo(Arrays.asList(response.getOperations()).subList(192, 256)));
+
+        status = task.getStatus();
+        assertThat(status.numberOfConcurrentWrites(), equalTo(4));
+    }
+
+    public void testMaxBatchOperationCount() {
+        ShardFollowNodeTask task = createShardFollowTask(8, 1, 32, Integer.MAX_VALUE, Long.MAX_VALUE);
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 256, 0L, 256L);
+        // Also invokes coordinateWrites()
+        task.innerHandleReadResponse(0L, 64L, response);
+
+        assertThat(bulkShardOperationRequests.size(), equalTo(32));
+        for (int i = 0; i < 32; i += 8) {
+            int offset = i * 8;
+            assertThat(bulkShardOperationRequests.get(i), equalTo(Arrays.asList(response.getOperations()).subList(offset, offset + 8)));
+        }
+
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentWrites(), equalTo(32));
+    }
+
+    public void testRetryableError() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        int max = randomIntBetween(1, 10);
+        for (int i = 0; i < max; i++) {
+            writeFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0)));
+        }
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L);
+        // Also invokes coordinateWrites()
+        task.innerHandleReadResponse(0L, 63L, response);
+
+        // Number of requests is equal to the initial request + retried attempts:
+        assertThat(bulkShardOperationRequests.size(), equalTo(max + 1));
+        for (List<Translog.Operation> operations : bulkShardOperationRequests) {
+            assertThat(operations, equalTo(Arrays.asList(response.getOperations())));
+        }
+        assertThat(task.isStopped(), equalTo(false));
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentWrites(), equalTo(1));
+        assertThat(status.followerGlobalCheckpoint(), equalTo(-1L));
+    }
+
+    public void testRetryableErrorRetriedTooManyTimes() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        int max = randomIntBetween(11, 32);
+        for (int i = 0; i < max; i++) {
+            writeFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0)));
+        }
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 643);
+        // Also invokes coordinateWrites()
+        task.innerHandleReadResponse(0L, 63L, response);
+
+        // Number of requests is equal to the initial request + retried attempts:
+        assertThat(bulkShardOperationRequests.size(), equalTo(11));
+        for (List<Translog.Operation> operations : bulkShardOperationRequests) {
+            assertThat(operations, equalTo(Arrays.asList(response.getOperations())));
+        }
+        assertThat(task.isStopped(), equalTo(true));
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentWrites(), equalTo(1));
+        assertThat(status.followerGlobalCheckpoint(), equalTo(-1L));
+    }
+
+    public void testNonRetryableError() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        writeFailures.add(new RuntimeException());
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L);
+        // Also invokes coordinateWrites()
+        task.innerHandleReadResponse(0L, 63L, response);
+
+        assertThat(bulkShardOperationRequests.size(), equalTo(1));
+        assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations())));
+        assertThat(task.isStopped(), equalTo(true));
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentWrites(), equalTo(1));
+        assertThat(status.followerGlobalCheckpoint(), equalTo(-1L));
+    }
+
+    public void testMaxBatchBytesLimit() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 128, Integer.MAX_VALUE, 1L);
+        startTask(task, 64, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 64L);
+        // Also invokes coordinateWrites()
+        task.innerHandleReadResponse(0L, 64L, response);
+
+        assertThat(bulkShardOperationRequests.size(), equalTo(64));
+    }
+
+    public void testHandleWriteResponse() {
+        ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE);
+        startTask(task, 63, -1);
+
+        task.coordinateReads();
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(0L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        shardChangesRequests.clear();
+        followerGlobalCheckpoints.add(63L);
+        ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L);
+        // Also invokes coordinateWrites()
+        task.innerHandleReadResponse(0L, 63L, response);
+
+        assertThat(bulkShardOperationRequests.size(), equalTo(1));
+        assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations())));
+
+        // handleWrite() also delegates to coordinateReads()
+        assertThat(shardChangesRequests.size(), equalTo(1));
+        assertThat(shardChangesRequests.get(0)[0], equalTo(64L));
+        assertThat(shardChangesRequests.get(0)[1], equalTo(64L));
+
+        ShardFollowNodeTask.Status status = task.getStatus();
+        assertThat(status.numberOfConcurrentReads(), equalTo(1));
+        assertThat(status.lastRequestedSeqNo(), equalTo(63L));
+        assertThat(status.leaderGlobalCheckpoint(), equalTo(63L));
+        assertThat(status.followerGlobalCheckpoint(), equalTo(63L));
+    }
+
+    ShardFollowNodeTask createShardFollowTask(int maxBatchOperationCount, int maxConcurrentReadBatches, int maxConcurrentWriteBatches,
+                                              int bufferWriteLimit, long maxBatchSizeInBytes) {
+        AtomicBoolean stopped = new AtomicBoolean(false);
+        ShardFollowTask params = new ShardFollowTask(null, new ShardId("follow_index", "", 0),
+            new ShardId("leader_index", "", 0), maxBatchOperationCount, maxConcurrentReadBatches, maxBatchSizeInBytes,
+            maxConcurrentWriteBatches, bufferWriteLimit, TimeValue.ZERO, TimeValue.ZERO, Collections.emptyMap());
+
+        shardChangesRequests = new ArrayList<>();
+        bulkShardOperationRequests = new ArrayList<>();
+        readFailures = new LinkedList<>();
+        writeFailures = new LinkedList<>();
+        mappingUpdateFailures = new LinkedList<>();
+        mappingVersions = new LinkedList<>();
+        leaderGlobalCheckpoints = new LinkedList<>();
+        followerGlobalCheckpoints = new LinkedList<>();
+        maxSeqNos = new LinkedList<>();
+        return new ShardFollowNodeTask(
+            1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), params, scheduler, System::nanoTime) {
+
+            @Override
+            protected void innerUpdateMapping(LongConsumer handler, Consumer<Exception> errorHandler) {
+                Exception failure = mappingUpdateFailures.poll();
+                if (failure != null) {
+                    errorHandler.accept(failure);
+                    return;
+                }
+
+                final Long mappingVersion = mappingVersions.poll();
+                if (mappingVersion != null) {
+                    handler.accept(mappingVersion);
+                }
+            }
+
+            @Override
+            protected void innerSendBulkShardOperationsRequest(
+                final List<Translog.Operation> operations,
+                final Consumer<BulkShardOperationsResponse> handler,
+                final Consumer<Exception> errorHandler) {
+                bulkShardOperationRequests.add(operations);
+                Exception writeFailure = ShardFollowNodeTaskTests.this.writeFailures.poll();
+                if (writeFailure != null) {
+                    errorHandler.accept(writeFailure);
+                    return;
+                }
+                Long followerGlobalCheckpoint = followerGlobalCheckpoints.poll();
+                if (followerGlobalCheckpoint != null) {
+                    final BulkShardOperationsResponse response = new BulkShardOperationsResponse();
+                    response.setGlobalCheckpoint(followerGlobalCheckpoint);
+                    response.setMaxSeqNo(followerGlobalCheckpoint);
+                    handler.accept(response);
+                }
+            }
+
+            @Override
+            protected void innerSendShardChangesRequest(long from, int requestBatchSize, Consumer<ShardChangesAction.Response> handler,
+                                                        Consumer<Exception> errorHandler) {
+                beforeSendShardChangesRequest.accept(getStatus());
+                shardChangesRequests.add(new long[]{from, requestBatchSize});
+                Exception readFailure = ShardFollowNodeTaskTests.this.readFailures.poll();
+                if (readFailure != null) {
+                    errorHandler.accept(readFailure);
+                } else if (simulateResponse.get()) {
+                    final Translog.Operation[] operations = new Translog.Operation[requestBatchSize];
+                    for (int i = 0; i < requestBatchSize; i++) {
+                        operations[i] = new Translog.NoOp(from + i, 0, "test");
+                    }
+                    final ShardChangesAction.Response response =
+                        new ShardChangesAction.Response(
+                            mappingVersions.poll(),
+                            leaderGlobalCheckpoints.poll(),
+                            maxSeqNos.poll(),
+                            operations);
+                    handler.accept(response);
+                }
+            }
+
+            @Override
+            protected boolean isStopped() {
+                return stopped.get();
+            }
+
+            @Override
+            public void markAsCompleted() {
+                stopped.set(true);
+            }
+
+            @Override
+            public void markAsFailed(Exception e) {
+                fatalError = e;
+                stopped.set(true);
+            }
+        };
+    }
+
+    private static ShardChangesAction.Response generateShardChangesResponse(long fromSeqNo, long toSeqNo, long mappingVersion,
+                                                                            long leaderGlobalCheckPoint) {
+        List<Translog.Operation> ops = new ArrayList<>();
+        for (long seqNo = fromSeqNo; seqNo <= toSeqNo; seqNo++) {
+            String id = UUIDs.randomBase64UUID();
+            byte[] source = "{}".getBytes(StandardCharsets.UTF_8);
+            ops.add(new Translog.Index("doc", id, seqNo, 0, source));
+        }
+        return new ShardChangesAction.Response(
+            mappingVersion, leaderGlobalCheckPoint, leaderGlobalCheckPoint, ops.toArray(new Translog.Operation[0]));
+    }
+
+    void startTask(ShardFollowNodeTask task, long leaderGlobalCheckpoint, long followerGlobalCheckpoint) {
+        // The call to updateMapping is a noop, so nothing happens.
+        task.start(leaderGlobalCheckpoint, leaderGlobalCheckpoint, followerGlobalCheckpoint, followerGlobalCheckpoint);
+    }
+
+}
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java
new file mode 100644
index 00000000000..2cd024cb03c
--- /dev/null
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java
@@ -0,0 +1,274 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ccr.action;
+
+import com.carrotsearch.hppc.LongHashSet;
+import com.carrotsearch.hppc.LongSet;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.support.replication.TransportWriteAction;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.engine.Engine.Operation.Origin;
+import org.elasticsearch.index.engine.EngineFactory;
+import org.elasticsearch.index.replication.ESIndexLevelReplicationTestCase;
+import org.elasticsearch.index.seqno.SeqNoStats;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xpack.ccr.CcrSettings;
+import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsRequest;
+import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse;
+import org.elasticsearch.xpack.ccr.action.bulk.TransportBulkShardOperationsAction;
+import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.LongConsumer;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTestCase {
+
+    public void testSimpleCcrReplication() throws Exception {
+        try (ReplicationGroup leaderGroup = createGroup(randomInt(2));
+             ReplicationGroup followerGroup = createFollowGroup(randomInt(2))) {
+            leaderGroup.startAll();
+            int docCount = leaderGroup.appendDocs(randomInt(64));
+            leaderGroup.assertAllEqual(docCount);
+            followerGroup.startAll();
+            ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup);
+            final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats();
+            final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats();
+            shardFollowTask.start(
+                leaderSeqNoStats.getGlobalCheckpoint(),
+                leaderSeqNoStats.getMaxSeqNo(),
followerSeqNoStats.getGlobalCheckpoint(), + followerSeqNoStats.getMaxSeqNo()); + docCount += leaderGroup.appendDocs(randomInt(128)); + leaderGroup.syncGlobalCheckpoint(); + leaderGroup.assertAllEqual(docCount); + Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size()); + }); + // Deletes should be replicated to the follower + List deleteDocIds = randomSubsetOf(indexedDocIds); + for (String deleteId : deleteDocIds) { + BulkItemResponse resp = leaderGroup.delete(new DeleteRequest(index.getName(), "type", deleteId)); + assertThat(resp.getResponse().getResult(), equalTo(DocWriteResponse.Result.DELETED)); + } + leaderGroup.syncGlobalCheckpoint(); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size() - deleteDocIds.size()); + }); + shardFollowTask.markAsCompleted(); + assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup); + } + } + + public void testFailLeaderReplicaShard() throws Exception { + try (ReplicationGroup leaderGroup = createGroup(1 + randomInt(1)); + ReplicationGroup followerGroup = createFollowGroup(randomInt(2))) { + leaderGroup.startAll(); + followerGroup.startAll(); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); + final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start( + leaderSeqNoStats.getGlobalCheckpoint(), + leaderSeqNoStats.getMaxSeqNo(), + followerSeqNoStats.getGlobalCheckpoint(), + followerSeqNoStats.getMaxSeqNo()); + int docCount = 256; + leaderGroup.appendDocs(1); + Runnable task = () -> { + try { + leaderGroup.appendDocs(docCount - 1); + leaderGroup.syncGlobalCheckpoint(); + } catch (Exception e) { + throw new AssertionError(e); + } + }; + Thread thread = new Thread(task); + thread.start(); + + // Remove and add a new replica + IndexShard luckyReplica = randomFrom(leaderGroup.getReplicas()); + leaderGroup.removeReplica(luckyReplica); + luckyReplica.close("stop replica", false); + luckyReplica.store().close(); + leaderGroup.addReplica(); + leaderGroup.startReplicas(1); + thread.join(); + + leaderGroup.assertAllEqual(docCount); + assertThat(shardFollowTask.getFailure(), nullValue()); + assertBusy(() -> followerGroup.assertAllEqual(docCount)); + shardFollowTask.markAsCompleted(); + assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup); + } + } + + @Override + protected ReplicationGroup createGroup(int replicas, Settings settings) throws IOException { + Settings newSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10000) + .put(settings) + .build(); + if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(newSettings)) { + IndexMetaData metaData = buildIndexMetaData(replicas, newSettings, indexMapping); + return new ReplicationGroup(metaData) { + + @Override + protected EngineFactory getEngineFactory(ShardRouting routing) { + return new 
FollowingEngineFactory(); + } + }; + } else { + return super.createGroup(replicas, newSettings); + } + } + + private ReplicationGroup createFollowGroup(int replicas) throws IOException { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); + return createGroup(replicas, settingsBuilder.build()); + } + + private ShardFollowNodeTask createShardFollowTask(ReplicationGroup leaderGroup, ReplicationGroup followerGroup) { + ShardFollowTask params = new ShardFollowTask(null, new ShardId("follow_index", "", 0), + new ShardId("leader_index", "", 0), between(1, 64), between(1, 8), Long.MAX_VALUE, between(1, 4), 10240, + TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap()); + + BiConsumer scheduler = (delay, task) -> threadPool.schedule(delay, ThreadPool.Names.GENERIC, task); + AtomicBoolean stopped = new AtomicBoolean(false); + LongSet fetchOperations = new LongHashSet(); + return new ShardFollowNodeTask( + 1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), params, scheduler, System::nanoTime) { + @Override + protected synchronized void onOperationsFetched(Translog.Operation[] operations) { + super.onOperationsFetched(operations); + for (Translog.Operation operation : operations) { + if (fetchOperations.add(operation.seqNo()) == false) { + throw new AssertionError("Operation [" + operation + " ] was fetched already"); + } + } + } + + @Override + protected void innerUpdateMapping(LongConsumer handler, Consumer errorHandler) { + // noop, as mapping updates are not tested + handler.accept(1L); + } + + @Override + protected void innerSendBulkShardOperationsRequest( + final List operations, + final Consumer handler, + final Consumer errorHandler) { + Runnable task = () -> { + BulkShardOperationsRequest request = new BulkShardOperationsRequest(params.getFollowShardId(), operations); + ActionListener listener = ActionListener.wrap(handler::accept, errorHandler); + new CCRAction(request, listener, followerGroup).execute(); + }; + threadPool.executor(ThreadPool.Names.GENERIC).execute(task); + } + + @Override + protected void innerSendShardChangesRequest(long from, int maxOperationCount, Consumer handler, + Consumer errorHandler) { + Runnable task = () -> { + List indexShards = new ArrayList<>(leaderGroup.getReplicas()); + indexShards.add(leaderGroup.getPrimary()); + Collections.shuffle(indexShards, random()); + + Exception exception = null; + for (IndexShard indexShard : indexShards) { + try { + final SeqNoStats seqNoStats = indexShard.seqNoStats(); + Translog.Operation[] ops = ShardChangesAction.getOperations(indexShard, seqNoStats.getGlobalCheckpoint(), from, + maxOperationCount, params.getMaxBatchSizeInBytes()); + // hard code mapping version; this is ok, as mapping updates are not tested here + final ShardChangesAction.Response response = + new ShardChangesAction.Response(1L, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), ops); + handler.accept(response); + return; + } catch (Exception e) { + exception = e; + } + } + assert exception != null; + errorHandler.accept(exception); + }; + threadPool.executor(ThreadPool.Names.GENERIC).execute(task); + } + + @Override + protected boolean isStopped() { + return stopped.get(); + } + + @Override + public void markAsCompleted() { + stopped.set(true); + } + + @Override + public void markAsFailed(Exception e) { + stopped.set(true); + } + + }; + } + + private void 
assertConsistentHistoryBetweenLeaderAndFollower(ReplicationGroup leader, ReplicationGroup follower) throws IOException { + int totalOps = leader.getPrimary().estimateNumberOfHistoryOperations("test", 0); + for (IndexShard followingShard : follower) { + assertThat(followingShard.estimateNumberOfHistoryOperations("test", 0), equalTo(totalOps)); + } + } + + class CCRAction extends ReplicationAction { + + CCRAction(BulkShardOperationsRequest request, ActionListener listener, ReplicationGroup group) { + super(request, listener, group, "ccr"); + } + + @Override + protected PrimaryResult performOnPrimary(IndexShard primary, BulkShardOperationsRequest request) throws Exception { + TransportWriteAction.WritePrimaryResult result = + TransportBulkShardOperationsAction.shardOperationOnPrimary(primary.shardId(), request.getOperations(), + primary, logger); + return new PrimaryResult(result.replicaRequest(), result.finalResponseIfSuccessful); + } + + @Override + protected void performOnReplica(BulkShardOperationsRequest request, IndexShard replica) throws Exception { + TransportBulkShardOperationsAction.applyTranslogOperations(request.getOperations(), replica, Origin.REPLICA); + } + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java new file mode 100644 index 00000000000..300794a6c00 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.Collections; + +public class ShardFollowTaskTests extends AbstractSerializingTestCase { + + @Override + protected ShardFollowTask doParseInstance(XContentParser parser) throws IOException { + return ShardFollowTask.fromXContent(parser); + } + + @Override + protected ShardFollowTask createTestInstance() { + return new ShardFollowTask( + randomAlphaOfLength(4), + new ShardId(randomAlphaOfLength(4), randomAlphaOfLength(4), randomInt(5)), + new ShardId(randomAlphaOfLength(4), randomAlphaOfLength(4), randomInt(5)), + randomIntBetween(1, Integer.MAX_VALUE), + randomIntBetween(1, Integer.MAX_VALUE), + randomNonNegativeLong(), + randomIntBetween(1, Integer.MAX_VALUE), + randomIntBetween(1, Integer.MAX_VALUE), + TimeValue.parseTimeValue(randomTimeValue(), ""), + TimeValue.parseTimeValue(randomTimeValue(), ""), + randomBoolean() ? 
null : Collections.singletonMap("key", "value")); + } + + @Override + protected Writeable.Reader instanceReader() { + return ShardFollowTask::new; + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java new file mode 100644 index 00000000000..03065ea8d38 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ccr.action.DeleteAutoFollowPatternAction.Request; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class TransportDeleteAutoFollowPatternActionTests extends ESTestCase { + + public void testInnerDelete() { + Map> existingAlreadyFollowedIndexUUIDS = new HashMap<>(); + Map existingAutoFollowPatterns = new HashMap<>(); + { + List existingPatterns = new ArrayList<>(); + existingPatterns.add("transactions-*"); + existingAutoFollowPatterns.put("eu_cluster", + new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null)); + + List existingUUIDS = new ArrayList<>(); + existingUUIDS.add("_val"); + existingAlreadyFollowedIndexUUIDS.put("eu_cluster", existingUUIDS); + } + { + List existingPatterns = new ArrayList<>(); + existingPatterns.add("logs-*"); + existingAutoFollowPatterns.put("asia_cluster", + new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null)); + + List existingUUIDS = new ArrayList<>(); + existingUUIDS.add("_val"); + existingAlreadyFollowedIndexUUIDS.put("asia_cluster", existingUUIDS); + } + ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster")) + .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, + new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS))) + .build(); + + Request request = new Request(); + request.setLeaderClusterAlias("eu_cluster"); + AutoFollowMetadata result = TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState) + .getMetaData() + .custom(AutoFollowMetadata.TYPE); + assertThat(result.getPatterns().size(), equalTo(1)); + assertThat(result.getPatterns().get("asia_cluster"), notNullValue()); + assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); + assertThat(result.getFollowedLeaderIndexUUIDs().get("asia_cluster"), notNullValue()); + } + + public void testInnerDeleteDoesNotExist() { + Map> existingAlreadyFollowedIndexUUIDS = new HashMap<>(); + Map existingAutoFollowPatterns = new HashMap<>(); + { + List existingPatterns = new ArrayList<>(); + existingPatterns.add("transactions-*"); + 
existingAutoFollowPatterns.put("eu_cluster", + new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null)); + } + ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster")) + .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, + new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS))) + .build(); + + Request request = new Request(); + request.setLeaderClusterAlias("asia_cluster"); + Exception e = expectThrows(ResourceNotFoundException.class, + () -> TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState)); + assertThat(e.getMessage(), equalTo("no auto-follow patterns for cluster alias [asia_cluster] found")); + } + + public void testInnerDeleteNoAutoFollowMetadata() { + ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster")) + .metaData(MetaData.builder()) + .build(); + + Request request = new Request(); + request.setLeaderClusterAlias("asia_cluster"); + Exception e = expectThrows(ResourceNotFoundException.class, + () -> TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState)); + assertThat(e.getMessage(), equalTo("no auto-follow patterns for cluster alias [asia_cluster] found")); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java new file mode 100644 index 00000000000..d894eda0b11 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class TransportPutAutoFollowPatternActionTests extends ESTestCase { + + public void testInnerPut() { + PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); + request.setLeaderClusterAlias("eu_cluster"); + request.setLeaderIndexPatterns(Collections.singletonList("logs-*")); + + ClusterState localState = ClusterState.builder(new ClusterName("us_cluster")) + .metaData(MetaData.builder()) + .build(); + + ClusterState remoteState = ClusterState.builder(new ClusterName("eu_cluster")) + .metaData(MetaData.builder()) + .build(); + + ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, localState, remoteState); + AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE); + assertThat(autoFollowMetadata, notNullValue()); + assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1)); + assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().size(), equalTo(1)); + assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().get(0), equalTo("logs-*")); + assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); + assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("eu_cluster").size(), equalTo(0)); + } + + public void testInnerPut_existingLeaderIndices() { + PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); + request.setLeaderClusterAlias("eu_cluster"); + request.setLeaderIndexPatterns(Collections.singletonList("logs-*")); + + ClusterState localState = ClusterState.builder(new ClusterName("us_cluster")) + .metaData(MetaData.builder()) + .build(); + + int numLeaderIndices = randomIntBetween(1, 8); + int numMatchingLeaderIndices = randomIntBetween(1, 8); + MetaData.Builder mdBuilder = MetaData.builder(); + for (int i = 0; i < numLeaderIndices; i++) { + mdBuilder.put(IndexMetaData.builder("transactions-" + i) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0)); + } + for (int i = 0; i < numMatchingLeaderIndices; i++) { + mdBuilder.put(IndexMetaData.builder("logs-" + i) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0)); + } + + ClusterState remoteState = ClusterState.builder(new ClusterName("eu_cluster")) + .metaData(mdBuilder) + .build(); + + ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, localState, remoteState); + AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE); + assertThat(autoFollowMetadata, notNullValue()); + assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1)); + assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().size(), equalTo(1)); + assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().get(0), 
equalTo("logs-*")); + assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); + assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("eu_cluster").size(), equalTo(numMatchingLeaderIndices)); + } + + public void testInnerPut_existingLeaderIndicesAndAutoFollowMetadata() { + PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); + request.setLeaderClusterAlias("eu_cluster"); + request.setLeaderIndexPatterns(Arrays.asList("logs-*", "transactions-*")); + + Map existingAutoFollowPatterns = new HashMap<>(); + List existingPatterns = new ArrayList<>(); + existingPatterns.add("transactions-*"); + existingAutoFollowPatterns.put("eu_cluster", + new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null)); + Map> existingAlreadyFollowedIndexUUIDS = new HashMap<>(); + List existingUUIDS = new ArrayList<>(); + existingUUIDS.add("_val"); + existingAlreadyFollowedIndexUUIDS.put("eu_cluster", existingUUIDS); + ClusterState localState = ClusterState.builder(new ClusterName("us_cluster")) + .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, + new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS))) + .build(); + + int numLeaderIndices = randomIntBetween(1, 8); + MetaData.Builder mdBuilder = MetaData.builder(); + for (int i = 0; i < numLeaderIndices; i++) { + mdBuilder.put(IndexMetaData.builder("logs-" + i) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0)); + } + + ClusterState remoteState = ClusterState.builder(new ClusterName("eu_cluster")) + .metaData(mdBuilder) + .build(); + + ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, localState, remoteState); + AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE); + assertThat(autoFollowMetadata, notNullValue()); + assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1)); + assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().size(), equalTo(2)); + assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().get(0), equalTo("logs-*")); + assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().get(1), equalTo("transactions-*")); + assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); + assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("eu_cluster").size(), equalTo(numLeaderIndices + 1)); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java new file mode 100644 index 00000000000..4c6c0c060e4 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action.bulk; + +import org.apache.lucene.index.Term; +import org.elasticsearch.action.support.replication.TransportWriteAction; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.xpack.ccr.CcrSettings; +import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class BulkShardOperationsTests extends IndexShardTestCase { + + private static final byte[] SOURCE = "{}".getBytes(StandardCharsets.UTF_8); + + // test that we use the primary term on the follower when applying operations from the leader + public void testPrimaryTermFromFollower() throws IOException { + final Settings settings = Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build(); + final IndexShard followerPrimary = newStartedShard(true, settings, new FollowingEngineFactory()); + + // we use this primary on the operations yet we expect the applied operations to have the primary term of the follower + final long primaryTerm = randomLongBetween(1, Integer.MAX_VALUE); + + int numOps = randomIntBetween(0, 127); + final List operations = new ArrayList<>(randomIntBetween(0, 127)); + for (int i = 0; i < numOps; i++) { + final String id = Integer.toString(i); + final long seqNo = i; + final Translog.Operation.Type type = + randomValueOtherThan(Translog.Operation.Type.CREATE, () -> randomFrom(Translog.Operation.Type.values())); + switch (type) { + case INDEX: + operations.add(new Translog.Index("_doc", id, seqNo, primaryTerm, 0, SOURCE, null, -1)); + break; + case DELETE: + operations.add( + new Translog.Delete("_doc", id, new Term("_id", Uid.encodeId(id)), seqNo, primaryTerm, 0)); + break; + case NO_OP: + operations.add(new Translog.NoOp(seqNo, primaryTerm, "test")); + break; + default: + throw new IllegalStateException("unexpected operation type [" + type + "]"); + } + } + + final TransportWriteAction.WritePrimaryResult result = + TransportBulkShardOperationsAction.shardOperationOnPrimary(followerPrimary.shardId(), operations, followerPrimary, logger); + + try (Translog.Snapshot snapshot = followerPrimary.getHistoryOperations("test", 0)) { + assertThat(snapshot.totalOperations(), equalTo(operations.size())); + Translog.Operation operation; + while ((operation = snapshot.next()) != null) { + assertThat(operation.primaryTerm(), equalTo(followerPrimary.getOperationPrimaryTerm())); + } + } + + for (final Translog.Operation operation : result.replicaRequest().getOperations()) { + assertThat(operation.primaryTerm(), equalTo(followerPrimary.getOperationPrimaryTerm())); + } + + closeShards(followerPrimary); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java new file mode 100644 index 00000000000..e14b7513035 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.index.engine; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ccr.CcrSettings; + +import java.util.Collections; +import java.util.concurrent.CountDownLatch; + +import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; +import static org.hamcrest.Matchers.equalTo; + +public class FollowEngineIndexShardTests extends IndexShardTestCase { + + public void testDoNotFillGaps() throws Exception { + Settings settings = Settings.builder() + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) + .build(); + final IndexShard indexShard = newStartedShard(false, settings, new FollowingEngineFactory()); + + long seqNo = -1; + for (int i = 0; i < 8; i++) { + final String id = Long.toString(i); + SourceToParse sourceToParse = SourceToParse.source(indexShard.shardId().getIndexName(), "_doc", id, + new BytesArray("{}"), XContentType.JSON); + indexShard.applyIndexOperationOnReplica(++seqNo, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse); + } + long seqNoBeforeGap = seqNo; + seqNo += 8; + SourceToParse sourceToParse = SourceToParse.source(indexShard.shardId().getIndexName(), "_doc", "9", + new BytesArray("{}"), XContentType.JSON); + indexShard.applyIndexOperationOnReplica(seqNo, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse); + + // promote the replica to primary: + final ShardRouting replicaRouting = indexShard.routingEntry(); + final ShardRouting primaryRouting = + newShardRouting( + replicaRouting.shardId(), + replicaRouting.currentNodeId(), + null, + true, + ShardRoutingState.STARTED, + replicaRouting.allocationId()); + indexShard.updateShardState(primaryRouting, indexShard.getOperationPrimaryTerm() + 1, (shard, listener) -> {}, + 0L, Collections.singleton(primaryRouting.allocationId().getId()), + new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(), Collections.emptySet()); + + final CountDownLatch latch = new CountDownLatch(1); + ActionListener actionListener = ActionListener.wrap(releasable -> { + releasable.close(); + latch.countDown(); + }, e -> {assert false : "expected no exception, but got [" + e.getMessage() + "]";}); + indexShard.acquirePrimaryOperationPermit(actionListener, ThreadPool.Names.GENERIC, ""); + latch.await(); + assertThat(indexShard.getLocalCheckpoint(), equalTo(seqNoBeforeGap)); + indexShard.refresh("test"); + assertThat(indexShard.docStats().getCount(), equalTo(9L)); + closeShards(indexShard); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java new file mode 100644 index 00000000000..b3e2d12227b --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -0,0 +1,323 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.index.engine; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.store.Directory; +import org.elasticsearch.Version; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.codec.CodecService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineTestCase; +import org.elasticsearch.index.engine.TranslogHandler; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogConfig; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.DummyShardLock; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; + +public class FollowingEngineTests extends ESTestCase { + + private ThreadPool threadPool; + private Index index; + private ShardId shardId; + private AtomicLong primaryTerm = new AtomicLong(); + + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("following-engine-tests"); + index = new Index("index", "uuid"); + shardId = new ShardId(index, 0); + primaryTerm.set(randomLongBetween(1, Long.MAX_VALUE)); + } + + public void tearDown() throws Exception { + terminate(threadPool); + super.tearDown(); + } + + public void testFollowingEngineRejectsNonFollowingIndex() throws IOException 
{ + final Settings.Builder builder = + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT); + if (randomBoolean()) { + builder.put("index.xpack.ccr.following_index", false); + } + final Settings settings = builder.build(); + final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + try (Store store = createStore(shardId, indexSettings, newDirectory())) { + final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store, logger, xContentRegistry()); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new FollowingEngine(engineConfig)); + assertThat(e, hasToString(containsString("a following engine can not be constructed for a non-following index"))); + } + } + + public void testIndexSeqNoIsMaintained() throws IOException { + final long seqNo = randomIntBetween(0, Integer.MAX_VALUE); + runIndexTest( + seqNo, + Engine.Operation.Origin.PRIMARY, + (followingEngine, index) -> { + final Engine.IndexResult result = followingEngine.index(index); + assertThat(result.getSeqNo(), equalTo(seqNo)); + }); + } + + /* + * A following engine (whether or not it is an engine for a primary or replica shard) needs to maintain ordering semantics as the + * operations presented to it can arrive out of order (while a leader engine that is for a primary shard dictates the order). This test + * ensures that these semantics are maintained. + */ + public void testOutOfOrderDocuments() throws IOException { + final Settings settings = + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put("index.xpack.ccr.following_index", true) + .build(); + final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + try (Store store = createStore(shardId, indexSettings, newDirectory())) { + final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store, logger, xContentRegistry()); + try (FollowingEngine followingEngine = createEngine(store, engineConfig)) { + final VersionType versionType = + randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE, VersionType.FORCE); + final List ops = EngineTestCase.generateSingleDocHistory(true, versionType, 2, 2, 20, "id"); + EngineTestCase.assertOpsOnReplica(ops, followingEngine, true, logger); + } + } + } + + public void runIndexTest( + final long seqNo, + final Engine.Operation.Origin origin, + final CheckedBiConsumer consumer) throws IOException { + final Settings settings = + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put("index.xpack.ccr.following_index", true) + .build(); + final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + try (Store store = createStore(shardId, indexSettings, newDirectory())) { + final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store, logger, xContentRegistry()); + try (FollowingEngine followingEngine = createEngine(store, engineConfig)) { + final Engine.Index 
index = createIndexOp("id", seqNo, origin); + consumer.accept(followingEngine, index); + } + } + } + + public void testDeleteSeqNoIsMaintained() throws IOException { + final long seqNo = randomIntBetween(0, Integer.MAX_VALUE); + runDeleteTest( + seqNo, + Engine.Operation.Origin.PRIMARY, + (followingEngine, delete) -> { + final Engine.DeleteResult result = followingEngine.delete(delete); + assertThat(result.getSeqNo(), equalTo(seqNo)); + }); + } + + public void runDeleteTest( + final long seqNo, + final Engine.Operation.Origin origin, + final CheckedBiConsumer consumer) throws IOException { + final Settings settings = + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put("index.xpack.ccr.following_index", true) + .build(); + final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + try (Store store = createStore(shardId, indexSettings, newDirectory())) { + final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store, logger, xContentRegistry()); + try (FollowingEngine followingEngine = createEngine(store, engineConfig)) { + final String id = "id"; + final Engine.Delete delete = new Engine.Delete( + "type", + id, + new Term("_id", id), + seqNo, + primaryTerm.get(), + randomNonNegativeLong(), + VersionType.EXTERNAL, + origin, + System.currentTimeMillis()); + + consumer.accept(followingEngine, delete); + } + } + } + + public void testDoNotFillSeqNoGaps() throws Exception { + final Settings settings = + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put("index.xpack.ccr.following_index", true) + .build(); + final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + try (Store store = createStore(shardId, indexSettings, newDirectory())) { + final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store, logger, xContentRegistry()); + try (FollowingEngine followingEngine = createEngine(store, engineConfig)) { + followingEngine.index(createIndexOp("id", 128, Engine.Operation.Origin.PRIMARY)); + int addedNoops = followingEngine.fillSeqNoGaps(primaryTerm.get()); + assertThat(addedNoops, equalTo(0)); + } + } + } + + private EngineConfig engineConfig( + final ShardId shardId, + final IndexSettings indexSettings, + final ThreadPool threadPool, + final Store store, + final Logger logger, + final NamedXContentRegistry xContentRegistry) throws IOException { + final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); + final Path translogPath = createTempDir("translog"); + final TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + return new EngineConfig( + shardId, + "allocation-id", + threadPool, + indexSettings, + null, + store, + newMergePolicy(), + indexWriterConfig.getAnalyzer(), + indexWriterConfig.getSimilarity(), + new CodecService(null, logger), + new Engine.EventListener() { + @Override + public void onFailedEngine(String reason, Exception e) { + + } + }, + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + translogConfig, + TimeValue.timeValueMinutes(5), + Collections.emptyList(), 
+ Collections.emptyList(), + null, + new NoneCircuitBreakerService(), + () -> SequenceNumbers.NO_OPS_PERFORMED, + () -> primaryTerm.get(), + EngineTestCase.tombstoneDocSupplier() + ); + } + + private static Store createStore( + final ShardId shardId, final IndexSettings indexSettings, final Directory directory) { + return new Store(shardId, indexSettings, directory, new DummyShardLock(shardId)); + } + + private FollowingEngine createEngine(Store store, EngineConfig config) throws IOException { + store.createEmpty(); + final String translogUuid = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, shardId, 1L); + store.associateIndexWithNewTranslog(translogUuid); + FollowingEngine followingEngine = new FollowingEngine(config); + TranslogHandler translogHandler = new TranslogHandler(xContentRegistry(), config.getIndexSettings()); + followingEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); + return followingEngine; + } + + private Engine.Index createIndexOp(String id, long seqNo, Engine.Operation.Origin origin) { + final Field uidField = new Field("_id", id, IdFieldMapper.Defaults.FIELD_TYPE); + final String type = "type"; + final Field versionField = new NumericDocValuesField("_version", 0); + final SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); + final ParseContext.Document document = new ParseContext.Document(); + document.add(uidField); + document.add(versionField); + document.add(seqID.seqNo); + document.add(seqID.seqNoDocValue); + document.add(seqID.primaryTerm); + final BytesReference source = new BytesArray(new byte[]{1}); + final ParsedDocument parsedDocument = new ParsedDocument( + versionField, + seqID, + id, + type, + "routing", + Collections.singletonList(document), + source, + XContentType.JSON, + null); + + final long version; + final long autoGeneratedIdTimestamp; + if (randomBoolean()) { + version = 1; + autoGeneratedIdTimestamp = System.currentTimeMillis(); + } else { + version = randomNonNegativeLong(); + autoGeneratedIdTimestamp = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; + } + return new Engine.Index( + new Term("_id", parsedDocument.id()), + parsedDocument, + seqNo, + primaryTerm.get(), + version, + VersionType.EXTERNAL, + origin, + System.currentTimeMillis(), + autoGeneratedIdTimestamp, + randomBoolean()); + } + +} diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index a3b4bea9702..a58500b880f 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -8,7 +8,6 @@ import java.nio.file.StandardCopyOption apply plugin: 'elasticsearch.esplugin' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' -apply plugin: 'com.github.johnrengelman.shadow' archivesBaseName = 'x-pack-core' @@ -27,19 +26,18 @@ dependencyLicenses { dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" - compile project(':x-pack:protocol') - shadow "org.apache.httpcomponents:httpclient:${versions.httpclient}" - shadow "org.apache.httpcomponents:httpcore:${versions.httpcore}" - shadow "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" - shadow "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}" + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" + compile 
"org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}" - shadow "commons-logging:commons-logging:${versions.commonslogging}" - shadow "commons-codec:commons-codec:${versions.commonscodec}" + compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "commons-codec:commons-codec:${versions.commonscodec}" // security deps - shadow 'com.unboundid:unboundid-ldapsdk:3.2.0' - shadow project(path: ':modules:transport-netty4', configuration: 'runtime') - shadow(project(path: ':plugins:transport-nio', configuration: 'runtime')) { + compile 'com.unboundid:unboundid-ldapsdk:3.2.0' + compile project(path: ':modules:transport-netty4', configuration: 'runtime') + compile(project(path: ':plugins:transport-nio', configuration: 'runtime')) { // TODO: core exclusion should not be necessary, since it is a transitive dep of all plugins exclude group: "org.elasticsearch", module: "elasticsearch-core" } @@ -112,8 +110,7 @@ test { // TODO: don't publish test artifacts just to run messy tests, fix the tests! // https://github.com/elastic/x-plugins/issues/724 configurations { - testArtifacts.extendsFrom(testRuntime, shadow) - testArtifacts.exclude(group: project(':x-pack:protocol').group, module: project(':x-pack:protocol').name) + testArtifacts.extendsFrom testRuntime } task testJar(type: Jar) { appendix 'test' diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index e0874389243..0619aef6961 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -120,7 +120,7 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste super(settings); this.clusterService = clusterService; this.clock = clock; - this.scheduler = new SchedulerEngine(clock); + this.scheduler = new SchedulerEngine(settings, clock); this.licenseState = licenseState; this.operationModeFileWatcher = new OperationModeFileWatcher(resourceWatcherService, XPackPlugin.resolveConfigFile(env, "license_mode"), logger, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java new file mode 100644 index 00000000000..e7460d5a2eb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java @@ -0,0 +1,282 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.xpack.core.action.XPackInfoAction; + +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +/** + * Checks remote clusters for license compatibility with a specified license predicate. + */ +public final class RemoteClusterLicenseChecker { + + /** + * Encapsulates the license info of a remote cluster. + */ + public static final class RemoteClusterLicenseInfo { + + private final String clusterAlias; + + /** + * The alias of the remote cluster. + * + * @return the cluster alias + */ + public String clusterAlias() { + return clusterAlias; + } + + private final XPackInfoResponse.LicenseInfo licenseInfo; + + /** + * The license info of the remote cluster. + * + * @return the license info + */ + public XPackInfoResponse.LicenseInfo licenseInfo() { + return licenseInfo; + } + + RemoteClusterLicenseInfo(final String clusterAlias, final XPackInfoResponse.LicenseInfo licenseInfo) { + this.clusterAlias = clusterAlias; + this.licenseInfo = licenseInfo; + } + + } + + /** + * Encapsulates a remote cluster license check. The check is either successful if the license of the remote cluster is compatible with + * the predicate used to check license compatibility, or the check is a failure. + */ + public static final class LicenseCheck { + + private final RemoteClusterLicenseInfo remoteClusterLicenseInfo; + + /** + * The remote cluster license info. This method should only be invoked if this instance represents a failing license check. + * + * @return the remote cluster license info + */ + public RemoteClusterLicenseInfo remoteClusterLicenseInfo() { + assert isSuccess() == false; + return remoteClusterLicenseInfo; + } + + private static final LicenseCheck SUCCESS = new LicenseCheck(null); + + /** + * A successful license check. + * + * @return a successful license check instance + */ + public static LicenseCheck success() { + return SUCCESS; + } + + /** + * Test if this instance represents a successful license check. + * + * @return true if this instance represents a successful license check, otherwise false + */ + public boolean isSuccess() { + return this == SUCCESS; + } + + /** + * Creates a failing license check encapsulating the specified remote cluster license info. 
+ * + * @param remoteClusterLicenseInfo the remote cluster license info + * @return a failing license check + */ + public static LicenseCheck failure(final RemoteClusterLicenseInfo remoteClusterLicenseInfo) { + return new LicenseCheck(remoteClusterLicenseInfo); + } + + private LicenseCheck(final RemoteClusterLicenseInfo remoteClusterLicenseInfo) { + this.remoteClusterLicenseInfo = remoteClusterLicenseInfo; + } + + } + + private final Client client; + private final Predicate<License.OperationMode> predicate; + + /** + * Constructs a remote cluster license checker with the specified license predicate for checking license compatibility. The predicate + * does not need to check for the active license state as this is handled by the remote cluster license checker. + * + * @param client the client + * @param predicate the license predicate + */ + public RemoteClusterLicenseChecker(final Client client, final Predicate<License.OperationMode> predicate) { + this.client = client; + this.predicate = predicate; + } + + public static boolean isLicensePlatinumOrTrial(final XPackInfoResponse.LicenseInfo licenseInfo) { + final License.OperationMode mode = License.OperationMode.resolve(licenseInfo.getMode()); + return mode == License.OperationMode.PLATINUM || mode == License.OperationMode.TRIAL; + } + + /** + * Checks the specified clusters for license compatibility. The specified callback will be invoked once if all clusters are + * license-compatible, otherwise the specified callback will be invoked once on the first cluster that is not license-compatible. + * + * @param clusterAliases the cluster aliases to check + * @param listener a callback + */ + public void checkRemoteClusterLicenses(final List<String> clusterAliases, final ActionListener<LicenseCheck> listener) { + final Iterator<String> clusterAliasesIterator = clusterAliases.iterator(); + if (clusterAliasesIterator.hasNext() == false) { + listener.onResponse(LicenseCheck.success()); + return; + } + + final AtomicReference<String> clusterAlias = new AtomicReference<>(); + + final ActionListener<XPackInfoResponse> infoListener = new ActionListener<XPackInfoResponse>() { + + @Override + public void onResponse(final XPackInfoResponse xPackInfoResponse) { + final XPackInfoResponse.LicenseInfo licenseInfo = xPackInfoResponse.getLicenseInfo(); + if ((licenseInfo.getStatus() == LicenseStatus.ACTIVE) == false + || predicate.test(License.OperationMode.resolve(licenseInfo.getMode())) == false) { + listener.onResponse(LicenseCheck.failure(new RemoteClusterLicenseInfo(clusterAlias.get(), licenseInfo))); + return; + } + + if (clusterAliasesIterator.hasNext()) { + clusterAlias.set(clusterAliasesIterator.next()); + // recurse to the next cluster + remoteClusterLicense(clusterAlias.get(), this); + } else { + listener.onResponse(LicenseCheck.success()); + } + } + + @Override + public void onFailure(final Exception e) { + final String message = "could not determine the license type for cluster [" + clusterAlias.get() + "]"; + listener.onFailure(new ElasticsearchException(message, e)); + } + + }; + + // check the license on the first cluster, and then we recursively check licenses on the remaining clusters + clusterAlias.set(clusterAliasesIterator.next()); + remoteClusterLicense(clusterAlias.get(), infoListener); + } + + private void remoteClusterLicense(final String clusterAlias, final ActionListener<XPackInfoResponse> listener) { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + final ContextPreservingActionListener<XPackInfoResponse> contextPreservingActionListener = + new ContextPreservingActionListener<>(threadContext.newRestorableContext(false), listener); + try
(ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we stash any context here since this is an internal execution and should not leak any existing context information + threadContext.markAsSystemContext(); + + final XPackInfoRequest request = new XPackInfoRequest(); + request.setCategories(EnumSet.of(XPackInfoRequest.Category.LICENSE)); + try { + client.getRemoteClusterClient(clusterAlias).execute(XPackInfoAction.INSTANCE, request, contextPreservingActionListener); + } catch (final Exception e) { + contextPreservingActionListener.onFailure(e); + } + } + } + + /** + * Predicate to test if the index name represents the name of a remote index. + * + * @param index the index name + * @return true if the collection of indices contains a remote index, otherwise false + */ + public static boolean isRemoteIndex(final String index) { + return index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) != -1; + } + + /** + * Predicate to test if the collection of index names contains any that represent the name of a remote index. + * + * @param indices the collection of index names + * @return true if the collection of index names contains a name that represents a remote index, otherwise false + */ + public static boolean containsRemoteIndex(final List indices) { + return indices.stream().anyMatch(RemoteClusterLicenseChecker::isRemoteIndex); + } + + /** + * Filters the collection of index names for names that represent a remote index. Remote index names are of the form + * {@code cluster_name:index_name}. + * + * @param indices the collection of index names + * @return list of index names that represent remote index names + */ + public static List remoteIndices(final List indices) { + return indices.stream().filter(RemoteClusterLicenseChecker::isRemoteIndex).collect(Collectors.toList()); + } + + /** + * Extract the list of remote cluster aliases from the list of index names. Remote index names are of the form + * {@code cluster_alias:index_name} and the cluster_alias is extracted for each index name that represents a remote index. + * + * @param indices the collection of index names + * @return the remote cluster names + */ + public static List remoteClusterAliases(final List indices) { + return indices.stream() + .filter(RemoteClusterLicenseChecker::isRemoteIndex) + .map(index -> index.substring(0, index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR))) + .distinct() + .collect(Collectors.toList()); + } + + /** + * Constructs an error message for license incompatibility. + * + * @param feature the name of the feature that initiated the remote cluster license check. 
+ * @param remoteClusterLicenseInfo the remote cluster license info of the cluster that failed the license check + * @return an error message representing license incompatibility + */ + public static String buildErrorMessage( + final String feature, + final RemoteClusterLicenseInfo remoteClusterLicenseInfo, + final Predicate predicate) { + final StringBuilder error = new StringBuilder(); + if (remoteClusterLicenseInfo.licenseInfo().getStatus() != LicenseStatus.ACTIVE) { + error.append(String.format(Locale.ROOT, "the license on cluster [%s] is not active", remoteClusterLicenseInfo.clusterAlias())); + } else { + assert predicate.test(remoteClusterLicenseInfo.licenseInfo()) == false : "license must be incompatible to build error message"; + final String message = String.format( + Locale.ROOT, + "the license mode [%s] on cluster [%s] does not enable [%s]", + License.OperationMode.resolve(remoteClusterLicenseInfo.licenseInfo().getMode()), + remoteClusterLicenseInfo.clusterAlias(), + feature); + error.append(message); + } + + return error.toString(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 722c9d0e711..37176803d4f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -410,10 +410,20 @@ public class XPackLicenseState { */ public boolean isCustomRoleProvidersAllowed() { final Status localStatus = status; - return (localStatus.mode == OperationMode.PLATINUM || localStatus.mode == OperationMode.TRIAL ) + return (localStatus.mode == OperationMode.PLATINUM || localStatus.mode == OperationMode.TRIAL) && localStatus.active; } + /** + * @return whether "authorization_realms" are allowed based on the license {@link OperationMode} + * @see org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings + */ + public boolean isAuthorizationRealmAllowed() { + final Status localStatus = status; + return (localStatus.mode == OperationMode.PLATINUM || localStatus.mode == OperationMode.TRIAL) + && localStatus.active; + } + /** * Determine if Watcher is available based on the current license. *
* <p>
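The `isAuthorizationRealmAllowed` check added in the hunk above gates the `authorization_realms` feature on an active platinum or trial license, mirroring the custom-role-providers check. A hypothetical consumer would fail fast before wiring up delegated authorization; the class and method below are illustrative only, and `LicenseUtils.newComplianceException` is assumed to be the existing x-pack helper for raising license errors:

```java
import org.elasticsearch.license.LicenseUtils;
import org.elasticsearch.license.XPackLicenseState;

final class DelegatedAuthorizationGate {

    // Hypothetical guard, for illustration only: reject the feature when the
    // current license does not permit authorization_realms.
    static void ensureAuthorizationRealmsAllowed(final XPackLicenseState licenseState) {
        if (licenseState.isAuthorizationRealmAllowed() == false) {
            throw LicenseUtils.newComplianceException("authorization_realms");
        }
    }
}
```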
    @@ -514,13 +524,13 @@ public class XPackLicenseState { * {@code false}. */ public boolean isMachineLearningAllowed() { - // status is volatile - Status localStatus = status; - OperationMode operationMode = localStatus.mode; + // one-time volatile read as status could be updated on us while performing this check + final Status currentStatus = status; + return currentStatus.active && isMachineLearningAllowedForOperationMode(currentStatus.mode); + } - boolean licensed = operationMode == OperationMode.TRIAL || operationMode == OperationMode.PLATINUM; - - return licensed && localStatus.active; + public static boolean isMachineLearningAllowedForOperationMode(final OperationMode operationMode) { + return isPlatinumOrTrialOperationMode(operationMode); } /** @@ -612,4 +622,30 @@ public class XPackLicenseState { final OperationMode mode = status.mode; return mode == OperationMode.TRIAL ? (isSecurityExplicitlyEnabled || isSecurityEnabledByTrialVersion) : isSecurityEnabled; } + + /** + * Determine if cross-cluster replication should be enabled. + *
<p> + * Cross-cluster replication is only disabled when the license has expired or if the mode is not: + * <ul> + * <li>{@link OperationMode#PLATINUM}</li> + * <li>{@link OperationMode#TRIAL}</li> + * </ul>
+ * + * @return true if the license is compatible, otherwise false + */ + public boolean isCcrAllowed() { + // one-time volatile read as status could be updated on us while performing this check + final Status currentStatus = status; + return currentStatus.active && isCcrAllowedForOperationMode(currentStatus.mode); + } + + public static boolean isCcrAllowedForOperationMode(final OperationMode operationMode) { + return isPlatinumOrTrialOperationMode(operationMode); + } + + public static boolean isPlatinumOrTrialOperationMode(final OperationMode operationMode) { + return operationMode == OperationMode.PLATINUM || operationMode == OperationMode.TRIAL; + } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java new file mode 100644 index 00000000000..41f066daf93 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Locale; + +/** + * Fetch information about X-Pack from the cluster. + */ +public class XPackInfoRequest extends ActionRequest { + + public enum Category { + BUILD, LICENSE, FEATURES; + + public static EnumSet<Category> toSet(String...
categories) { + EnumSet set = EnumSet.noneOf(Category.class); + for (String category : categories) { + switch (category) { + case "_all": + return EnumSet.allOf(Category.class); + case "_none": + return EnumSet.noneOf(Category.class); + default: + set.add(Category.valueOf(category.toUpperCase(Locale.ROOT))); + } + } + return set; + } + } + + private boolean verbose; + private EnumSet categories = EnumSet.noneOf(Category.class); + + public XPackInfoRequest() {} + + public void setVerbose(boolean verbose) { + this.verbose = verbose; + } + + public boolean isVerbose() { + return verbose; + } + + public void setCategories(EnumSet categories) { + this.categories = categories; + } + + public EnumSet getCategories() { + return categories; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + this.verbose = in.readBoolean(); + EnumSet categories = EnumSet.noneOf(Category.class); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + categories.add(Category.valueOf(in.readString())); + } + this.categories = categories; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(verbose); + out.writeVInt(categories.size()); + for (Category category : categories) { + out.writeString(category.name()); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java new file mode 100644 index 00000000000..b51a451a67f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java @@ -0,0 +1,483 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class XPackInfoResponse extends ActionResponse implements ToXContentObject { + /** + * Value of the license's expiration time if it should never expire. 
+ */ + public static final long BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS = Long.MAX_VALUE - TimeUnit.HOURS.toMillis(24 * 365); + // TODO move this constant to License.java once we move License.java to the protocol jar + + @Nullable private BuildInfo buildInfo; + @Nullable private LicenseInfo licenseInfo; + @Nullable private FeatureSetsInfo featureSetsInfo; + + public XPackInfoResponse() {} + + public XPackInfoResponse(@Nullable BuildInfo buildInfo, @Nullable LicenseInfo licenseInfo, @Nullable FeatureSetsInfo featureSetsInfo) { + this.buildInfo = buildInfo; + this.licenseInfo = licenseInfo; + this.featureSetsInfo = featureSetsInfo; + } + + /** + * @return The build info (incl. build hash and timestamp) + */ + public BuildInfo getBuildInfo() { + return buildInfo; + } + + /** + * @return The current license info (incl. UID, type/mode, status and expiry date). May return {@code null} when no + * license is currently installed. + */ + public LicenseInfo getLicenseInfo() { + return licenseInfo; + } + + /** + * @return The current status of the feature sets in X-Pack. Feature sets describe the features available/enabled in X-Pack. + */ + public FeatureSetsInfo getFeatureSetsInfo() { + return featureSetsInfo; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalWriteable(buildInfo); + out.writeOptionalWriteable(licenseInfo); + out.writeOptionalWriteable(featureSetsInfo); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + this.buildInfo = in.readOptionalWriteable(BuildInfo::new); + this.licenseInfo = in.readOptionalWriteable(LicenseInfo::new); + this.featureSetsInfo = in.readOptionalWriteable(FeatureSetsInfo::new); + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + XPackInfoResponse rhs = (XPackInfoResponse) other; + return Objects.equals(buildInfo, rhs.buildInfo) + && Objects.equals(licenseInfo, rhs.licenseInfo) + && Objects.equals(featureSetsInfo, rhs.featureSetsInfo); + } + + @Override + public int hashCode() { + return Objects.hash(buildInfo, licenseInfo, featureSetsInfo); + } + + @Override + public String toString() { + return Strings.toString(this, true, false); + } + + private static final ConstructingObjectParser<XPackInfoResponse, Void> PARSER = new ConstructingObjectParser<>( + "xpack_info_response", true, (a, v) -> { + BuildInfo buildInfo = (BuildInfo) a[0]; + LicenseInfo licenseInfo = (LicenseInfo) a[1]; + @SuppressWarnings("unchecked") // This is how constructing object parser works + List<FeatureSetsInfo.FeatureSet> featureSets = (List<FeatureSetsInfo.FeatureSet>) a[2]; + FeatureSetsInfo featureSetsInfo = featureSets == null ? null : new FeatureSetsInfo(new HashSet<>(featureSets)); + return new XPackInfoResponse(buildInfo, licenseInfo, featureSetsInfo); + }); + static { + PARSER.declareObject(optionalConstructorArg(), BuildInfo.PARSER, new ParseField("build")); + /* + * licenseInfo is sort of "double optional" because it is + * optional but it can also be sent as `null`.
+ */ + PARSER.declareField(optionalConstructorArg(), (p, v) -> { + if (p.currentToken() == XContentParser.Token.VALUE_NULL) { + return null; + } + return LicenseInfo.PARSER.parse(p, v); + }, + new ParseField("license"), ValueType.OBJECT_OR_NULL); + PARSER.declareNamedObjects(optionalConstructorArg(), + (p, c, name) -> FeatureSetsInfo.FeatureSet.PARSER.parse(p, name), + new ParseField("features")); + } + public static XPackInfoResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (buildInfo != null) { + builder.field("build", buildInfo, params); + } + + EnumSet categories = XPackInfoRequest.Category + .toSet(Strings.splitStringByCommaToArray(params.param("categories", "_all"))); + if (licenseInfo != null) { + builder.field("license", licenseInfo, params); + } else if (categories.contains(XPackInfoRequest.Category.LICENSE)) { + // if the user requested the license info, and there is no license, we should send + // back an explicit null value (indicating there is no license). This is different + // than not adding the license info at all + builder.nullField("license"); + } + + if (featureSetsInfo != null) { + builder.field("features", featureSetsInfo, params); + } + + if (params.paramAsBoolean("human", true)) { + builder.field("tagline", "You know, for X"); + } + + return builder.endObject(); + } + + public static class LicenseInfo implements ToXContentObject, Writeable { + private final String uid; + private final String type; + private final String mode; + private final LicenseStatus status; + private final long expiryDate; + + public LicenseInfo(String uid, String type, String mode, LicenseStatus status, long expiryDate) { + this.uid = uid; + this.type = type; + this.mode = mode; + this.status = status; + this.expiryDate = expiryDate; + } + + public LicenseInfo(StreamInput in) throws IOException { + this(in.readString(), in.readString(), in.readString(), LicenseStatus.readFrom(in), in.readLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(uid); + out.writeString(type); + out.writeString(mode); + status.writeTo(out); + out.writeLong(expiryDate); + } + + public String getUid() { + return uid; + } + + public String getType() { + return type; + } + + public String getMode() { + return mode; + } + + public long getExpiryDate() { + return expiryDate; + } + + public LicenseStatus getStatus() { + return status; + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + LicenseInfo rhs = (LicenseInfo) other; + return Objects.equals(uid, rhs.uid) + && Objects.equals(type, rhs.type) + && Objects.equals(mode, rhs.mode) + && Objects.equals(status, rhs.status) + && expiryDate == rhs.expiryDate; + } + + @Override + public int hashCode() { + return Objects.hash(uid, type, mode, status, expiryDate); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "license_info", true, (a, v) -> { + String uid = (String) a[0]; + String type = (String) a[1]; + String mode = (String) a[2]; + LicenseStatus status = LicenseStatus.fromString((String) a[3]); + Long expiryDate = (Long) a[4]; + long primitiveExpiryDate = expiryDate == null ? 
BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS : expiryDate; + return new LicenseInfo(uid, type, mode, status, primitiveExpiryDate); + }); + static { + PARSER.declareString(constructorArg(), new ParseField("uid")); + PARSER.declareString(constructorArg(), new ParseField("type")); + PARSER.declareString(constructorArg(), new ParseField("mode")); + PARSER.declareString(constructorArg(), new ParseField("status")); + PARSER.declareLong(optionalConstructorArg(), new ParseField("expiry_date_in_millis")); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject() + .field("uid", uid) + .field("type", type) + .field("mode", mode) + .field("status", status.label()); + if (expiryDate != BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS) { + builder.timeField("expiry_date_in_millis", "expiry_date", expiryDate); + } + return builder.endObject(); + } + } + + public static class BuildInfo implements ToXContentObject, Writeable { + private final String hash; + private final String timestamp; + + public BuildInfo(String hash, String timestamp) { + this.hash = hash; + this.timestamp = timestamp; + } + + public BuildInfo(StreamInput input) throws IOException { + this(input.readString(), input.readString()); + } + + @Override + public void writeTo(StreamOutput output) throws IOException { + output.writeString(hash); + output.writeString(timestamp); + } + + public String getHash() { + return hash; + } + + public String getTimestamp() { + return timestamp; + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + BuildInfo rhs = (BuildInfo) other; + return Objects.equals(hash, rhs.hash) + && Objects.equals(timestamp, rhs.timestamp); + } + + @Override + public int hashCode() { + return Objects.hash(hash, timestamp); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "build_info", true, (a, v) -> new BuildInfo((String) a[0], (String) a[1])); + static { + PARSER.declareString(constructorArg(), new ParseField("hash")); + PARSER.declareString(constructorArg(), new ParseField("date")); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("hash", hash) + .field("date", timestamp) + .endObject(); + } + } + + public static class FeatureSetsInfo implements ToXContentObject, Writeable { + private final Map featureSets; + + public FeatureSetsInfo(Set featureSets) { + Map map = new HashMap<>(featureSets.size()); + for (FeatureSet featureSet : featureSets) { + map.put(featureSet.name, featureSet); + } + this.featureSets = Collections.unmodifiableMap(map); + } + + public FeatureSetsInfo(StreamInput in) throws IOException { + int size = in.readVInt(); + Map featureSets = new HashMap<>(size); + for (int i = 0; i < size; i++) { + FeatureSet featureSet = new FeatureSet(in); + featureSets.put(featureSet.name, featureSet); + } + this.featureSets = Collections.unmodifiableMap(featureSets); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(featureSets.size()); + for (FeatureSet featureSet : featureSets.values()) { + featureSet.writeTo(out); + } + } + + public Map getFeatureSets() { + return featureSets; + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + 
FeatureSetsInfo rhs = (FeatureSetsInfo) other; + return Objects.equals(featureSets, rhs.featureSets); + } + + @Override + public int hashCode() { + return Objects.hash(featureSets); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + List names = new ArrayList<>(this.featureSets.keySet()).stream().sorted().collect(Collectors.toList()); + for (String name : names) { + builder.field(name, featureSets.get(name), params); + } + return builder.endObject(); + } + + public static class FeatureSet implements ToXContentObject, Writeable { + private final String name; + @Nullable private final String description; + private final boolean available; + private final boolean enabled; + @Nullable private final Map nativeCodeInfo; + + public FeatureSet(String name, @Nullable String description, boolean available, boolean enabled, + @Nullable Map nativeCodeInfo) { + this.name = name; + this.description = description; + this.available = available; + this.enabled = enabled; + this.nativeCodeInfo = nativeCodeInfo; + } + + public FeatureSet(StreamInput in) throws IOException { + this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), in.readMap()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeOptionalString(description); + out.writeBoolean(available); + out.writeBoolean(enabled); + out.writeMap(nativeCodeInfo); + } + + public String name() { + return name; + } + + @Nullable + public String description() { + return description; + } + + public boolean available() { + return available; + } + + public boolean enabled() { + return enabled; + } + + @Nullable + public Map nativeCodeInfo() { + return nativeCodeInfo; + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + FeatureSet rhs = (FeatureSet) other; + return Objects.equals(name, rhs.name) + && Objects.equals(description, rhs.description) + && available == rhs.available + && enabled == rhs.enabled + && Objects.equals(nativeCodeInfo, rhs.nativeCodeInfo); + } + + @Override + public int hashCode() { + return Objects.hash(name, description, available, enabled, nativeCodeInfo); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "feature_set", true, (a, name) -> { + String description = (String) a[0]; + boolean available = (Boolean) a[1]; + boolean enabled = (Boolean) a[2]; + @SuppressWarnings("unchecked") // Matches up with declaration below + Map nativeCodeInfo = (Map) a[3]; + return new FeatureSet(name, description, available, enabled, nativeCodeInfo); + }); + static { + PARSER.declareString(optionalConstructorArg(), new ParseField("description")); + PARSER.declareBoolean(constructorArg(), new ParseField("available")); + PARSER.declareBoolean(constructorArg(), new ParseField("enabled")); + PARSER.declareObject(optionalConstructorArg(), (p, name) -> p.map(), new ParseField("native_code_info")); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (description != null) { + builder.field("description", description); + } + builder.field("available", available); + builder.field("enabled", enabled); + if (nativeCodeInfo != null) { + builder.field("native_code_info", nativeCodeInfo); + } + return builder.endObject(); + } + } + } +} diff --git 
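The explicit-null license handling above matters to clients. Below is a minimal sketch of a parse, assuming the response parser is lenient and declares the "build" field in the truncated portion above, and using the usual JsonXContent plumbing; the hash, date, and feature values are invented for illustration.

```java
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

// "license": null (no license installed) parses to a null LicenseInfo,
// which toXContent writes back out as an explicit null field.
String json = "{\"build\": {\"hash\": \"deadbeef\", \"date\": \"2018-08-01T10:00:00Z\"},"
        + " \"license\": null,"
        + " \"features\": {\"graph\": {\"available\": true, \"enabled\": true}}}";
try (XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    XPackInfoResponse response = XPackInfoResponse.fromXContent(parser);
    // serializing response again reproduces the explicit "license": null
}
```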
a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java new file mode 100644 index 00000000000..83621a9ac3d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeRequest; + +public class XPackUsageRequest extends MasterNodeRequest<XPackUsageRequest> { + + @Override + public ActionRequestValidationException validate() { + return null; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java new file mode 100644 index 00000000000..ccf681837fd --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Response object from calling the xpack usage api. + * + * Usage information for each feature is accessible through {@link #getUsages()}. + */ +public class XPackUsageResponse { + + private final Map<String, Map<String, Object>> usages; + + private XPackUsageResponse(Map<String, Map<String, Object>> usages) throws IOException { + this.usages = usages; + } + + @SuppressWarnings("unchecked") + private static Map<String, Object> castMap(Object value) { + return (Map<String, Object>) value; + } + + /** Return a map from feature name to usage information for that feature. */ + public Map<String, Map<String, Object>> getUsages() { + return usages; + } + + public static XPackUsageResponse fromXContent(XContentParser parser) throws IOException { + Map<String, Object> rawMap = parser.map(); + Map<String, Map<String, Object>> usages = rawMap.entrySet().stream().collect( + Collectors.toMap(Map.Entry::getKey, e -> castMap(e.getValue()))); + return new XPackUsageResponse(usages); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java new file mode 100644 index 00000000000..39340955121 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
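A short sketch of consuming that flat usage map, under the same JsonXContent assumptions as the sketch above; the feature name and fields in the sample body are illustrative only.

```java
import java.util.Map;

String json = "{\"monitoring\": {\"available\": true, \"enabled\": true}}";
try (XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    XPackUsageResponse response = XPackUsageResponse.fromXContent(parser);
    // each top-level key is a feature name, mapped to its raw usage fields
    Map<String, Object> monitoring = response.getUsages().get("monitoring");
    assert Boolean.TRUE.equals(monitoring.get("available"));
}
```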
+ */ +package org.elasticsearch.protocol.xpack.common; + +import java.util.Arrays; +import java.util.Map; + +/** + * Common utilities used for XPack protocol classes + */ +public final class ProtocolUtils { + + /** + * Implements equals for a map of string arrays + * + * The map of string arrays is used in some XPack protocol classes but doesn't work with equals(). + */ + public static boolean equals(Map<String, String[]> a, Map<String, String[]> b) { + if (a == null) { + return b == null; + } + if (b == null) { + return false; + } + if (a.size() != b.size()) { + return false; + } + for (Map.Entry<String, String[]> entry : a.entrySet()) { + String[] val = entry.getValue(); + String key = entry.getKey(); + if (val == null) { + if (b.get(key) != null || b.containsKey(key) == false) { + return false; + } + } else { + if (Arrays.equals(val, b.get(key)) == false) { + return false; + } + } + } + return true; + } + + /** + * Implements hashCode for a map of string arrays + * + * The map of string arrays doesn't work with hashCode(). + */ + public static int hashCode(Map<String, String[]> a) { + int hash = 0; + for (Map.Entry<String, String[]> entry : a.entrySet()) + hash += Arrays.hashCode(entry.getValue()); + return hash; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Connection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java similarity index 51% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Connection.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java index f3d92896449..994c7e2c2d5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Connection.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java @@ -3,18 +3,25 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import com.carrotsearch.hppc.ObjectIntHashMap; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContent.Params; -import org.elasticsearch.xpack.core.graph.action.Vertex.VertexId; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; import java.io.IOException; +import java.util.List; import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; /** * A Connection links exactly two {@link Vertex} objects. The basis of a @@ -23,10 +30,10 @@ import java.util.Map; * as a weight.
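The reason these helpers exist: Java arrays use identity-based equals and hashCode, so a Map with String[] values cannot be compared with Object.equals. A minimal illustration (values are made up):

```java
import java.util.HashMap;
import java.util.Map;

Map<String, String[]> a = new HashMap<>();
Map<String, String[]> b = new HashMap<>();
a.put("security", new String[] { "msg one", "msg two" });
b.put("security", new String[] { "msg one", "msg two" });

// HashMap.equals delegates to String[].equals, which is identity-based:
assert a.equals(b) == false;
// The helpers compare and hash the array values element-wise instead:
assert ProtocolUtils.equals(a, b);
assert ProtocolUtils.hashCode(a) == ProtocolUtils.hashCode(b);
```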
*/ public class Connection { - Vertex from; - Vertex to; - double weight; - long docCount; + private Vertex from; + private Vertex to; + private double weight; + private long docCount; public Connection(Vertex from, Vertex to, double weight, long docCount) { this.from = from; @@ -35,7 +42,7 @@ public class Connection { this.docCount = docCount; } - void readFrom(StreamInput in, Map vertices) throws IOException { + public Connection(StreamInput in, Map vertices) throws IOException { from = vertices.get(new VertexId(in.readString(), in.readString())); to = vertices.get(new VertexId(in.readString(), in.readString())); weight = in.readDouble(); @@ -80,13 +87,81 @@ public class Connection { public long getDocCount() { return docCount; } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Connection other = (Connection) obj; + return docCount == other.docCount && + weight == other.weight && + Objects.equals(to, other.to) && + Objects.equals(from, other.from); + } + + @Override + public int hashCode() { + return Objects.hash(docCount, weight, from, to); + } + + + private static final ParseField SOURCE = new ParseField("source"); + private static final ParseField TARGET = new ParseField("target"); + private static final ParseField WEIGHT = new ParseField("weight"); + private static final ParseField DOC_COUNT = new ParseField("doc_count"); + void toXContent(XContentBuilder builder, Params params, ObjectIntHashMap vertexNumbers) throws IOException { - builder.field("source", vertexNumbers.get(from)); - builder.field("target", vertexNumbers.get(to)); - builder.field("weight", weight); - builder.field("doc_count", docCount); + builder.field(SOURCE.getPreferredName(), vertexNumbers.get(from)); + builder.field(TARGET.getPreferredName(), vertexNumbers.get(to)); + builder.field(WEIGHT.getPreferredName(), weight); + builder.field(DOC_COUNT.getPreferredName(), docCount); } + + //When deserializing from XContent we need to wait for all vertices to be loaded before + // Connection objects can be created that reference them. This class provides the interim + // state for connections. 
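A sketch of that two-phase decode follows; the values are made up, and same-package access is assumed since both the Vertex constructor and UnresolvedConnection are package-visible.

```java
import java.util.Arrays;
import java.util.List;

// Phase 1: every vertex has been parsed into an ordered list.
List<Vertex> vertices = Arrays.asList(
        new Vertex("people", "alice", 1.0, 0, 10, 5),
        new Vertex("people", "bob", 0.5, 1, 20, 4));

// Phase 2: a connection parsed as {"source": 0, "target": 1, ...} keeps the
// vertex indices until the whole list is available, then resolves them.
Connection.UnresolvedConnection pending = new Connection.UnresolvedConnection(0, 1, 0.5, 42);
Connection resolved = pending.resolve(vertices);
assert resolved.getDocCount() == 42;
```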
+ static class UnresolvedConnection { + int fromIndex; + int toIndex; + double weight; + long docCount; + UnresolvedConnection(int fromIndex, int toIndex, double weight, long docCount) { + super(); + this.fromIndex = fromIndex; + this.toIndex = toIndex; + this.weight = weight; + this.docCount = docCount; + } + public Connection resolve(List vertices) { + return new Connection(vertices.get(fromIndex), vertices.get(toIndex), weight, docCount); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "ConnectionParser", true, + args -> { + int source = (Integer) args[0]; + int target = (Integer) args[1]; + double weight = (Double) args[2]; + long docCount = (Long) args[3]; + return new UnresolvedConnection(source, target, weight, docCount); + }); + + static { + PARSER.declareInt(constructorArg(), SOURCE); + PARSER.declareInt(constructorArg(), TARGET); + PARSER.declareDouble(constructorArg(), WEIGHT); + PARSER.declareLong(constructorArg(), DOC_COUNT); + } + static UnresolvedConnection fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + } + /** * An identifier (implements hashcode and equals) that represents a diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java similarity index 72% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequest.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java index e44f9f76037..196982c0a35 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -14,6 +14,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; @@ -29,7 +31,7 @@ import java.util.List; * Holds the criteria required to guide the exploration of connected terms which * can be returned as a graph. 
 */ -public class GraphExploreRequest extends ActionRequest implements IndicesRequest.Replaceable { +public class GraphExploreRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContentObject { public static final String NO_HOPS_ERROR_MESSAGE = "Graph explore request must have at least one hop"; public static final String NO_VERTICES_ERROR_MESSAGE = "Graph explore hop must have at least one VertexRequest"; @@ -51,8 +53,8 @@ public class GraphExploreRequest extends ActionRequest implements IndicesRequest } /** - * Constructs a new graph request to run against the provided - * indices. No indices means it will run against all indices. + * Constructs a new graph request to run against the provided indices. No + * indices means it will run against all indices. */ public GraphExploreRequest(String... indices) { this.indices = indices; @@ -75,7 +77,6 @@ public class GraphExploreRequest extends ActionRequest implements IndicesRequest return this.indices; } - @Override public GraphExploreRequest indices(String... indices) { this.indices = indices; @@ -123,10 +124,14 @@ public class GraphExploreRequest extends ActionRequest implements IndicesRequest } /** - * Graph exploration can be set to timeout after the given period. Search operations involved in - * each hop are limited to the remaining time available but can still overrun due to the nature - * of their "best efforts" timeout support. When a timeout occurs partial results are returned. - * @param timeout a {@link TimeValue} object which determines the maximum length of time to spend exploring + * Graph exploration can be set to timeout after the given period. Search + * operations involved in each hop are limited to the remaining time + * available but can still overrun due to the nature of their "best efforts" + * timeout support. When a timeout occurs partial results are returned. + * + * @param timeout + * a {@link TimeValue} object which determines the maximum length + * of time to spend exploring */ public GraphExploreRequest timeout(TimeValue timeout) { if (timeout == null) { @@ -153,10 +158,10 @@ public class GraphExploreRequest extends ActionRequest implements IndicesRequest sampleSize = in.readInt(); sampleDiversityField = in.readOptionalString(); maxDocsPerDiversityValue = in.readInt(); - + useSignificance = in.readBoolean(); returnDetailedInfo = in.readBoolean(); - + int numHops = in.readInt(); Hop parentHop = null; for (int i = 0; i < numHops; i++) { @@ -180,7 +185,7 @@ public class GraphExploreRequest extends ActionRequest implements IndicesRequest out.writeInt(sampleSize); out.writeOptionalString(sampleDiversityField); out.writeInt(maxDocsPerDiversityValue); - + out.writeBoolean(useSignificance); out.writeBoolean(returnDetailedInfo); out.writeInt(hops.size()); @@ -196,18 +201,21 @@ public class GraphExploreRequest extends ActionRequest implements IndicesRequest } /** - * The number of top-matching documents that are considered during each hop (default is - * {@link SamplerAggregationBuilder#DEFAULT_SHARD_SAMPLE_SIZE} - * Very small values (less than 50) may not provide sufficient weight-of-evidence to identify - * significant connections between terms. - * <p>    Very large values (many thousands) are not recommended with loosely defined queries (fuzzy queries or those - * with many OR clauses). - * This is because any useful signals in the best documents are diluted with irrelevant noise from low-quality matches. - * Performance is also typically better with smaller samples as there are less look-ups required for background frequencies - * of terms found in the documents + * The number of top-matching documents that are considered during each hop + * (default is {@link SamplerAggregationBuilder#DEFAULT_SHARD_SAMPLE_SIZE}) + * Very small values (less than 50) may not provide sufficient + * weight-of-evidence to identify significant connections between terms. + * <p> + * Very large values (many thousands) are not recommended with loosely + * defined queries (fuzzy queries or those with many OR clauses). This is + * because any useful signals in the best documents are diluted with + * irrelevant noise from low-quality matches. Performance is also typically + * better with smaller samples as there are fewer look-ups required for + * background frequencies of terms found in the documents. + * <p> + * + * @param maxNumberOfDocsPerHop + * shard-level sample size in documents */ public void sampleSize(int maxNumberOfDocsPerHop) { sampleSize = maxNumberOfDocsPerHop; @@ -242,10 +250,13 @@ public class GraphExploreRequest extends ActionRequest implements IndicesRequest } /** - * Controls the choice of algorithm used to select interesting terms. The default - * value is true which means terms are selected based on significance (see the {@link SignificantTerms} - * aggregation) rather than popularity (using the {@link TermsAggregator}). - * @param value true if the significant_terms algorithm should be used. + * Controls the choice of algorithm used to select interesting terms. The + * default value is true which means terms are selected based on + * significance (see the {@link SignificantTerms} aggregation) rather than + * popularity (using the {@link TermsAggregator}). + * + * @param value + * true if the significant_terms algorithm should be used. */ public void useSignificance(boolean value) { this.useSignificance = value; @@ -254,32 +265,37 @@ public class GraphExploreRequest extends ActionRequest implements IndicesRequest public boolean useSignificance() { return useSignificance; } - + /** - * Return detailed information about vertex frequencies as part of JSON results - defaults to false - * @param value true if detailed information is required in JSON responses + * Return detailed information about vertex frequencies as part of JSON + * results - defaults to false + * + * @param value + * true if detailed information is required in JSON responses */ public void returnDetailedInfo(boolean value) { this.returnDetailedInfo = value; - } + } public boolean returnDetailedInfo() { return returnDetailedInfo; } - /** - * Add a stage in the graph exploration. Each hop represents a stage of - * querying elasticsearch to identify terms which can then be connnected - * to other terms in a subsequent hop. - * @param guidingQuery optional choice of query which influences which documents - * are considered in this stage - * @return a {@link Hop} object that holds settings for a stage in the graph exploration + * Add a stage in the graph exploration. Each hop represents a stage of + * querying elasticsearch to identify terms which can then be connected to + * other terms in a subsequent hop.
+ * + * @param guidingQuery + * optional choice of query which influences which documents are + * considered in this stage + * @return a {@link Hop} object that holds settings for a stage in the graph + * exploration */ public Hop createNextHop(QueryBuilder guidingQuery) { Hop parent = null; if (hops.size() > 0) { - parent = hops.get(hops.size() - 1); + parent = hops.get(hops.size() - 1); } Hop newHop = new Hop(parent); newHop.guidingQuery = guidingQuery; @@ -330,6 +346,43 @@ public class GraphExploreRequest extends ActionRequest implements IndicesRequest } } - + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.startObject("controls"); + { + if (sampleSize != SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE) { + builder.field("sample_size", sampleSize); + } + if (sampleDiversityField != null) { + builder.startObject("sample_diversity"); + builder.field("field", sampleDiversityField); + builder.field("max_docs_per_value", maxDocsPerDiversityValue); + builder.endObject(); + } + builder.field("use_significance", useSignificance); + if (returnDetailedInfo) { + builder.field("return_detailed_stats", returnDetailedInfo); + } + } + builder.endObject(); + + for (Hop hop : hops) { + if (hop.parentHop != null) { + builder.startObject("connections"); + } + hop.toXContent(builder, params); + } + for (Hop hop : hops) { + if (hop.parentHop != null) { + builder.endObject(); + } + } + builder.endObject(); + + return builder; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java similarity index 61% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreResponse.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java index 3d6c5f5aaca..12eb20617ff 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java @@ -3,26 +3,34 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
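Taken together, the request half of this API composes as in the following sketch; the field names and query values are invented for illustration.

```java
import org.elasticsearch.index.query.QueryBuilders;

GraphExploreRequest request = new GraphExploreRequest("people");
request.sampleSize(100);          // per-shard sample considered in each hop
request.useSignificance(true);    // significant_terms scoring rather than raw counts
Hop hop = request.createNextHop(QueryBuilders.termQuery("interest", "guitar"));
// toXContent then nests each child hop inside its parent's "connections" object.
```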
*/ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import com.carrotsearch.hppc.ObjectIntHashMap; + import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.graph.action.Connection.ConnectionId; -import org.elasticsearch.xpack.core.graph.action.Vertex.VertexId; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; +import org.elasticsearch.protocol.xpack.graph.Connection.UnresolvedConnection; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; import java.io.IOException; import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * Graph explore response holds a graph of {@link Vertex} and {@link Connection} objects @@ -100,8 +108,7 @@ public class GraphExploreResponse extends ActionResponse implements ToXContentOb connections = new HashMap<>(); for (int i = 0; i < size; i++) { - Connection e = new Connection(); - e.readFrom(in, vertices); + Connection e = new Connection(in, vertices); connections.put(e.getId(), e); } @@ -146,23 +153,19 @@ public class GraphExploreResponse extends ActionResponse implements ToXContentOb } - static final class Fields { - static final String TOOK = "took"; - static final String TIMED_OUT = "timed_out"; - static final String INDICES = "_indices"; - static final String FAILURES = "failures"; - static final String VERTICES = "vertices"; - static final String CONNECTIONS = "connections"; - - } + private static final ParseField TOOK = new ParseField("took"); + private static final ParseField TIMED_OUT = new ParseField("timed_out"); + private static final ParseField VERTICES = new ParseField("vertices"); + private static final ParseField CONNECTIONS = new ParseField("connections"); + private static final ParseField FAILURES = new ParseField("failures"); @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(Fields.TOOK, tookInMillis); - builder.field(Fields.TIMED_OUT, timedOut); + builder.field(TOOK.getPreferredName(), tookInMillis); + builder.field(TIMED_OUT.getPreferredName(), timedOut); - builder.startArray(Fields.FAILURES); + builder.startArray(FAILURES.getPreferredName()); if (shardFailures != null) { for (ShardOperationFailedException shardFailure : shardFailures) { builder.startObject(); @@ -178,7 +181,7 @@ public class GraphExploreResponse extends ActionResponse implements ToXContentOb extraParams.put(RETURN_DETAILED_INFO_PARAM, Boolean.toString(returnDetailedInfo)); Params extendedParams = new DelegatingMapParams(extraParams, params); - builder.startArray(Fields.VERTICES); + 
builder.startArray(VERTICES.getPreferredName()); for (Vertex vertex : vertices.values()) { builder.startObject(); vertexNumbers.put(vertex, vertexNumbers.size()); @@ -187,7 +190,7 @@ public class GraphExploreResponse extends ActionResponse implements ToXContentOb } builder.endArray(); - builder.startArray(Fields.CONNECTIONS); + builder.startArray(CONNECTIONS.getPreferredName()); for (Connection connection : connections.values()) { builder.startObject(); connection.toXContent(builder, extendedParams, vertexNumbers); @@ -198,5 +201,48 @@ public class GraphExploreResponse extends ActionResponse implements ToXContentOb return builder; } + private static final ConstructingObjectParser<GraphExploreResponse, Void> PARSER = new ConstructingObjectParser<>( + "GraphExploreResponseParser", true, + args -> { + GraphExploreResponse result = new GraphExploreResponse(); + result.vertices = new HashMap<>(); + result.connections = new HashMap<>(); + + result.tookInMillis = (Long) args[0]; + result.timedOut = (Boolean) args[1]; + + @SuppressWarnings("unchecked") + List<Vertex> vertices = (List<Vertex>) args[2]; + @SuppressWarnings("unchecked") + List<UnresolvedConnection> unresolvedConnections = (List<UnresolvedConnection>) args[3]; + @SuppressWarnings("unchecked") + List<ShardSearchFailure> failures = (List<ShardSearchFailure>) args[4]; + for (Vertex vertex : vertices) { + // reverse-engineer if detailed stats were requested - + // mainly here for testing framework's equality tests + result.returnDetailedInfo = result.returnDetailedInfo || vertex.getFg() > 0; + result.vertices.put(vertex.getId(), vertex); + } + for (UnresolvedConnection unresolvedConnection : unresolvedConnections) { + Connection resolvedConnection = unresolvedConnection.resolve(vertices); + result.connections.put(resolvedConnection.getId(), resolvedConnection); + } + if (failures.size() > 0) { + result.shardFailures = failures.toArray(new ShardSearchFailure[failures.size()]); + } + return result; + }); + + static { + PARSER.declareLong(constructorArg(), TOOK); + PARSER.declareBoolean(constructorArg(), TIMED_OUT); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> Vertex.fromXContent(p), VERTICES); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> UnresolvedConnection.fromXContent(p), CONNECTIONS); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.fromXContent(p), FAILURES); + } + + public static GraphExploreResponse fromXContext(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Hop.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java similarity index 85% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Hop.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java index 8ba7005f15f..e61403e8b37 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Hop.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java @@ -3,12 +3,14 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License.
*/ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -41,7 +43,7 @@ import java.util.List; *

* */ -public class Hop { +public class Hop implements ToXContentFragment { final Hop parentHop; List<VertexRequest> vertices = null; QueryBuilder guidingQuery = null; @@ -139,4 +141,20 @@ public class Hop { public VertexRequest getVertexRequest(int requestNumber) { return getEffectiveVertexRequests().get(requestNumber); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (guidingQuery != null) { + builder.field("query"); + guidingQuery.toXContent(builder, params); + } + if (vertices != null && vertices.size() > 0) { + builder.startArray("vertices"); + for (VertexRequest vertexRequest : vertices) { + vertexRequest.toXContent(builder, params); + } + builder.endArray(); + } + return builder; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Vertex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java similarity index 65% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Vertex.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java index c85d6d7dfd6..f17812a6396 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Vertex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java @@ -3,14 +3,21 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * A vertex in a graph response represents a single term (a field and value pair) @@ -27,6 +34,13 @@ public class Vertex implements ToXContentFragment { private final int depth; private final long bg; private long fg; + private static final ParseField FIELD = new ParseField("field"); + private static final ParseField TERM = new ParseField("term"); + private static final ParseField WEIGHT = new ParseField("weight"); + private static final ParseField DEPTH = new ParseField("depth"); + private static final ParseField FG = new ParseField("fg"); + private static final ParseField BG = new ParseField("bg"); + + public Vertex(String field, String term, double weight, int depth, long bg, long fg) { super(); @@ -50,20 +64,72 @@ public class Vertex implements ToXContentFragment { out.writeVLong(bg); out.writeVLong(fg); } + + @Override + public int hashCode() { + return Objects.hash(field, term, weight, depth, bg, fg); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Vertex other = (Vertex) obj; + return depth == other.depth && + weight == other.weight && + bg == other.bg && + fg == other.fg && + Objects.equals(field,
other.field) && + Objects.equals(term, other.term); + + } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { boolean returnDetailedInfo = params.paramAsBoolean(GraphExploreResponse.RETURN_DETAILED_INFO_PARAM, false); - builder.field("field", field); - builder.field("term", term); - builder.field("weight", weight); - builder.field("depth", depth); + builder.field(FIELD.getPreferredName(), field); + builder.field(TERM.getPreferredName(), term); + builder.field(WEIGHT.getPreferredName(), weight); + builder.field(DEPTH.getPreferredName(), depth); if (returnDetailedInfo) { - builder.field("fg", fg); - builder.field("bg", bg); + builder.field(FG.getPreferredName(), fg); + builder.field(BG.getPreferredName(), bg); } return builder; } + + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "VertexParser", true, + args -> { + String field = (String) args[0]; + String term = (String) args[1]; + double weight = (Double) args[2]; + int depth = (Integer) args[3]; + Long optionalBg = (Long) args[4]; + Long optionalFg = (Long) args[5]; + long bg = optionalBg == null ? 0 : optionalBg; + long fg = optionalFg == null ? 0 : optionalFg; + return new Vertex(field, term, weight, depth, bg, fg); + }); + + static { + PARSER.declareString(constructorArg(), FIELD); + PARSER.declareString(constructorArg(), TERM); + PARSER.declareDouble(constructorArg(), WEIGHT); + PARSER.declareInt(constructorArg(), DEPTH); + PARSER.declareLong(optionalConstructorArg(), BG); + PARSER.declareLong(optionalConstructorArg(), FG); + } + + static Vertex fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + /** * @return a {@link VertexId} object that uniquely identifies this Vertex diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/VertexRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java similarity index 78% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/VertexRequest.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java index f7f7dec4b17..63d2c616547 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/VertexRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java @@ -3,11 +3,13 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest.TermBoost; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; import java.io.IOException; import java.util.HashMap; @@ -21,9 +23,10 @@ import java.util.Set; * inclusion list to filter which terms are considered. 
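A small sketch of the defaulting behaviour of the Vertex parser above; fromXContent is package-private, so same-package access is assumed, and the JsonXContent setup mirrors the earlier sketches.

```java
String json = "{\"field\": \"people\", \"term\": \"alice\", \"weight\": 1.0, \"depth\": 0}";
try (XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    Vertex vertex = Vertex.fromXContent(parser);
    // "bg" and "fg" were omitted (detailed stats not requested), so both default to 0.
    assert vertex.getFg() == 0;
}
```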
* */ -public class VertexRequest { +public class VertexRequest implements ToXContentObject { private String fieldName; - private int size = 5; + private int size = DEFAULT_SIZE; + public static final int DEFAULT_SIZE = 5; private Map<String, TermBoost> includes; private Set<String> excludes; public static final int DEFAULT_MIN_DOC_COUNT = 3; @@ -195,4 +198,38 @@ public class VertexRequest { return this; } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("field", fieldName); + if (size != DEFAULT_SIZE) { + builder.field("size", size); + } + if (minDocCount != DEFAULT_MIN_DOC_COUNT) { + builder.field("min_doc_count", minDocCount); + } + if (shardMinDocCount != DEFAULT_SHARD_MIN_DOC_COUNT) { + builder.field("shard_min_doc_count", shardMinDocCount); + } + if (includes != null) { + builder.startArray("include"); + for (TermBoost tb : includes.values()) { + builder.startObject(); + builder.field("term", tb.term); + builder.field("boost", tb.boost); + builder.endObject(); + } + builder.endArray(); + } + if (excludes != null) { + builder.startArray("exclude"); + for (String value : excludes) { + builder.value(value); + } + builder.endArray(); + } + builder.endObject(); + return builder; + } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java new file mode 100644 index 00000000000..5d5dd0f5ef6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's Graph + * APIs. + */ +package org.elasticsearch.protocol.xpack.graph; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java new file mode 100644 index 00000000000..62353b093b5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; + + +public class DeleteLicenseRequest extends AcknowledgedRequest<DeleteLicenseRequest> { + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java new file mode 100644 index 00000000000..926ce1d1d70 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + + +public class GetLicenseRequest extends MasterNodeReadRequest { + + public GetLicenseRequest() { + } + + public GetLicenseRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java new file mode 100644 index 00000000000..6d5e1b5653f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionResponse; + +public class GetLicenseResponse extends ActionResponse { + + private String license; + + GetLicenseResponse() { + } + + public GetLicenseResponse(String license) { + this.license = license; + } + + public String getLicenseDefinition() { + return license; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicenseStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicenseStatus.java new file mode 100644 index 00000000000..5bc66ab745e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicenseStatus.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import java.io.IOException; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +/** + * Status of an X-Pack license. 
+ */ +public enum LicenseStatus implements Writeable { + + ACTIVE("active"), + INVALID("invalid"), + EXPIRED("expired"); + + private final String label; + + LicenseStatus(String label) { + this.label = label; + } + + public String label() { + return label; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(label); + } + + public static LicenseStatus readFrom(StreamInput in) throws IOException { + return fromString(in.readString()); + } + + public static LicenseStatus fromString(String value) { + switch (value) { + case "active": + return ACTIVE; + case "invalid": + return INVALID; + case "expired": + return EXPIRED; + default: + throw new IllegalArgumentException("unknown license status [" + value + "]"); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicensesStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicensesStatus.java new file mode 100644 index 00000000000..18745653e76 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicensesStatus.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import java.util.Locale; + +public enum LicensesStatus { + VALID((byte) 0), + INVALID((byte) 1), + EXPIRED((byte) 2); + + private final byte id; + + LicensesStatus(byte id) { + this.id = id; + } + + public int id() { + return id; + } + + public static LicensesStatus fromId(int id) { + if (id == 0) { + return VALID; + } else if (id == 1) { + return INVALID; + } else if (id == 2) { + return EXPIRED; + } else { + throw new IllegalStateException("no valid LicensesStatus for id=" + id); + } + } + + + @Override + public String toString() { + return this.name().toLowerCase(Locale.ROOT); + } + + public static LicensesStatus fromString(String value) { + switch (value) { + case "valid": + return VALID; + case "invalid": + return INVALID; + case "expired": + return EXPIRED; + default: + throw new IllegalArgumentException("unknown licenses status [" + value + "]"); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java new file mode 100644 index 00000000000..342e6c296e7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
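The two status enums deliberately use different wire forms: LicenseStatus is written as its string label, while LicensesStatus travels as a numeric id internally and a lowercase name over REST. A quick sketch of the round trips, using only the methods defined above:

```java
// LicenseStatus round-trips through its string label:
assert LicenseStatus.fromString("expired") == LicenseStatus.EXPIRED;
assert "expired".equals(LicenseStatus.EXPIRED.label());

// LicensesStatus round-trips through a numeric id or a lowercase name:
assert LicensesStatus.fromId(1) == LicensesStatus.INVALID;
assert "invalid".equals(LicensesStatus.INVALID.toString());
assert LicensesStatus.fromString("valid") == LicensesStatus.VALID;
```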
+ */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; + +public class PutLicenseRequest extends AcknowledgedRequest { + + private String licenseDefinition; + private boolean acknowledge = false; + + public PutLicenseRequest() { + + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public void setLicenseDefinition(String licenseDefinition) { + this.licenseDefinition = licenseDefinition; + } + + public String getLicenseDefinition() { + return licenseDefinition; + } + + public void setAcknowledge(boolean acknowledge) { + this.acknowledge = acknowledge; + } + + public boolean isAcknowledge() { + return acknowledge; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java new file mode 100644 index 00000000000..206c5a3b383 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.common.ProtocolUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class PutLicenseResponse extends AcknowledgedResponse { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "put_license_response", true, (a, v) -> { + boolean acknowledged = (Boolean) a[0]; + LicensesStatus licensesStatus = LicensesStatus.fromString((String) a[1]); + @SuppressWarnings("unchecked") Tuple> acknowledgements = (Tuple>) a[2]; + if (acknowledgements == null) { + return new PutLicenseResponse(acknowledged, licensesStatus); + } else { + return new PutLicenseResponse(acknowledged, licensesStatus, acknowledgements.v1(), acknowledgements.v2()); + } + + }); + + static { + PARSER.declareBoolean(constructorArg(), new ParseField("acknowledged")); + PARSER.declareString(constructorArg(), new ParseField("license_status")); + PARSER.declareObject(optionalConstructorArg(), (parser, v) -> { + Map acknowledgeMessages = new HashMap<>(); + String message = null; + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != 
XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else { + if (currentFieldName == null) { + throw new XContentParseException(parser.getTokenLocation(), "expected message header or acknowledgement"); + } + if ("message".equals(currentFieldName)) { + if (token != XContentParser.Token.VALUE_STRING) { + throw new XContentParseException(parser.getTokenLocation(), "unexpected message header type"); + } + message = parser.text(); + } else { + if (token != XContentParser.Token.START_ARRAY) { + throw new XContentParseException(parser.getTokenLocation(), "unexpected acknowledgement type"); + } + List acknowledgeMessagesList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token != XContentParser.Token.VALUE_STRING) { + throw new XContentParseException(parser.getTokenLocation(), "unexpected acknowledgement text"); + } + acknowledgeMessagesList.add(parser.text()); + } + acknowledgeMessages.put(currentFieldName, acknowledgeMessagesList.toArray(new String[0])); + } + } + } + return new Tuple<>(message, acknowledgeMessages); + }, + new ParseField("acknowledge")); + } + + private LicensesStatus status; + private Map acknowledgeMessages; + private String acknowledgeHeader; + + public PutLicenseResponse() { + } + + public PutLicenseResponse(boolean acknowledged, LicensesStatus status) { + this(acknowledged, status, null, Collections.emptyMap()); + } + + public PutLicenseResponse(boolean acknowledged, LicensesStatus status, String acknowledgeHeader, + Map acknowledgeMessages) { + super(acknowledged); + this.status = status; + this.acknowledgeHeader = acknowledgeHeader; + this.acknowledgeMessages = acknowledgeMessages; + } + + public LicensesStatus status() { + return status; + } + + public Map acknowledgeMessages() { + return acknowledgeMessages; + } + + public String acknowledgeHeader() { + return acknowledgeHeader; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + status = LicensesStatus.fromId(in.readVInt()); + acknowledgeHeader = in.readOptionalString(); + int size = in.readVInt(); + Map acknowledgeMessages = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String feature = in.readString(); + int nMessages = in.readVInt(); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = in.readString(); + } + acknowledgeMessages.put(feature, messages); + } + this.acknowledgeMessages = acknowledgeMessages; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(status.id()); + out.writeOptionalString(acknowledgeHeader); + out.writeVInt(acknowledgeMessages.size()); + for (Map.Entry entry : acknowledgeMessages.entrySet()) { + out.writeString(entry.getKey()); + out.writeVInt(entry.getValue().length); + for (String message : entry.getValue()) { + out.writeString(message); + } + } + } + + @Override + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + builder.field("license_status", status.toString()); + if (!acknowledgeMessages.isEmpty()) { + builder.startObject("acknowledge"); + builder.field("message", acknowledgeHeader); + for (Map.Entry entry : acknowledgeMessages.entrySet()) { + builder.startArray(entry.getKey()); + for (String message : entry.getValue()) { + builder.value(message); + } + builder.endArray(); + } + builder.endObject(); + } + } + + @Override + public String 
toString() { + return Strings.toString(this, true, true); + } + + public static PutLicenseResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + PutLicenseResponse that = (PutLicenseResponse) o; + + return status == that.status && + ProtocolUtils.equals(acknowledgeMessages, that.acknowledgeMessages) && + Objects.equals(acknowledgeHeader, that.acknowledgeHeader); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), status, ProtocolUtils.hashCode(acknowledgeMessages), acknowledgeHeader); + } + + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/package-info.java new file mode 100644 index 00000000000..a0a80a9958b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's License + * APIs. + */ +package org.elasticsearch.protocol.xpack.license; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java new file mode 100644 index 00000000000..17afee59fa1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +public class IndexUpgradeInfoRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { + + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true); + + public IndexUpgradeInfoRequest(String... 
indices) { + indices(indices); + } + + public IndexUpgradeInfoRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndexUpgradeInfoRequest indices(String... indices) { + this.indices = Objects.requireNonNull(indices, "indices cannot be null"); + return this; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public void indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IndexUpgradeInfoRequest request = (IndexUpgradeInfoRequest) o; + return Arrays.equals(indices, request.indices) && + Objects.equals(indicesOptions.toString(), request.indicesOptions.toString()); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices), indicesOptions.toString()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java new file mode 100644 index 00000000000..17115ac9b17 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public class IndexUpgradeInfoResponse extends ActionResponse implements ToXContentObject { + + private static final ParseField INDICES = new ParseField("indices"); + private static final ParseField ACTION_REQUIRED = new ParseField("action_required"); + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("IndexUpgradeInfoResponse", + true, + (a, c) -> { + @SuppressWarnings("unchecked") + Map map = (Map)a[0]; + Map actionsRequired = map.entrySet().stream() + .filter(e -> { + if (e.getValue() instanceof Map == false) { + return false; + } + @SuppressWarnings("unchecked") + Map value =(Map)e.getValue(); + return value.containsKey(ACTION_REQUIRED.getPreferredName()); + }) + .collect(Collectors.toMap( + Map.Entry::getKey, + e -> { + @SuppressWarnings("unchecked") + Map value = (Map) e.getValue(); + return UpgradeActionRequired.fromString((String)value.get(ACTION_REQUIRED.getPreferredName())); + } + )); + return new IndexUpgradeInfoResponse(actionsRequired); + }); + + static { + PARSER.declareObject(constructorArg(), (p, c) -> p.map(), INDICES); + } + + + private Map actions; + + public IndexUpgradeInfoResponse() { + + } + + public IndexUpgradeInfoResponse(Map actions) { + this.actions = actions; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + actions = in.readMap(StreamInput::readString, UpgradeActionRequired::readFromStream); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(actions, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); + } + + public Map getActions() { + return actions; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.startObject(INDICES.getPreferredName()); + for (Map.Entry entry : actions.entrySet()) { + builder.startObject(entry.getKey()); + { + builder.field(ACTION_REQUIRED.getPreferredName(), entry.getValue().toString()); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IndexUpgradeInfoResponse response = (IndexUpgradeInfoResponse) o; + return Objects.equals(actions, response.actions); + } + + @Override + public int hashCode() { + return Objects.hash(actions); + } + + public static IndexUpgradeInfoResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/UpgradeActionRequired.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/UpgradeActionRequired.java
new file mode 100644
index 00000000000..dce1c7d18f5
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/UpgradeActionRequired.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.protocol.xpack.migration;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+
+import java.io.IOException;
+import java.util.Locale;
+
+/**
+ * Indicates the type of upgrade required for the index.
+ */
+public enum UpgradeActionRequired implements Writeable {
+    NOT_APPLICABLE, // Indicates that the check is not applicable to this index type, the next check will be performed
+    UP_TO_DATE, // Indicates that the check finds this index to be up to date - no additional checks are required
+    REINDEX, // The index should be reindexed
+    UPGRADE; // The index should go through the upgrade procedure
+
+    public static UpgradeActionRequired fromString(String value) {
+        return UpgradeActionRequired.valueOf(value.toUpperCase(Locale.ROOT));
+    }
+
+    public static UpgradeActionRequired readFromStream(StreamInput in) throws IOException {
+        return in.readEnum(UpgradeActionRequired.class);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeEnum(this);
+    }
+
+    @Override
+    public String toString() {
+        return name().toLowerCase(Locale.ROOT);
+    }
+
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/package-info.java
new file mode 100644
index 00000000000..7c52f6a8fd4
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/package-info.java
@@ -0,0 +1,11 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+/**
+ * Request and Response objects for the default distribution's Migration
+ * APIs.
+ */
+package org.elasticsearch.protocol.xpack.migration;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/package-info.java
new file mode 100644
index 00000000000..3ed877d08cc
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+/**
+ * Request and Response objects for miscellaneous X-Pack APIs.
+ */
+package org.elasticsearch.protocol.xpack;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java
new file mode 100644
index 00000000000..ce627b267f3
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java
@@ -0,0 +1,11 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+/**
+ * Request and Response objects for the default distribution's Security
+ * APIs.
+ */
+package org.elasticsearch.protocol.xpack.security;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java
new file mode 100644
index 00000000000..4a458b69a75
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.protocol.xpack.watcher;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ValidateActions;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lucene.uid.Versions;
+
+import java.io.IOException;
+
+/**
+ * A delete watch request to delete a watch by name (id)
+ */
+public class DeleteWatchRequest extends ActionRequest {
+
+    private String id;
+    private long version = Versions.MATCH_ANY;
+
+    public DeleteWatchRequest() {
+        this(null);
+    }
+
+    public DeleteWatchRequest(String id) {
+        this.id = id;
+    }
+
+    /**
+     * @return The name of the watch to be deleted
+     */
+    public String getId() {
+        return id;
+    }
+
+    /**
+     * Sets the name of the watch to be deleted
+     */
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        ActionRequestValidationException validationException = null;
+        if (id == null) {
+            validationException = ValidateActions.addValidationError("watch id is missing", validationException);
+        } else if (PutWatchRequest.isValidId(id) == false) {
+            validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException);
+        }
+        return validationException;
+    }
+
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+        super.readFrom(in);
+        id = in.readString();
+        version = in.readLong();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeString(id);
+        out.writeLong(version);
+    }
+
+    @Override
+    public String toString() {
+        return "delete [" + id + "]";
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java
new file mode 100644
index 00000000000..39cd5e966fa
--- /dev/null
+++
b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteWatchResponse extends ActionResponse implements ToXContentObject { + + private static final ObjectParser PARSER + = new ObjectParser<>("x_pack_delete_watch_response", DeleteWatchResponse::new); + static { + PARSER.declareString(DeleteWatchResponse::setId, new ParseField("_id")); + PARSER.declareLong(DeleteWatchResponse::setVersion, new ParseField("_version")); + PARSER.declareBoolean(DeleteWatchResponse::setFound, new ParseField("found")); + } + + private String id; + private long version; + private boolean found; + + public DeleteWatchResponse() { + } + + public DeleteWatchResponse(String id, long version, boolean found) { + this.id = id; + this.version = version; + this.found = found; + } + + public String getId() { + return id; + } + + public long getVersion() { + return version; + } + + public boolean isFound() { + return found; + } + + private void setId(String id) { + this.id = id; + } + + private void setVersion(long version) { + this.version = version; + } + + private void setFound(boolean found) { + this.found = found; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + DeleteWatchResponse that = (DeleteWatchResponse) o; + + return Objects.equals(id, that.id) && Objects.equals(version, that.version) && Objects.equals(found, that.found); + } + + @Override + public int hashCode() { + return Objects.hash(id, version, found); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + version = in.readVLong(); + found = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeVLong(version); + out.writeBoolean(found); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("_id", id) + .field("_version", version) + .field("found", found) + .endObject(); + } + + public static DeleteWatchResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java new file mode 100644 index 00000000000..7997d853db3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.util.regex.Pattern; + +/** + * This request class contains the data needed to create a watch along with the name of the watch. + * The name of the watch will become the ID of the indexed document. + */ +public final class PutWatchRequest extends ActionRequest { + + private static final Pattern NO_WS_PATTERN = Pattern.compile("\\S+"); + + private String id; + private BytesReference source; + private XContentType xContentType = XContentType.JSON; + private boolean active = true; + private long version = Versions.MATCH_ANY; + + public PutWatchRequest() {} + + public PutWatchRequest(StreamInput in) throws IOException { + readFrom(in); + } + + public PutWatchRequest(String id, BytesReference source, XContentType xContentType) { + this.id = id; + this.source = source; + this.xContentType = xContentType; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + source = in.readBytesReference(); + active = in.readBoolean(); + xContentType = in.readEnum(XContentType.class); + version = in.readZLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeBytesReference(source); + out.writeBoolean(active); + out.writeEnum(xContentType); + out.writeZLong(version); + } + + /** + * @return The name that will be the ID of the indexed document + */ + public String getId() { + return id; + } + + /** + * Set the watch name + */ + public void setId(String id) { + this.id = id; + } + + /** + * @return The source of the watch + */ + public BytesReference getSource() { + return source; + } + + /** + * Set the source of the watch + */ + public void setSource(BytesReference source, XContentType xContentType) { + this.source = source; + this.xContentType = xContentType; + } + + /** + * @return The initial active state of the watch (defaults to {@code true}, e.g. 
"active") + */ + public boolean isActive() { + return active; + } + + /** + * Sets the initial active state of the watch + */ + public void setActive(boolean active) { + this.active = active; + } + + /** + * Get the content type for the source + */ + public XContentType xContentType() { + return xContentType; + } + + public long getVersion() { + return version; + } + + public void setVersion(long version) { + this.version = version; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (id == null) { + validationException = ValidateActions.addValidationError("watch id is missing", validationException); + } else if (isValidId(id) == false) { + validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); + } + if (source == null) { + validationException = ValidateActions.addValidationError("watch source is missing", validationException); + } + if (xContentType == null) { + validationException = ValidateActions.addValidationError("request body is missing", validationException); + } + return validationException; + } + + public static boolean isValidId(String id) { + return Strings.isEmpty(id) == false && NO_WS_PATTERN.matcher(id).matches(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java new file mode 100644 index 00000000000..f6e55ff5553 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class PutWatchResponse extends ActionResponse implements ToXContentObject { + + private static final ObjectParser PARSER + = new ObjectParser<>("x_pack_put_watch_response", PutWatchResponse::new); + static { + PARSER.declareString(PutWatchResponse::setId, new ParseField("_id")); + PARSER.declareLong(PutWatchResponse::setVersion, new ParseField("_version")); + PARSER.declareBoolean(PutWatchResponse::setCreated, new ParseField("created")); + } + + private String id; + private long version; + private boolean created; + + public PutWatchResponse() { + } + + public PutWatchResponse(String id, long version, boolean created) { + this.id = id; + this.version = version; + this.created = created; + } + + private void setId(String id) { + this.id = id; + } + + private void setVersion(long version) { + this.version = version; + } + + private void setCreated(boolean created) { + this.created = created; + } + + public String getId() { + return id; + } + + public long getVersion() { + return version; + } + + public boolean isCreated() { + return created; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + PutWatchResponse that = (PutWatchResponse) o; + + return Objects.equals(id, that.id) && Objects.equals(version, that.version) && Objects.equals(created, that.created); + } + + @Override + public int hashCode() { + return Objects.hash(id, version, created); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeVLong(version); + out.writeBoolean(created); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + version = in.readVLong(); + created = in.readBoolean(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("_id", id) + .field("_version", version) + .field("created", created) + .endObject(); + } + + public static PutWatchResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java new file mode 100644 index 00000000000..0d9edf3b5c0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's Watcher + * APIs. 
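+ * <p>
+ * As an illustrative sketch only (the {@code watchSource} variable, a
+ * {@code BytesReference} with the watch definition, is assumed and not shown):
+ * <pre>{@code
+ * PutWatchRequest request = new PutWatchRequest("my-watch", watchSource, XContentType.JSON);
+ * request.setActive(true);           // the default; watches start active
+ * assert request.validate() == null; // id present, no whitespace, source and content type set
+ * }</pre>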
+ */ +package org.elasticsearch.protocol.xpack.watcher; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 0389ceffbc3..190a9a2215e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -38,6 +38,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; import org.elasticsearch.xpack.core.graph.GraphFeatureSetUsage; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; @@ -51,9 +52,11 @@ import org.elasticsearch.xpack.core.ml.action.DeleteCalendarEventAction; import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction; +import org.elasticsearch.xpack.core.ml.action.DeleteForecastAction; import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; import org.elasticsearch.xpack.core.ml.action.DeleteModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; +import org.elasticsearch.xpack.core.ml.action.FindFileStructureAction; import org.elasticsearch.xpack.core.ml.action.FlushJobAction; import org.elasticsearch.xpack.core.ml.action.ForecastJobAction; import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; @@ -254,6 +257,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl UpdateProcessAction.INSTANCE, DeleteExpiredDataAction.INSTANCE, ForecastJobAction.INSTANCE, + DeleteForecastAction.INSTANCE, GetCalendarsAction.INSTANCE, PutCalendarAction.INSTANCE, DeleteCalendarAction.INSTANCE, @@ -262,6 +266,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl GetCalendarEventsAction.INSTANCE, PostCalendarEventsAction.INSTANCE, PersistJobAction.INSTANCE, + FindFileStructureAction.INSTANCE, // security ClearRealmCacheAction.INSTANCE, ClearRolesCacheAction.INSTANCE, @@ -364,7 +369,9 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ROLLUP, RollupFeatureSetUsage::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, RollupJob.NAME, RollupJob::new), new NamedWriteableRegistry.Entry(Task.Status.class, RollupJobStatus.NAME, RollupJobStatus::new), - new NamedWriteableRegistry.Entry(PersistentTaskState.class, RollupJobStatus.NAME, RollupJobStatus::new) + new NamedWriteableRegistry.Entry(PersistentTaskState.class, RollupJobStatus.NAME, RollupJobStatus::new), + // ccr + new NamedWriteableRegistry.Entry(AutoFollowMetadata.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new) ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java new file mode 100644 index 00000000000..244a5d441d9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java @@ -0,0 +1,357 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ccr; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractNamedDiffable; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; + +import java.io.IOException; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Custom metadata that contains auto follow patterns and what leader indices an auto follow pattern has already followed. + */ +public class AutoFollowMetadata extends AbstractNamedDiffable implements XPackPlugin.XPackMetaDataCustom { + + public static final String TYPE = "ccr_auto_follow"; + + private static final ParseField PATTERNS_FIELD = new ParseField("patterns"); + private static final ParseField FOLLOWED_LEADER_INDICES_FIELD = new ParseField("followed_leader_indices"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("auto_follow", + args -> new AutoFollowMetadata((Map) args[0], (Map>) args[1])); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> { + Map patterns = new HashMap<>(); + String fieldName = null; + for (XContentParser.Token token = p.nextToken(); token != XContentParser.Token.END_OBJECT; token = p.nextToken()) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = p.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + patterns.put(fieldName, AutoFollowPattern.PARSER.parse(p, c)); + } else { + throw new ElasticsearchParseException("unexpected token [" + token + "]"); + } + } + return patterns; + }, PATTERNS_FIELD); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> { + Map> alreadyFollowedIndexUUIDS = new HashMap<>(); + String fieldName = null; + for (XContentParser.Token token = p.nextToken(); token != XContentParser.Token.END_OBJECT; token = p.nextToken()) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = p.currentName(); + } else if (token == XContentParser.Token.START_ARRAY) { + alreadyFollowedIndexUUIDS.put(fieldName, Arrays.asList(XContentUtils.readStringArray(p, false))); + } else { + throw new ElasticsearchParseException("unexpected token [" + token + "]"); + } + } + return alreadyFollowedIndexUUIDS; + }, FOLLOWED_LEADER_INDICES_FIELD); + } + + public static AutoFollowMetadata fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + private final Map patterns; + 
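+    // Illustrative shape of this metadata as rendered into the cluster state (the keys
+    // come from the parse fields above; the concrete values are made up for the sketch):
+    //   "ccr_auto_follow" : {
+    //     "patterns" : { "my-pattern" : { "leader_patterns" : [ "logs-*" ] } },
+    //     "followed_leader_indices" : { "my-pattern" : [ "index-uuid-1" ] }
+    //   }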
private final Map> followedLeaderIndexUUIDs; + + public AutoFollowMetadata(Map patterns, Map> followedLeaderIndexUUIDs) { + this.patterns = patterns; + this.followedLeaderIndexUUIDs = followedLeaderIndexUUIDs; + } + + public AutoFollowMetadata(StreamInput in) throws IOException { + patterns = in.readMap(StreamInput::readString, AutoFollowPattern::new); + followedLeaderIndexUUIDs = in.readMapOfLists(StreamInput::readString, StreamInput::readString); + } + + public Map getPatterns() { + return patterns; + } + + public Map> getFollowedLeaderIndexUUIDs() { + return followedLeaderIndexUUIDs; + } + + @Override + public EnumSet context() { + // TODO: When a snapshot is restored do we want to restore this? + // (Otherwise we would start following indices automatically immediately) + return MetaData.ALL_CONTEXTS; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_6_5_0.minimumCompatibilityVersion(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(patterns, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); + out.writeMapOfLists(followedLeaderIndexUUIDs, StreamOutput::writeString, StreamOutput::writeString); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(PATTERNS_FIELD.getPreferredName()); + for (Map.Entry entry : patterns.entrySet()) { + builder.startObject(entry.getKey()); + builder.value(entry.getValue()); + builder.endObject(); + } + builder.endObject(); + + builder.startObject(FOLLOWED_LEADER_INDICES_FIELD.getPreferredName()); + for (Map.Entry> entry : followedLeaderIndexUUIDs.entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); + return builder; + } + + @Override + public boolean isFragment() { + return true; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AutoFollowMetadata that = (AutoFollowMetadata) o; + return Objects.equals(patterns, that.patterns); + } + + @Override + public int hashCode() { + return Objects.hash(patterns); + } + + public static class AutoFollowPattern implements Writeable, ToXContentObject { + + private static final ParseField LEADER_PATTERNS_FIELD = new ParseField("leader_patterns"); + private static final ParseField FOLLOW_PATTERN_FIELD = new ParseField("follow_pattern"); + public static final ParseField MAX_BATCH_OPERATION_COUNT = new ParseField("max_batch_operation_count"); + public static final ParseField MAX_CONCURRENT_READ_BATCHES = new ParseField("max_concurrent_read_batches"); + public static final ParseField MAX_BATCH_SIZE_IN_BYTES = new ParseField("max_batch_size_in_bytes"); + public static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); + public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); + public static final ParseField RETRY_TIMEOUT = new ParseField("retry_timeout"); + public static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("auto_follow_pattern", + args -> new AutoFollowPattern((List) args[0], (String) args[1], (Integer) args[2], (Integer) args[3], + (Long) args[4], (Integer) args[5], (Integer) args[6], (TimeValue) 
args[7], (TimeValue) args[8])); + + static { + PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), LEADER_PATTERNS_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOW_PATTERN_FIELD); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_BATCH_OPERATION_COUNT); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_READ_BATCHES); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MAX_BATCH_SIZE_IN_BYTES); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_WRITE_BATCHES); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_BUFFER_SIZE); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), RETRY_TIMEOUT.getPreferredName()), + RETRY_TIMEOUT, ObjectParser.ValueType.STRING); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()), + IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); + } + + private final List leaderIndexPatterns; + private final String followIndexPattern; + private final Integer maxBatchOperationCount; + private final Integer maxConcurrentReadBatches; + private final Long maxOperationSizeInBytes; + private final Integer maxConcurrentWriteBatches; + private final Integer maxWriteBufferSize; + private final TimeValue retryTimeout; + private final TimeValue idleShardRetryDelay; + + public AutoFollowPattern(List leaderIndexPatterns, String followIndexPattern, Integer maxBatchOperationCount, + Integer maxConcurrentReadBatches, Long maxOperationSizeInBytes, Integer maxConcurrentWriteBatches, + Integer maxWriteBufferSize, TimeValue retryTimeout, TimeValue idleShardRetryDelay) { + this.leaderIndexPatterns = leaderIndexPatterns; + this.followIndexPattern = followIndexPattern; + this.maxBatchOperationCount = maxBatchOperationCount; + this.maxConcurrentReadBatches = maxConcurrentReadBatches; + this.maxOperationSizeInBytes = maxOperationSizeInBytes; + this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; + this.maxWriteBufferSize = maxWriteBufferSize; + this.retryTimeout = retryTimeout; + this.idleShardRetryDelay = idleShardRetryDelay; + } + + AutoFollowPattern(StreamInput in) throws IOException { + leaderIndexPatterns = in.readList(StreamInput::readString); + followIndexPattern = in.readOptionalString(); + maxBatchOperationCount = in.readOptionalVInt(); + maxConcurrentReadBatches = in.readOptionalVInt(); + maxOperationSizeInBytes = in.readOptionalLong(); + maxConcurrentWriteBatches = in.readOptionalVInt(); + maxWriteBufferSize = in.readOptionalVInt(); + retryTimeout = in.readOptionalTimeValue(); + idleShardRetryDelay = in.readOptionalTimeValue(); + } + + public boolean match(String indexName) { + return match(leaderIndexPatterns, indexName); + } + + public static boolean match(List leaderIndexPatterns, String indexName) { + return Regex.simpleMatch(leaderIndexPatterns, indexName); + } + + public List getLeaderIndexPatterns() { + return leaderIndexPatterns; + } + + public String getFollowIndexPattern() { + return followIndexPattern; + } + + public Integer getMaxBatchOperationCount() { + return maxBatchOperationCount; + } + + public Integer getMaxConcurrentReadBatches() { + return maxConcurrentReadBatches; + } + + public Long getMaxOperationSizeInBytes() { + return maxOperationSizeInBytes; + } + + public Integer 
getMaxConcurrentWriteBatches() { + return maxConcurrentWriteBatches; + } + + public Integer getMaxWriteBufferSize() { + return maxWriteBufferSize; + } + + public TimeValue getRetryTimeout() { + return retryTimeout; + } + + public TimeValue getIdleShardRetryDelay() { + return idleShardRetryDelay; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringList(leaderIndexPatterns); + out.writeOptionalString(followIndexPattern); + out.writeOptionalVInt(maxBatchOperationCount); + out.writeOptionalVInt(maxConcurrentReadBatches); + out.writeOptionalLong(maxOperationSizeInBytes); + out.writeOptionalVInt(maxConcurrentWriteBatches); + out.writeOptionalVInt(maxWriteBufferSize); + out.writeOptionalTimeValue(retryTimeout); + out.writeOptionalTimeValue(idleShardRetryDelay); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.array(LEADER_PATTERNS_FIELD.getPreferredName(), leaderIndexPatterns.toArray(new String[0])); + if (followIndexPattern != null) { + builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), followIndexPattern); + } + if (maxBatchOperationCount != null) { + builder.field(MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); + } + if (maxConcurrentReadBatches != null) { + builder.field(MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); + } + if (maxOperationSizeInBytes != null) { + builder.field(MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes); + } + if (maxConcurrentWriteBatches != null) { + builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); + } + if (maxWriteBufferSize != null){ + builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + } + if (retryTimeout != null) { + builder.field(RETRY_TIMEOUT.getPreferredName(), retryTimeout); + } + if (idleShardRetryDelay != null) { + builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay); + } + return builder; + } + + @Override + public boolean isFragment() { + return true; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AutoFollowPattern that = (AutoFollowPattern) o; + return Objects.equals(leaderIndexPatterns, that.leaderIndexPatterns) && + Objects.equals(followIndexPattern, that.followIndexPattern) && + Objects.equals(maxBatchOperationCount, that.maxBatchOperationCount) && + Objects.equals(maxConcurrentReadBatches, that.maxConcurrentReadBatches) && + Objects.equals(maxOperationSizeInBytes, that.maxOperationSizeInBytes) && + Objects.equals(maxConcurrentWriteBatches, that.maxConcurrentWriteBatches) && + Objects.equals(maxWriteBufferSize, that.maxWriteBufferSize) && + Objects.equals(retryTimeout, that.retryTimeout) && + Objects.equals(idleShardRetryDelay, that.idleShardRetryDelay); + } + + @Override + public int hashCode() { + return Objects.hash( + leaderIndexPatterns, + followIndexPattern, + maxBatchOperationCount, + maxConcurrentReadBatches, + maxOperationSizeInBytes, + maxConcurrentWriteBatches, + maxWriteBufferSize, + retryTimeout, + idleShardRetryDelay + ); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java index 5503eb69255..e4fd8d04351 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java
@@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.graph.action;
 
 import org.elasticsearch.action.Action;
+import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse;
 
 public class GraphExploreAction extends Action<GraphExploreResponse> {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java
index d5e756f78a2..37456f23464 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java
@@ -11,6 +11,9 @@ import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
+import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse;
+import org.elasticsearch.protocol.xpack.graph.Hop;
 import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
new file mode 100644
index 00000000000..ee0c0de97e0
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
@@ -0,0 +1,385 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.indexing;
+
+import org.apache.log4j.Logger;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * An abstract class that builds an index incrementally. A background job can be launched using {@link #maybeTriggerAsyncJob(long)};
+ * it will create the index from the source index up to the last complete bucket that is allowed to be built (based on the job position).
+ * Only one background job can run simultaneously and {@link #onFinish()} is called when the job
+ * finishes. {@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} is called if the indexer is
+ * aborted while a job is running. The indexer must be started ({@link #start()}) to allow a background job to run when
+ * {@link #maybeTriggerAsyncJob(long)} is called. {@link #stop()} can be used to stop the background job without aborting the indexer.
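+ * <p>
+ * A hedged usage sketch (not part of the original file): the indexer is driven by an external
+ * scheduler and moves through its states roughly like this:
+ * <pre>{@code
+ * indexer.start();                                          // STOPPED -> STARTED
+ * indexer.maybeTriggerAsyncJob(System.currentTimeMillis()); // STARTED -> INDEXING (runs async)
+ * indexer.stop();                                           // INDEXING -> STOPPING; onFinish() once the job notices
+ * }</pre>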
+ *
+ * In a nutshell, this is a two-cycle engine: first it sends a query, then it indexes the documents from the response and sends the
+ * next query, and so on, until a condition lets the engine pause until the source provides new input.
+ *
+ * @param <JobPosition> Type that defines a job position to be defined by the implementation.
+ */
+public abstract class AsyncTwoPhaseIndexer<JobPosition, JobStats extends IndexerJobStats> {
+    private static final Logger logger = Logger.getLogger(AsyncTwoPhaseIndexer.class.getName());
+
+    private final JobStats stats;
+
+    private final AtomicReference<IndexerState> state;
+    private final AtomicReference<JobPosition> position;
+    private final Executor executor;
+
+    protected AsyncTwoPhaseIndexer(Executor executor, AtomicReference<IndexerState> initialState,
+                                   JobPosition initialPosition, JobStats jobStats) {
+        this.executor = executor;
+        this.state = initialState;
+        this.position = new AtomicReference<>(initialPosition);
+        this.stats = jobStats;
+    }
+
+    /**
+     * Get the current state of the indexer.
+     */
+    public IndexerState getState() {
+        return state.get();
+    }
+
+    /**
+     * Get the current position of the indexer.
+     */
+    public JobPosition getPosition() {
+        return position.get();
+    }
+
+    /**
+     * Get the stats of this indexer.
+     */
+    public JobStats getStats() {
+        return stats;
+    }
+
+    /**
+     * Sets the internal state to {@link IndexerState#STARTED} if the previous state
+     * was {@link IndexerState#STOPPED}. Setting the state to STARTED allows a job
+     * to run in the background when {@link #maybeTriggerAsyncJob(long)} is called.
+     *
+     * @return The new state for the indexer (STARTED, INDEXING or ABORTING if the
+     *         job was already aborted).
+     */
+    public synchronized IndexerState start() {
+        state.compareAndSet(IndexerState.STOPPED, IndexerState.STARTED);
+        return state.get();
+    }
+
+    /**
+     * Sets the internal state to {@link IndexerState#STOPPING} if an async job is
+     * running in the background, and in that case {@link #onFinish()} will be called
+     * as soon as the background job detects that the indexer is stopped. If there
+     * is no job running when this function is called, the state is directly set to
+     * {@link IndexerState#STOPPED} and {@link #onFinish()} will never be called.
+     *
+     * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the
+     *         job was already aborted).
+     */
+    public synchronized IndexerState stop() {
+        IndexerState currentState = state.updateAndGet(previousState -> {
+            if (previousState == IndexerState.INDEXING) {
+                return IndexerState.STOPPING;
+            } else if (previousState == IndexerState.STARTED) {
+                return IndexerState.STOPPED;
+            } else {
+                return previousState;
+            }
+        });
+        return currentState;
+    }
+
+    /**
+     * Sets the internal state to {@link IndexerState#ABORTING}. It returns false if
+     * an async job is running in the background, and in that case {@link #onAbort}
+     * will be called as soon as the background job detects that the indexer is
+     * aborted. If there is no job running when this function is called, it returns
+     * true and {@link #onAbort()} will never be called.
+     *
+     * @return true if the indexer is aborted, false if a background job is running
+     *         and abort is delayed.
+     */
+    public synchronized boolean abort() {
+        IndexerState prevState = state.getAndUpdate((prev) -> IndexerState.ABORTING);
+        return prevState == IndexerState.STOPPED || prevState == IndexerState.STARTED;
+    }
+
+    /**
+     * Triggers a background job that builds the index asynchronously iff
+     * no other job is running and the indexer is started
+     * ({@link IndexerState#STARTED}).
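+     * <p>
+     * For example (illustrative), a scheduler hook would typically call
+     * {@code maybeTriggerAsyncJob(System.currentTimeMillis())} and use the boolean
+     * result to tell whether a run was actually started or the trigger was ignored.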
+     *
+     * @param now
+     *            The current time in milliseconds (used to limit the job to
+     *            complete buckets)
+     * @return true if a job has been triggered, false otherwise
+     */
+    public synchronized boolean maybeTriggerAsyncJob(long now) {
+        final IndexerState currentState = state.get();
+        switch (currentState) {
+            case INDEXING:
+            case STOPPING:
+            case ABORTING:
+                logger.warn("Schedule was triggered for job [" + getJobId() + "], but prior indexer is still running.");
+                return false;
+
+            case STOPPED:
+                logger.debug("Schedule was triggered for job [" + getJobId() + "] but job is stopped. Ignoring trigger.");
+                return false;
+
+            case STARTED:
+                logger.debug("Schedule was triggered for job [" + getJobId() + "], state: [" + currentState + "]");
+                stats.incrementNumInvocations(1);
+                onStartJob(now);
+
+                if (state.compareAndSet(IndexerState.STARTED, IndexerState.INDEXING)) {
+                    // fire off the search. Note this is async, the method will return from here
+                    executor.execute(() -> doNextSearch(buildSearchRequest(),
+                            ActionListener.wrap(this::onSearchResponse, exc -> finishWithFailure(exc))));
+                    logger.debug("Beginning to index [" + getJobId() + "], state: [" + currentState + "]");
+                    return true;
+                } else {
+                    logger.debug("Could not move from STARTED to INDEXING state because current state is [" + state.get() + "]");
+                    return false;
+                }
+
+            default:
+                logger.warn("Encountered unexpected state [" + currentState + "] while indexing");
+                throw new IllegalStateException("Job encountered an illegal state [" + currentState + "]");
+        }
+    }
+
+    /**
+     * Called to get the id of the job, used for logging.
+     *
+     * @return a string with the id of the job
+     */
+    protected abstract String getJobId();
+
+    /**
+     * Called to process a response from a search request in order to turn it into a {@link IterationResult}.
+     *
+     * @param searchResponse response from the search phase.
+     * @return Iteration object to be passed to the indexing phase.
+     */
+    protected abstract IterationResult<JobPosition> doProcess(SearchResponse searchResponse);
+
+    /**
+     * Called to build the next search request.
+     *
+     * @return SearchRequest to be passed to the search phase.
+     */
+    protected abstract SearchRequest buildSearchRequest();
+
+    /**
+     * Called at startup after the job has been triggered using {@link #maybeTriggerAsyncJob(long)} and the
+     * internal state is {@link IndexerState#STARTED}.
+     *
+     * @param now The current time in milliseconds passed through from {@link #maybeTriggerAsyncJob(long)}
+     */
+    protected abstract void onStartJob(long now);
+
+    /**
+     * Executes the {@link SearchRequest} and calls {@code nextPhase} with the
+     * response or the exception if an error occurs.
+     *
+     * @param request
+     *            The search request to execute
+     * @param nextPhase
+     *            Listener for the next phase
+     */
+    protected abstract void doNextSearch(SearchRequest request, ActionListener<SearchResponse> nextPhase);
+
+    /**
+     * Executes the {@link BulkRequest} and calls {@code nextPhase} with the
+     * response or the exception if an error occurs.
+     *
+     * @param request
+     *            The bulk request to execute
+     * @param nextPhase
+     *            Listener for the next phase
+     */
+    protected abstract void doNextBulk(BulkRequest request, ActionListener<BulkResponse> nextPhase);
+
+    /**
+     * Called periodically during the execution of a background job. Implementations
+     * should persist the state somewhere and continue the execution asynchronously
+     * using {@code next}.
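+     * <p>
+     * A minimal sketch, assuming an implementation that keeps its state in memory only
+     * ({@code lastState}, {@code lastPosition} and {@code MyPosition} are illustrative
+     * names, not part of this class):
+     * <pre>{@code
+     * @Override
+     * protected void doSaveState(IndexerState state, MyPosition position, Runnable next) {
+     *     this.lastState = state;       // would normally be persisted somewhere durable
+     *     this.lastPosition = position;
+     *     next.run();                   // always continue the async chain
+     * }
+     * }</pre>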
+     *
+     * @param state
+     *            The current state of the indexer
+     * @param position
+     *            The current position of the indexer
+     * @param next
+     *            Runnable for the next phase
+     */
+    protected abstract void doSaveState(IndexerState state, JobPosition position, Runnable next);
+
+    /**
+     * Called when a failure occurs in an async job causing the execution to stop.
+     *
+     * @param exc
+     *            The exception
+     */
+    protected abstract void onFailure(Exception exc);
+
+    /**
+     * Called when a background job finishes.
+     */
+    protected abstract void onFinish();
+
+    /**
+     * Called when a background job detects that the indexer is aborted, causing the
+     * async execution to stop.
+     */
+    protected abstract void onAbort();
+
+    private void finishWithFailure(Exception exc) {
+        doSaveState(finishAndSetState(), position.get(), () -> onFailure(exc));
+    }
+
+    private IndexerState finishAndSetState() {
+        return state.updateAndGet(prev -> {
+            switch (prev) {
+                case INDEXING:
+                    // ready for another job
+                    return IndexerState.STARTED;
+
+                case STOPPING:
+                    // must be started again
+                    return IndexerState.STOPPED;
+
+                case ABORTING:
+                    // abort and exit
+                    onAbort();
+                    return IndexerState.ABORTING; // This shouldn't matter, since onAbort() will kill the task first
+
+                case STOPPED:
+                    // No-op. Shouldn't really be possible to get here (should have to go through
+                    // STOPPING first, which will be handled) but it is harmless to no-op and we
+                    // don't want to throw an exception here
+                    return IndexerState.STOPPED;
+
+                default:
+                    // any other state is unanticipated at this point
+                    throw new IllegalStateException("Indexer job encountered an illegal state [" + prev + "]");
+            }
+        });
+    }
+
+    private void onSearchResponse(SearchResponse searchResponse) {
+        try {
+            if (checkState(getState()) == false) {
+                return;
+            }
+            if (searchResponse.getShardFailures().length != 0) {
+                throw new RuntimeException("Shard failures encountered while running indexer for job [" + getJobId() + "]: "
+                        + Arrays.toString(searchResponse.getShardFailures()));
+            }
+
+            stats.incrementNumPages(1);
+            IterationResult<JobPosition> iterationResult = doProcess(searchResponse);
+
+            if (iterationResult.isDone()) {
+                logger.debug("Finished indexing for job [" + getJobId() + "], saving state and shutting down.");
+
+                // Change state first, then try to persist. This prevents in-progress
+                // STOPPING/ABORTING from being persisted as STARTED and then stopping the job
+                doSaveState(finishAndSetState(), position.get(), this::onFinish);
+                return;
+            }
+
+            final List<IndexRequest> docs = iterationResult.getToIndex();
+            final BulkRequest bulkRequest = new BulkRequest();
+            docs.forEach(bulkRequest::add);
+
+            // TODO this might be a valid case, e.g. if implementation filters
+            assert bulkRequest.requests().size() > 0;
+
+            doNextBulk(bulkRequest, ActionListener.wrap(bulkResponse -> {
+                // TODO we should check items in the response and move after accordingly to
+                // resume the failing buckets ?
+ if (bulkResponse.hasFailures()) { + logger.warn("Error while attempting to bulk index documents: " + bulkResponse.buildFailureMessage()); + } + stats.incrementNumOutputDocuments(bulkResponse.getItems().length); + if (checkState(getState()) == false) { + return; + } + + JobPosition newPosition = iterationResult.getPosition(); + position.set(newPosition); + + onBulkResponse(bulkResponse, newPosition); + }, exc -> finishWithFailure(exc))); + } catch (Exception e) { + finishWithFailure(e); + } + } + + private void onBulkResponse(BulkResponse response, JobPosition position) { + try { + + ActionListener listener = ActionListener.wrap(this::onSearchResponse, this::finishWithFailure); + // TODO probably something more intelligent than every-50 is needed + if (stats.getNumPages() > 0 && stats.getNumPages() % 50 == 0) { + doSaveState(IndexerState.INDEXING, position, () -> doNextSearch(buildSearchRequest(), listener)); + } else { + doNextSearch(buildSearchRequest(), listener); + } + } catch (Exception e) { + finishWithFailure(e); + } + } + + /** + * Checks the {@link IndexerState} and returns false if the execution should be + * stopped. + */ + private boolean checkState(IndexerState currentState) { + switch (currentState) { + case INDEXING: + // normal state; + return true; + + case STOPPING: + logger.info("Indexer job encountered [" + IndexerState.STOPPING + "] state, halting indexer."); + doSaveState(finishAndSetState(), getPosition(), () -> { + }); + return false; + + case STOPPED: + return false; + + case ABORTING: + logger.info("Requested shutdown of indexer for job [" + getJobId() + "]"); + onAbort(); + return false; + + default: + // Anything other than indexing, aborting or stopping is unanticipated + logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); + throw new IllegalStateException("Indexer job encountered an illegal state [" + currentState + "]"); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java new file mode 100644 index 00000000000..2453504a5ba --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexing; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; + +import java.io.IOException; +import java.util.Objects; + +/** + * This class holds the runtime statistics of a job. The stats are not used by any internal process + * and are only for external monitoring/reference. Statistics are not persisted with the job, so if the + * allocated task is shutdown/restarted on a different node all the stats will reset. 
+ */
+public abstract class IndexerJobStats implements ToXContentObject, Writeable {
+
+    public static final ParseField NAME = new ParseField("job_stats");
+
+    protected long numPages = 0;
+    protected long numInputDocuments = 0;
+    protected long numOutputDocuments = 0;
+    protected long numInvocations = 0;
+
+    public IndexerJobStats() {
+    }
+
+    public IndexerJobStats(long numPages, long numInputDocuments, long numOutputDocuments, long numInvocations) {
+        this.numPages = numPages;
+        this.numInputDocuments = numInputDocuments;
+        this.numOutputDocuments = numOutputDocuments;
+        this.numInvocations = numInvocations;
+    }
+
+    public IndexerJobStats(StreamInput in) throws IOException {
+        this.numPages = in.readVLong();
+        this.numInputDocuments = in.readVLong();
+        this.numOutputDocuments = in.readVLong();
+        this.numInvocations = in.readVLong();
+    }
+
+    public long getNumPages() {
+        return numPages;
+    }
+
+    public long getNumDocuments() {
+        return numInputDocuments;
+    }
+
+    public long getNumInvocations() {
+        return numInvocations;
+    }
+
+    public long getOutputDocuments() {
+        return numOutputDocuments;
+    }
+
+    public void incrementNumPages(long n) {
+        assert n >= 0;
+        numPages += n;
+    }
+
+    public void incrementNumDocuments(long n) {
+        assert n >= 0;
+        numInputDocuments += n;
+    }
+
+    public void incrementNumInvocations(long n) {
+        assert n >= 0;
+        numInvocations += n;
+    }
+
+    public void incrementNumOutputDocuments(long n) {
+        assert n >= 0;
+        numOutputDocuments += n;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeVLong(numPages);
+        out.writeVLong(numInputDocuments);
+        out.writeVLong(numOutputDocuments);
+        out.writeVLong(numInvocations);
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || getClass() != other.getClass()) {
+            return false;
+        }
+
+        IndexerJobStats that = (IndexerJobStats) other;
+
+        return Objects.equals(this.numPages, that.numPages)
+                && Objects.equals(this.numInputDocuments, that.numInputDocuments)
+                && Objects.equals(this.numOutputDocuments, that.numOutputDocuments)
+                && Objects.equals(this.numInvocations, that.numInvocations);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(numPages, numInputDocuments, numOutputDocuments, numInvocations);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java
similarity index 97%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java
index 6e211c1df9e..1b6b9a943cb 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java
@@ -3,7 +3,7 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
 */
-package org.elasticsearch.xpack.core.rollup.job;
+package org.elasticsearch.xpack.core.indexing;
 
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java
new file mode 100644
index 00000000000..1261daf185b
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.indexing;
+
+import org.elasticsearch.action.index.IndexRequest;
+
+import java.util.List;
+
+/**
+ * Result object to hold the result of one iteration of iterative indexing.
+ * Acts as an interface between the implementation and the generic indexer.
+ */
+public class IterationResult<JobPosition> {
+
+    private final boolean isDone;
+    private final JobPosition position;
+    private final List<IndexRequest> toIndex;
+
+    /**
+     * Constructor for the result of one iteration.
+     *
+     * @param toIndex the list of requests to be indexed
+     * @param position the extracted, persistable position of the job required for the search phase
+     * @param isDone true if the source is exhausted and the job should go to sleep
+     *
+     * Note: toIndex.empty() != isDone due to possible filtering in the specific implementation
+     */
+    public IterationResult(List<IndexRequest> toIndex, JobPosition position, boolean isDone) {
+        this.toIndex = toIndex;
+        this.position = position;
+        this.isDone = isDone;
+    }
+
+    /**
+     * Returns true if this indexing iteration is done and the job should go into sleep mode.
+     */
+    public boolean isDone() {
+        return isDone;
+    }
+
+    /**
+     * Return the position of the job, a generic to be passed to the next query construction.
+     *
+     * @return the position
+     */
+    public JobPosition getPosition() {
+        return position;
+    }
+
+    /**
+     * List of requests to be passed to bulk indexing.
+     *
+     * @return List of index requests.
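+     *         The list may be empty even when {@link #isDone()} is false, e.g.
+     *         when the specific implementation filtered out every document in
+     *         this iteration.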
+     */
+    public List<IndexRequest> getToIndex() {
+        return toIndex;
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java
index e0b71abe966..193695ac693 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java
@@ -115,7 +115,7 @@ public class MlMetadata implements XPackPlugin.XPackMetaDataCustom {
 
     @Override
     public Version getMinimalSupportedVersion() {
-        return Version.V_5_4_0;
+        return Version.V_6_0_0_alpha1;
     }
 
     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java
index fb3ac55cda0..73cdbeef442 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java
@@ -5,7 +5,6 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.Version;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
@@ -72,18 +71,14 @@ public class DeleteDatafeedAction extends Action<AcknowledgedResponse> {
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
             datafeedId = in.readString();
-            if (in.getVersion().onOrAfter(Version.V_5_5_0)) {
-                force = in.readBoolean();
-            }
+            force = in.readBoolean();
         }
 
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             out.writeString(datafeedId);
-            if (out.getVersion().onOrAfter(Version.V_5_5_0)) {
-                out.writeBoolean(force);
-            }
+            out.writeBoolean(force);
         }
 
         @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java
new file mode 100644
index 00000000000..b9f981ae980
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.ml.action;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.xpack.core.ml.job.config.Job;
+import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats;
+import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
+
+import java.io.IOException;
+
+public class DeleteForecastAction extends Action<AcknowledgedResponse> {
+
+    public static final DeleteForecastAction INSTANCE = new DeleteForecastAction();
+    public static final String NAME = "cluster:admin/xpack/ml/job/forecast/delete";
+
+    private DeleteForecastAction() {
+        super(NAME);
+    }
+
+    @Override
+    public AcknowledgedResponse newResponse() {
+        return new AcknowledgedResponse();
+    }
+
+    public static class Request extends AcknowledgedRequest<Request> {
+
+        private String jobId;
+        private String forecastId;
+        private boolean allowNoForecasts = true;
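+        // Mirrors the other allow_no_* ML flags: when true, a delete request whose
+        // expression matches no forecasts is expected to succeed quietly rather than
+        // error (an assumption based on the naming convention used elsewhere).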
+
+        public Request() {
+        }
+
+        public Request(String jobId, String forecastId) {
+            this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
+            this.forecastId = ExceptionsHelper.requireNonNull(forecastId, ForecastRequestStats.FORECAST_ID.getPreferredName());
+        }
+
+        public String getJobId() {
+            return jobId;
+        }
+
+        public String getForecastId() {
+            return forecastId;
+        }
+
+        public boolean isAllowNoForecasts() {
+            return allowNoForecasts;
+        }
+
+        public void setAllowNoForecasts(boolean allowNoForecasts) {
+            this.allowNoForecasts = allowNoForecasts;
+        }
+
+        @Override
+        public ActionRequestValidationException validate() {
+            return null;
+        }
+
+        @Override
+        public void readFrom(StreamInput in) throws IOException {
+            super.readFrom(in);
+            jobId = in.readString();
+            forecastId = in.readString();
+            allowNoForecasts = in.readBoolean();
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeString(jobId);
+            out.writeString(forecastId);
+            out.writeBoolean(allowNoForecasts);
+        }
+    }
+
+    public static class RequestBuilder extends ActionRequestBuilder<Request, AcknowledgedResponse> {
+
+        public RequestBuilder(ElasticsearchClient client, DeleteForecastAction action) {
+            super(client, action, new Request());
+        }
+    }
+
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java
index 933e98b80ff..56b7ec2b52f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java
@@ -5,7 +5,6 @@
 */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.Version;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
@@ -79,18 +78,14 @@ public class DeleteJobAction extends Action<AcknowledgedResponse> {
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
             jobId = in.readString();
-            if (in.getVersion().onOrAfter(Version.V_5_5_0)) {
-                force = in.readBoolean();
-            }
+            force = in.readBoolean();
         }
 
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             out.writeString(jobId);
-            if (out.getVersion().onOrAfter(Version.V_5_5_0)) {
-                out.writeBoolean(force);
-            }
+            out.writeBoolean(force);
         }
 
         @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java
new file mode 100644
index 00000000000..9fda416b33b
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java
@@ -0,0 +1,183 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.ml.action;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.StatusToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+public class FindFileStructureAction extends Action<FindFileStructureAction.Response> {
+
+    public static final FindFileStructureAction INSTANCE = new FindFileStructureAction();
+    public static final String NAME = "cluster:monitor/xpack/ml/findfilestructure";
+
+    private FindFileStructureAction() {
+        super(NAME);
+    }
+
+    @Override
+    public Response newResponse() {
+        return new Response();
+    }
+
+    static class RequestBuilder extends ActionRequestBuilder<Request, Response> {
+
+        RequestBuilder(ElasticsearchClient client, FindFileStructureAction action) {
+            super(client, action, new Request());
+        }
+    }
+
+    public static class Response extends ActionResponse implements StatusToXContentObject, Writeable {
+
+        private FileStructure fileStructure;
+
+        public Response(FileStructure fileStructure) {
+            this.fileStructure = fileStructure;
+        }
+
+        Response() {
+        }
+
+        public FileStructure getFileStructure() {
+            return fileStructure;
+        }
+
+        @Override
+        public void readFrom(StreamInput in) throws IOException {
+            super.readFrom(in);
+            fileStructure = new FileStructure(in);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            fileStructure.writeTo(out);
+        }
+
+        @Override
+        public RestStatus status() {
+            return RestStatus.OK;
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            fileStructure.toXContent(builder, params);
+            return builder;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(fileStructure);
+        }
+
+        @Override
+        public boolean equals(Object other) {
+
+            if (this == other) {
+                return true;
+            }
+
+            if (other == null || getClass() !=
other.getClass()) { + return false; + } + + FindFileStructureAction.Response that = (FindFileStructureAction.Response) other; + return Objects.equals(fileStructure, that.fileStructure); + } + } + + public static class Request extends ActionRequest { + + public static final ParseField LINES_TO_SAMPLE = new ParseField("lines_to_sample"); + + private Integer linesToSample; + private BytesReference sample; + + public Request() { + } + + public Integer getLinesToSample() { + return linesToSample; + } + + public void setLinesToSample(Integer linesToSample) { + this.linesToSample = linesToSample; + } + + public BytesReference getSample() { + return sample; + } + + public void setSample(BytesReference sample) { + this.sample = sample; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (linesToSample != null && linesToSample <= 0) { + validationException = + addValidationError(LINES_TO_SAMPLE.getPreferredName() + " must be positive if specified", validationException); + } + if (sample == null || sample.length() == 0) { + validationException = addValidationError("sample must be specified", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + linesToSample = in.readOptionalVInt(); + sample = in.readBytesReference(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalVInt(linesToSample); + out.writeBytesReference(sample); + } + + @Override + public int hashCode() { + return Objects.hash(linesToSample, sample); + } + + @Override + public boolean equals(Object other) { + + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + Request that = (Request) other; + return Objects.equals(this.linesToSample, that.linesToSample) && + Objects.equals(this.sample, that.sample); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java index ef086b51262..4b96a4d6b27 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.tasks.BaseTasksResponse; @@ -127,9 +126,7 @@ public class FlushJobAction extends Action { start = in.readOptionalString(); end = in.readOptionalString(); advanceTime = in.readOptionalString(); - if (in.getVersion().after(Version.V_5_5_0)) { - skipTime = in.readOptionalString(); - } + skipTime = in.readOptionalString(); } @Override @@ -139,9 +136,7 @@ public class FlushJobAction extends Action { out.writeOptionalString(start); out.writeOptionalString(end); out.writeOptionalString(advanceTime); - if (out.getVersion().after(Version.V_5_5_0)) { - out.writeOptionalString(skipTime); - } + out.writeOptionalString(skipTime); } @Override @@ -222,18 +217,14 @@ public class FlushJobAction extends Action { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); flushed = in.readBoolean(); - if (in.getVersion().after(Version.V_5_5_0)) { - 
lastFinalizedBucketEnd = new Date(in.readVLong()); - } + lastFinalizedBucketEnd = new Date(in.readVLong()); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(flushed); - if (out.getVersion().after(Version.V_5_5_0)) { - out.writeVLong(lastFinalizedBucketEnd.getTime()); - } + out.writeVLong(lastFinalizedBucketEnd.getTime()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java index 29b3d4bb8d5..c6c87ef0e46 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; @@ -162,7 +161,7 @@ public class GetBucketsAction extends Action { public void setPageParams(PageParams pageParams) { if (timestamp != null) { - throw new IllegalArgumentException("Param [" + PageParams.FROM.getPreferredName() + throw new IllegalArgumentException("Param [" + PageParams.FROM.getPreferredName() + ", " + PageParams.SIZE.getPreferredName() + "] is incompatible with [" + TIMESTAMP.getPreferredName() + "]."); } this.pageParams = ExceptionsHelper.requireNonNull(pageParams, PageParams.PAGE.getPreferredName()); @@ -212,10 +211,8 @@ public class GetBucketsAction extends Action { end = in.readOptionalString(); anomalyScore = in.readOptionalDouble(); pageParams = in.readOptionalWriteable(PageParams::new); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - sort = in.readString(); - descending = in.readBoolean(); - } + sort = in.readString(); + descending = in.readBoolean(); } @Override @@ -229,10 +226,8 @@ public class GetBucketsAction extends Action { out.writeOptionalString(end); out.writeOptionalDouble(anomalyScore); out.writeOptionalWriteable(pageParams); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeString(sort); - out.writeBoolean(descending); - } + out.writeString(sort); + out.writeBoolean(descending); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index 807c0936375..d2d5d09090e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -95,7 +94,7 @@ public class GetJobsStatsAction extends Action { @Override public boolean match(Task task) { - return jobId.equals(MetaData.ALL) || OpenJobAction.JobTaskMatcher.match(task, jobId); + return OpenJobAction.JobTaskMatcher.match(task, jobId); } @Override diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java index cd76c54f452..797603fad22 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.xpack.core.ml.action.util.PageParams; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; -import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; @@ -79,7 +78,7 @@ public class GetRecordsAction extends Action { private boolean excludeInterim = false; private PageParams pageParams = new PageParams(); private double recordScoreFilter = 0.0; - private String sort = Influencer.INFLUENCER_SCORE.getPreferredName(); + private String sort = RECORD_SCORE_FILTER.getPreferredName(); private boolean descending = true; public Request() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java index c108a983aa1..bbc39c7d731 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -168,10 +169,6 @@ public class OpenJobAction extends Action { public JobParams(StreamInput in) throws IOException { jobId = in.readString(); - if (in.getVersion().onOrBefore(Version.V_5_5_0)) { - // Read `ignoreDowntime` - in.readBoolean(); - } timeout = TimeValue.timeValueMillis(in.readVLong()); } @@ -199,10 +196,6 @@ public class OpenJobAction extends Action { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); - if (out.getVersion().onOrBefore(Version.V_5_5_0)) { - // Write `ignoreDowntime` - true by default - out.writeBoolean(true); - } out.writeVLong(timeout.millis()); } @@ -247,8 +240,14 @@ public class OpenJobAction extends Action { public interface JobTaskMatcher { static boolean match(Task task, String expectedJobId) { - String expectedDescription = "job-" + expectedJobId; - return task instanceof JobTaskMatcher && expectedDescription.equals(task.getDescription()); + if (task instanceof JobTaskMatcher) { + if (MetaData.ALL.equals(expectedJobId)) { + return true; + } + String expectedDescription = "job-" + expectedJobId; + return expectedDescription.equals(task.getDescription()); + } + return false; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java index c802f0bccca..55b9312f70b 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java @@ -73,14 +73,13 @@ public class StopDatafeedAction extends Action { } private String datafeedId; - private String[] resolvedStartedDatafeedIds; + private String[] resolvedStartedDatafeedIds = new String[] {}; private TimeValue stopTimeout = DEFAULT_TIMEOUT; private boolean force = false; private boolean allowNoDatafeeds = true; public Request(String datafeedId) { this.datafeedId = ExceptionsHelper.requireNonNull(datafeedId, DatafeedConfig.ID.getPreferredName()); - this.resolvedStartedDatafeedIds = new String[] { datafeedId }; } public Request() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 1034b00af0a..03b58732a37 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -22,7 +22,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; @@ -189,10 +189,6 @@ public class DatafeedConfig extends AbstractDiffable implements this.scriptFields = null; } this.scrollSize = in.readOptionalVInt(); - if (in.getVersion().before(Version.V_5_5_0)) { - // read former _source field - in.readBoolean(); - } this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); if (in.getVersion().onOrAfter(Version.V_6_2_0)) { this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); @@ -290,10 +286,6 @@ public class DatafeedConfig extends AbstractDiffable implements out.writeBoolean(false); } out.writeOptionalVInt(scrollSize); - if (out.getVersion().before(Version.V_5_5_0)) { - // write former _source field - out.writeBoolean(false); - } out.writeOptionalWriteable(chunkingConfig); if (out.getVersion().onOrAfter(Version.V_6_2_0)) { out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java index d894f7b339f..70102f27a56 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,14 +48,6 @@ public enum DatafeedState implements PersistentTaskState { @Override public void 
writeTo(StreamOutput out) throws IOException { DatafeedState state = this; - // STARTING & STOPPING states were introduced in v5.5. - if (out.getVersion().before(Version.V_5_5_0)) { - if (this == STARTING) { - state = STOPPED; - } else if (this == STOPPING) { - state = STARTED; - } - } out.writeEnum(state); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index f3748cefc51..d5425bdd1f4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -122,10 +121,6 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { this.scriptFields = null; } this.scrollSize = in.readOptionalVInt(); - if (in.getVersion().before(Version.V_5_5_0)) { - // TODO for former _source param - remove in v7.0.0 - in.readOptionalBoolean(); - } this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); } @@ -163,10 +158,6 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { out.writeBoolean(false); } out.writeOptionalVInt(scrollSize); - if (out.getVersion().before(Version.V_5_5_0)) { - // TODO for former _source param - remove in v7.0.0 - out.writeOptionalBoolean(null); - } out.writeOptionalWriteable(chunkingConfig); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FieldStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FieldStats.java new file mode 100644 index 00000000000..8f624d000cc --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FieldStats.java @@ -0,0 +1,171 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.core.ml.filestructurefinder;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+public class FieldStats implements ToXContentObject, Writeable {
+
+    static final ParseField COUNT = new ParseField("count");
+    static final ParseField CARDINALITY = new ParseField("cardinality");
+    static final ParseField MIN_VALUE = new ParseField("min_value");
+    static final ParseField MAX_VALUE = new ParseField("max_value");
+    static final ParseField MEAN_VALUE = new ParseField("mean_value");
+    static final ParseField MEDIAN_VALUE = new ParseField("median_value");
+    static final ParseField TOP_HITS = new ParseField("top_hits");
+
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<FieldStats, Void> PARSER = new ConstructingObjectParser<>("field_stats", false,
+        a -> new FieldStats((long) a[0], (int) a[1], (Double) a[2], (Double) a[3], (Double) a[4], (Double) a[5],
+            (List<Map<String, Object>>) a[6]));
+
+    static {
+        PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT);
+        PARSER.declareInt(ConstructingObjectParser.constructorArg(), CARDINALITY);
+        PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), MIN_VALUE);
+        PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), MAX_VALUE);
+        PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), MEAN_VALUE);
+        PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), MEDIAN_VALUE);
+        PARSER.declareObjectArray(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapOrdered(), TOP_HITS);
+    }
+
+    private final long count;
+    private final int cardinality;
+    private final Double minValue;
+    private final Double maxValue;
+    private final Double meanValue;
+    private final Double medianValue;
+    private final List<Map<String, Object>> topHits;
+
+    public FieldStats(long count, int cardinality, List<Map<String, Object>> topHits) {
+        this(count, cardinality, null, null, null, null, topHits);
+    }
+
+    public FieldStats(long count, int cardinality, Double minValue, Double maxValue, Double meanValue, Double medianValue,
+                      List<Map<String, Object>> topHits) {
+        this.count = count;
+        this.cardinality = cardinality;
+        this.minValue = minValue;
+        this.maxValue = maxValue;
+        this.meanValue = meanValue;
+        this.medianValue = medianValue;
+        this.topHits = (topHits == null) ? Collections.emptyList() : Collections.unmodifiableList(topHits);
+    }
+
+    public FieldStats(StreamInput in) throws IOException {
+        count = in.readVLong();
+        cardinality = in.readVInt();
+        minValue = in.readOptionalDouble();
+        maxValue = in.readOptionalDouble();
+        meanValue = in.readOptionalDouble();
+        medianValue = in.readOptionalDouble();
+        topHits = in.readList(StreamInput::readMap);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeVLong(count);
+        out.writeVInt(cardinality);
+        out.writeOptionalDouble(minValue);
+        out.writeOptionalDouble(maxValue);
+        out.writeOptionalDouble(meanValue);
+        out.writeOptionalDouble(medianValue);
+        out.writeCollection(topHits, StreamOutput::writeMap);
+    }
+
+    public long getCount() {
+        return count;
+    }
+
+    public int getCardinality() {
+        return cardinality;
+    }
+
+    public Double getMinValue() {
+        return minValue;
+    }
+
+    public Double getMaxValue() {
+        return maxValue;
+    }
+
+    public Double getMeanValue() {
+        return meanValue;
+    }
+
+    public Double getMedianValue() {
+        return medianValue;
+    }
+
+    public List<Map<String, Object>> getTopHits() {
+        return topHits;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+
+        builder.startObject();
+        builder.field(COUNT.getPreferredName(), count);
+        builder.field(CARDINALITY.getPreferredName(), cardinality);
+        if (minValue != null) {
+            builder.field(MIN_VALUE.getPreferredName(), minValue);
+        }
+        if (maxValue != null) {
+            builder.field(MAX_VALUE.getPreferredName(), maxValue);
+        }
+        if (meanValue != null) {
+            builder.field(MEAN_VALUE.getPreferredName(), meanValue);
+        }
+        if (medianValue != null) {
+            builder.field(MEDIAN_VALUE.getPreferredName(), medianValue);
+        }
+        if (topHits.isEmpty() == false) {
+            builder.field(TOP_HITS.getPreferredName(), topHits);
+        }
+        builder.endObject();
+
+        return builder;
+    }
+
+    @Override
+    public int hashCode() {
+
+        return Objects.hash(count, cardinality, minValue, maxValue, meanValue, medianValue, topHits);
+    }
+
+    @Override
+    public boolean equals(Object other) {
+
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || getClass() != other.getClass()) {
+            return false;
+        }
+
+        FieldStats that = (FieldStats) other;
+        return this.count == that.count &&
+            this.cardinality == that.cardinality &&
+            Objects.equals(this.minValue, that.minValue) &&
+            Objects.equals(this.maxValue, that.maxValue) &&
+            Objects.equals(this.meanValue, that.meanValue) &&
+            Objects.equals(this.medianValue, that.medianValue) &&
+            Objects.equals(this.topHits, that.topHits);
+    }
+}
diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructure.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java
similarity index 74%
rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructure.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java
index 64a00d20899..5484f9f9902 100644
--- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructure.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java
@@ -3,12 +3,16 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
*/ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.core.ml.filestructurefinder; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; @@ -21,43 +25,20 @@ import java.util.SortedMap; import java.util.TreeMap; /** - * Stores the log file format determined by a {@link LogStructureFinder}. + * Stores the file format determined by Machine Learning. */ -public class LogStructure implements ToXContentObject { +public class FileStructure implements ToXContentObject, Writeable { public enum Format { - JSON, XML, CSV, TSV, SEMI_COLON_SEPARATED_VALUES, PIPE_SEPARATED_VALUES, SEMI_STRUCTURED_TEXT; - - public Character separator() { - switch (this) { - case JSON: - case XML: - return null; - case CSV: - return ','; - case TSV: - return '\t'; - case SEMI_COLON_SEPARATED_VALUES: - return ';'; - case PIPE_SEPARATED_VALUES: - return '|'; - case SEMI_STRUCTURED_TEXT: - return null; - default: - throw new IllegalStateException("enum value [" + this + "] missing from switch."); - } - } + JSON, XML, DELIMITED, SEMI_STRUCTURED_TEXT; public boolean supportsNesting() { switch (this) { case JSON: case XML: return true; - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: + case DELIMITED: case SEMI_STRUCTURED_TEXT: return false; default: @@ -69,10 +50,7 @@ public class LogStructure implements ToXContentObject { switch (this) { case JSON: case XML: - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: + case DELIMITED: return true; case SEMI_STRUCTURED_TEXT: return false; @@ -85,10 +63,7 @@ public class LogStructure implements ToXContentObject { switch (this) { case JSON: case XML: - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: + case DELIMITED: return false; case SEMI_STRUCTURED_TEXT: return true; @@ -97,38 +72,6 @@ public class LogStructure implements ToXContentObject { } } - public boolean isSeparatedValues() { - switch (this) { - case JSON: - case XML: - return false; - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: - return true; - case SEMI_STRUCTURED_TEXT: - return false; - default: - throw new IllegalStateException("enum value [" + this + "] missing from switch."); - } - } - - public static Format fromSeparator(char separator) { - switch (separator) { - case ',': - return CSV; - case '\t': - return TSV; - case ';': - return SEMI_COLON_SEPARATED_VALUES; - case '|': - return PIPE_SEPARATED_VALUES; - default: - throw new IllegalArgumentException("No known format has separator [" + separator + "]"); - } - } - public static Format fromString(String name) { return valueOf(name.trim().toUpperCase(Locale.ROOT)); } @@ -139,6 +82,8 @@ public class LogStructure implements ToXContentObject { } } + public static final String EXPLAIN = "explain"; + static final ParseField NUM_LINES_ANALYZED = new ParseField("num_lines_analyzed"); static final ParseField NUM_MESSAGES_ANALYZED = new ParseField("num_messages_analyzed"); static final ParseField SAMPLE_START = new ParseField("sample_start"); @@ 
-149,16 +94,17 @@ public class LogStructure implements ToXContentObject {
 
     static final ParseField EXCLUDE_LINES_PATTERN = new ParseField("exclude_lines_pattern");
     static final ParseField INPUT_FIELDS = new ParseField("input_fields");
     static final ParseField HAS_HEADER_ROW = new ParseField("has_header_row");
-    static final ParseField SEPARATOR = new ParseField("separator");
+    static final ParseField DELIMITER = new ParseField("delimiter");
     static final ParseField SHOULD_TRIM_FIELDS = new ParseField("should_trim_fields");
     static final ParseField GROK_PATTERN = new ParseField("grok_pattern");
     static final ParseField TIMESTAMP_FIELD = new ParseField("timestamp_field");
     static final ParseField TIMESTAMP_FORMATS = new ParseField("timestamp_formats");
     static final ParseField NEED_CLIENT_TIMEZONE = new ParseField("need_client_timezone");
     static final ParseField MAPPINGS = new ParseField("mappings");
+    static final ParseField FIELD_STATS = new ParseField("field_stats");
     static final ParseField EXPLANATION = new ParseField("explanation");
 
-    public static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>("log_file_structure", false, Builder::new);
+    public static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>("file_structure", false, Builder::new);
 
     static {
         PARSER.declareInt(Builder::setNumLinesAnalyzed, NUM_LINES_ANALYZED);
@@ -171,13 +117,20 @@ public class LogStructure implements ToXContentObject {
         PARSER.declareString(Builder::setExcludeLinesPattern, EXCLUDE_LINES_PATTERN);
         PARSER.declareStringArray(Builder::setInputFields, INPUT_FIELDS);
         PARSER.declareBoolean(Builder::setHasHeaderRow, HAS_HEADER_ROW);
-        PARSER.declareString((p, c) -> p.setSeparator(c.charAt(0)), SEPARATOR);
+        PARSER.declareString((p, c) -> p.setDelimiter(c.charAt(0)), DELIMITER);
         PARSER.declareBoolean(Builder::setShouldTrimFields, SHOULD_TRIM_FIELDS);
         PARSER.declareString(Builder::setGrokPattern, GROK_PATTERN);
         PARSER.declareString(Builder::setTimestampField, TIMESTAMP_FIELD);
         PARSER.declareStringArray(Builder::setTimestampFormats, TIMESTAMP_FORMATS);
         PARSER.declareBoolean(Builder::setNeedClientTimezone, NEED_CLIENT_TIMEZONE);
         PARSER.declareObject(Builder::setMappings, (p, c) -> new TreeMap<>(p.map()), MAPPINGS);
+        PARSER.declareObject(Builder::setFieldStats, (p, c) -> {
+            Map<String, FieldStats> fieldStats = new TreeMap<>();
+            while (p.nextToken() == XContentParser.Token.FIELD_NAME) {
+                fieldStats.put(p.currentName(), FieldStats.PARSER.apply(p, c));
+            }
+            return fieldStats;
+        }, FIELD_STATS);
         PARSER.declareStringArray(Builder::setExplanation, EXPLANATION);
     }
 
@@ -191,20 +144,21 @@ public class LogStructure implements ToXContentObject {
     private final String excludeLinesPattern;
     private final List<String> inputFields;
     private final Boolean hasHeaderRow;
-    private final Character separator;
+    private final Character delimiter;
     private final Boolean shouldTrimFields;
     private final String grokPattern;
     private final List<String> timestampFormats;
     private final String timestampField;
     private final boolean needClientTimezone;
     private final SortedMap<String, Object> mappings;
+    private final SortedMap<String, FieldStats> fieldStats;
     private final List<String> explanation;
 
-    public LogStructure(int numLinesAnalyzed, int numMessagesAnalyzed, String sampleStart, String charset, Boolean hasByteOrderMarker,
-                        Format format, String multilineStartPattern, String excludeLinesPattern, List<String> inputFields,
-                        Boolean hasHeaderRow, Character separator, Boolean shouldTrimFields, String grokPattern, String timestampField,
-                        List<String> timestampFormats, boolean needClientTimezone, Map<String, Object> mappings,
-                        List<String> explanation) {
+    public FileStructure(int numLinesAnalyzed, int numMessagesAnalyzed, String sampleStart, String charset, Boolean hasByteOrderMarker,
+                         Format format, String multilineStartPattern, String excludeLinesPattern, List<String> inputFields,
+                         Boolean hasHeaderRow, Character delimiter, Boolean shouldTrimFields, String grokPattern, String timestampField,
+                         List<String> timestampFormats, boolean needClientTimezone, Map<String, Object> mappings,
+                         Map<String, FieldStats> fieldStats, List<String> explanation) {
 
         this.numLinesAnalyzed = numLinesAnalyzed;
         this.numMessagesAnalyzed = numMessagesAnalyzed;
@@ -216,16 +170,77 @@ public class LogStructure implements ToXContentObject {
         this.excludeLinesPattern = excludeLinesPattern;
         this.inputFields = (inputFields == null) ? null : Collections.unmodifiableList(new ArrayList<>(inputFields));
         this.hasHeaderRow = hasHeaderRow;
-        this.separator = separator;
+        this.delimiter = delimiter;
         this.shouldTrimFields = shouldTrimFields;
         this.grokPattern = grokPattern;
         this.timestampField = timestampField;
         this.timestampFormats = (timestampFormats == null) ? null : Collections.unmodifiableList(new ArrayList<>(timestampFormats));
         this.needClientTimezone = needClientTimezone;
         this.mappings = Collections.unmodifiableSortedMap(new TreeMap<>(mappings));
+        this.fieldStats = Collections.unmodifiableSortedMap(new TreeMap<>(fieldStats));
         this.explanation = Collections.unmodifiableList(new ArrayList<>(explanation));
     }
 
+    public FileStructure(StreamInput in) throws IOException {
+        numLinesAnalyzed = in.readVInt();
+        numMessagesAnalyzed = in.readVInt();
+        sampleStart = in.readString();
+        charset = in.readString();
+        hasByteOrderMarker = in.readOptionalBoolean();
+        format = in.readEnum(Format.class);
+        multilineStartPattern = in.readOptionalString();
+        excludeLinesPattern = in.readOptionalString();
+        inputFields = in.readBoolean() ? Collections.unmodifiableList(in.readList(StreamInput::readString)) : null;
+        hasHeaderRow = in.readOptionalBoolean();
+        delimiter = in.readBoolean() ? (char) in.readVInt() : null;
+        shouldTrimFields = in.readOptionalBoolean();
+        grokPattern = in.readOptionalString();
+        timestampFormats = in.readBoolean() ? Collections.unmodifiableList(in.readList(StreamInput::readString)) : null;
+        timestampField = in.readOptionalString();
+        needClientTimezone = in.readBoolean();
+        mappings = Collections.unmodifiableSortedMap(new TreeMap<>(in.readMap()));
+        fieldStats = Collections.unmodifiableSortedMap(new TreeMap<>(in.readMap(StreamInput::readString, FieldStats::new)));
+        explanation = Collections.unmodifiableList(in.readList(StreamInput::readString));
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeVInt(numLinesAnalyzed);
+        out.writeVInt(numMessagesAnalyzed);
+        out.writeString(sampleStart);
+        out.writeString(charset);
+        out.writeOptionalBoolean(hasByteOrderMarker);
+        out.writeEnum(format);
+        out.writeOptionalString(multilineStartPattern);
+        out.writeOptionalString(excludeLinesPattern);
+        if (inputFields == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            out.writeCollection(inputFields, StreamOutput::writeString);
+        }
+        out.writeOptionalBoolean(hasHeaderRow);
+        if (delimiter == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            out.writeVInt(delimiter);
+        }
+        out.writeOptionalBoolean(shouldTrimFields);
+        out.writeOptionalString(grokPattern);
+        if (timestampFormats == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            out.writeCollection(timestampFormats, StreamOutput::writeString);
+        }
+        out.writeOptionalString(timestampField);
+        out.writeBoolean(needClientTimezone);
+        out.writeMap(mappings);
+        out.writeMap(fieldStats, StreamOutput::writeString, (out1, value) -> value.writeTo(out1));
+        out.writeCollection(explanation, StreamOutput::writeString);
+    }
+
     public int getNumLinesAnalyzed() {
         return numLinesAnalyzed;
     }
@@ -266,8 +281,8 @@ public class LogStructure implements ToXContentObject {
         return hasHeaderRow;
     }
 
-    public Character getSeparator() {
-        return separator;
+    public Character getDelimiter() {
+        return delimiter;
     }
 
     public Boolean getShouldTrimFields() {
@@ -294,6 +309,10 @@ public class LogStructure implements ToXContentObject {
         return mappings;
     }
 
+    public SortedMap<String, FieldStats> getFieldStats() {
+        return fieldStats;
+    }
+
     public List<String> getExplanation() {
         return explanation;
     }
@@ -322,8 +341,8 @@ public class LogStructure implements ToXContentObject {
         if (hasHeaderRow != null) {
             builder.field(HAS_HEADER_ROW.getPreferredName(), hasHeaderRow.booleanValue());
         }
-        if (separator != null) {
-            builder.field(SEPARATOR.getPreferredName(), String.valueOf(separator));
+        if (delimiter != null) {
+            builder.field(DELIMITER.getPreferredName(), String.valueOf(delimiter));
         }
         if (shouldTrimFields != null) {
             builder.field(SHOULD_TRIM_FIELDS.getPreferredName(), shouldTrimFields.booleanValue());
@@ -339,7 +358,16 @@ public class LogStructure implements ToXContentObject {
         }
         builder.field(NEED_CLIENT_TIMEZONE.getPreferredName(), needClientTimezone);
         builder.field(MAPPINGS.getPreferredName(), mappings);
-        builder.field(EXPLANATION.getPreferredName(), explanation);
+        if (fieldStats.isEmpty() == false) {
+            builder.startObject(FIELD_STATS.getPreferredName());
+            for (Map.Entry<String, FieldStats> entry : fieldStats.entrySet()) {
+                builder.field(entry.getKey(), entry.getValue());
+            }
+            builder.endObject();
+        }
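+        // The (potentially long) explanation is only serialized when explicitly
+        // requested, i.e. when the caller sets the "explain" parameter to true.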
+        if (params.paramAsBoolean(EXPLAIN, false)) {
+            builder.field(EXPLANATION.getPreferredName(), explanation);
+        }
         builder.endObject();
 
         return builder;
@@ -349,8 +377,8 @@ public class LogStructure implements ToXContentObject {
     public int hashCode() {
         return Objects.hash(numLinesAnalyzed, numMessagesAnalyzed, sampleStart, charset, hasByteOrderMarker, format,
-            multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, separator, shouldTrimFields, grokPattern, timestampField,
-            timestampFormats, needClientTimezone, mappings, explanation);
+            multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, timestampField,
+            timestampFormats, needClientTimezone, mappings, fieldStats, explanation);
     }
 
     @Override
@@ -364,7 +392,7 @@ public class LogStructure implements ToXContentObject {
             return false;
         }
 
-        LogStructure that = (LogStructure) other;
+        FileStructure that = (FileStructure) other;
         return this.numLinesAnalyzed == that.numLinesAnalyzed &&
             this.numMessagesAnalyzed == that.numMessagesAnalyzed &&
             this.needClientTimezone == that.needClientTimezone &&
@@ -376,12 +404,13 @@ public class LogStructure implements ToXContentObject {
             Objects.equals(this.excludeLinesPattern, that.excludeLinesPattern) &&
             Objects.equals(this.inputFields, that.inputFields) &&
             Objects.equals(this.hasHeaderRow, that.hasHeaderRow) &&
-            Objects.equals(this.separator, that.separator) &&
+            Objects.equals(this.delimiter, that.delimiter) &&
             Objects.equals(this.shouldTrimFields, that.shouldTrimFields) &&
             Objects.equals(this.grokPattern, that.grokPattern) &&
             Objects.equals(this.timestampField, that.timestampField) &&
             Objects.equals(this.timestampFormats, that.timestampFormats) &&
             Objects.equals(this.mappings, that.mappings) &&
+            Objects.equals(this.fieldStats, that.fieldStats) &&
             Objects.equals(this.explanation, that.explanation);
     }
 
@@ -397,13 +426,14 @@ public class LogStructure implements ToXContentObject {
         private String excludeLinesPattern;
         private List<String> inputFields;
         private Boolean hasHeaderRow;
-        private Character separator;
+        private Character delimiter;
         private Boolean shouldTrimFields;
         private String grokPattern;
         private String timestampField;
         private List<String> timestampFormats;
         private boolean needClientTimezone;
         private Map<String, Object> mappings;
+        private Map<String, FieldStats> fieldStats = Collections.emptyMap();
         private List<String> explanation;
 
         public Builder() {
@@ -441,7 +471,6 @@ public class LogStructure implements ToXContentObject {
 
         public Builder setFormat(Format format) {
             this.format = Objects.requireNonNull(format);
-            this.separator = format.separator();
             return this;
         }
 
@@ -465,13 +494,13 @@ public class LogStructure implements ToXContentObject {
             return this;
         }
 
-        public Builder setShouldTrimFields(Boolean shouldTrimFields) {
-            this.shouldTrimFields = shouldTrimFields;
+        public Builder setDelimiter(Character delimiter) {
+            this.delimiter = delimiter;
             return this;
         }
 
-        public Builder setSeparator(Character separator) {
-            this.separator = separator;
+        public Builder setShouldTrimFields(Boolean shouldTrimFields) {
+            this.shouldTrimFields = shouldTrimFields;
             return this;
         }
 
@@ -500,13 +529,18 @@ public class LogStructure implements ToXContentObject {
             return this;
         }
 
+        public Builder setFieldStats(Map<String, FieldStats> fieldStats) {
+            this.fieldStats = Objects.requireNonNull(fieldStats);
+            return this;
+        }
+
         public Builder setExplanation(List<String> explanation) {
             this.explanation = Objects.requireNonNull(explanation);
             return this;
         }
 
         @SuppressWarnings("fallthrough")
-        public LogStructure build() {
+        public FileStructure build() {
 
             if (numLinesAnalyzed <= 0) {
                 throw new IllegalArgumentException("Number of lines analyzed must be positive.");
@@ -542,28 +576,22 @@ public class LogStructure implements ToXContentObject {
                 if (hasHeaderRow != null) {
                     throw new IllegalArgumentException("Has header row may not be specified for [" + format + "] structures.");
                 }
-
if (separator != null) { - throw new IllegalArgumentException("Separator may not be specified for [" + format + "] structures."); + if (delimiter != null) { + throw new IllegalArgumentException("Delimiter may not be specified for [" + format + "] structures."); } if (grokPattern != null) { throw new IllegalArgumentException("Grok pattern may not be specified for [" + format + "] structures."); } break; - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: + case DELIMITED: if (inputFields == null || inputFields.isEmpty()) { throw new IllegalArgumentException("Input fields must be specified for [" + format + "] structures."); } if (hasHeaderRow == null) { throw new IllegalArgumentException("Has header row must be specified for [" + format + "] structures."); } - Character expectedSeparator = format.separator(); - assert expectedSeparator != null; - if (expectedSeparator.equals(separator) == false) { - throw new IllegalArgumentException("Separator must be [" + expectedSeparator + "] for [" + format + - "] structures."); + if (delimiter == null) { + throw new IllegalArgumentException("Delimiter must be specified for [" + format + "] structures."); } if (grokPattern != null) { throw new IllegalArgumentException("Grok pattern may not be specified for [" + format + "] structures."); @@ -576,8 +604,8 @@ public class LogStructure implements ToXContentObject { if (hasHeaderRow != null) { throw new IllegalArgumentException("Has header row may not be specified for [" + format + "] structures."); } - if (separator != null) { - throw new IllegalArgumentException("Separator may not be specified for [" + format + "] structures."); + if (delimiter != null) { + throw new IllegalArgumentException("Delimiter may not be specified for [" + format + "] structures."); } if (shouldTrimFields != null) { throw new IllegalArgumentException("Should trim fields may not be specified for [" + format + "] structures."); @@ -606,9 +634,9 @@ public class LogStructure implements ToXContentObject { throw new IllegalArgumentException("Explanation must be specified."); } - return new LogStructure(numLinesAnalyzed, numMessagesAnalyzed, sampleStart, charset, hasByteOrderMarker, format, - multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, separator, shouldTrimFields, grokPattern, - timestampField, timestampFormats, needClientTimezone, mappings, explanation); + return new FileStructure(numLinesAnalyzed, numMessagesAnalyzed, sampleStart, charset, hasByteOrderMarker, format, + multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, + timestampField, timestampFormats, needClientTimezone, mappings, fieldStats, explanation); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java index 93aa5495c40..b5083aeecb9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.ml.job.config; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -248,12 +247,7 @@ public class Detector implements ToXContentObject, Writeable { 
useNull = in.readBoolean(); excludeFrequent = in.readBoolean() ? ExcludeFrequent.readFromStream(in) : null; rules = Collections.unmodifiableList(in.readList(DetectionRule::new)); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - detectorIndex = in.readInt(); - } else { - // negative means unknown, and is expected for 5.4 jobs - detectorIndex = -1; - } + detectorIndex = in.readInt(); } @Override @@ -276,9 +270,7 @@ public class Detector implements ToXContentObject, Writeable { } else { out.writeList(Collections.emptyList()); } - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeInt(detectorIndex); - } + out.writeInt(detectorIndex); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index 0005d16a99c..a978612fd02 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -214,11 +214,7 @@ public class Job extends AbstractDiffable implements Writeable, ToXContentO public Job(StreamInput in) throws IOException { jobId = in.readString(); jobType = in.readString(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - jobVersion = in.readBoolean() ? Version.readVersion(in) : null; - } else { - jobVersion = null; - } + jobVersion = in.readBoolean() ? Version.readVersion(in) : null; if (in.getVersion().onOrAfter(Version.V_6_1_0)) { groups = Collections.unmodifiableList(in.readList(StreamInput::readString)); } else { @@ -482,13 +478,11 @@ public class Job extends AbstractDiffable implements Writeable, ToXContentO public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); out.writeString(jobType); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - if (jobVersion != null) { - out.writeBoolean(true); - Version.writeVersion(jobVersion, out); - } else { - out.writeBoolean(false); - } + if (jobVersion != null) { + out.writeBoolean(true); + Version.writeVersion(jobVersion, out); + } else { + out.writeBoolean(false); } if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeStringList(groups); @@ -666,9 +660,7 @@ public class Job extends AbstractDiffable implements Writeable, ToXContentO */ public static Set getCompatibleJobTypes(Version nodeVersion) { Set compatibleTypes = new HashSet<>(); - if (nodeVersion.onOrAfter(Version.V_5_4_0)) { - compatibleTypes.add(ANOMALY_DETECTOR_JOB_TYPE); - } + compatibleTypes.add(ANOMALY_DETECTOR_JOB_TYPE); return compatibleTypes; } @@ -732,9 +724,7 @@ public class Job extends AbstractDiffable implements Writeable, ToXContentO public Builder(StreamInput in) throws IOException { id = in.readOptionalString(); jobType = in.readString(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - jobVersion = in.readBoolean() ? Version.readVersion(in) : null; - } + jobVersion = in.readBoolean() ? 
Version.readVersion(in) : null; if (in.getVersion().onOrAfter(Version.V_6_1_0)) { groups = in.readList(StreamInput::readString); } else { @@ -921,13 +911,11 @@ public class Job extends AbstractDiffable implements Writeable, ToXContentO public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); out.writeString(jobType); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - if (jobVersion != null) { - out.writeBoolean(true); - Version.writeVersion(jobVersion, out); - } else { - out.writeBoolean(false); - } + if (jobVersion != null) { + out.writeBoolean(true); + Version.writeVersion(jobVersion, out); + } else { + out.writeBoolean(false); } if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeStringList(groups); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java index e89149a062b..948284d5e00 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.config; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -34,10 +33,6 @@ public enum JobState implements Writeable { @Override public void writeTo(StreamOutput out) throws IOException { JobState state = this; - // Pre v5.5 the OPENING state didn't exist - if (this == OPENING && out.getVersion().before(Version.V_5_5_0)) { - state = CLOSED; - } out.writeEnum(state); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 7411115bda3..3c571c9d605 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -161,7 +161,9 @@ public final class Messages { public static final String REST_JOB_NOT_CLOSED_REVERT = "Can only revert to a model snapshot when the job is closed."; public static final String REST_NO_SUCH_MODEL_SNAPSHOT = "No model snapshot with id [{0}] exists for job [{1}]"; public static final String REST_START_AFTER_END = "Invalid time range: end time ''{0}'' is earlier than start time ''{1}''."; - + public static final String REST_NO_SUCH_FORECAST = "No forecast(s) [{0}] exists for job [{1}]"; + public static final String REST_CANNOT_DELETE_FORECAST_IN_CURRENT_STATE = + "Forecast(s) [{0}] for job [{1}] needs to be either FAILED or FINISHED to be deleted"; public static final String FIELD_CANNOT_BE_NULL = "Field [{0}] cannot be null"; private Messages() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobDataDeleter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobDataDeleter.java index 0a7d27f7a0e..cc86ce17bb9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobDataDeleter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobDataDeleter.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.bulk.BulkAction; import 
org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; @@ -22,7 +21,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.DeleteByQueryRequest; -import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelState; import org.elasticsearch.xpack.core.ml.job.results.Result; @@ -129,8 +127,8 @@ public class JobDataDeleter { QueryBuilder query = QueryBuilders.boolQuery() .filter(QueryBuilders.existsQuery(Result.RESULT_TYPE.getPreferredName())) .filter(QueryBuilders.rangeQuery(Result.TIMESTAMP.getPreferredName()).gte(cutoffEpochMs)); - deleteByQueryHolder.searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); - deleteByQueryHolder.searchRequest.source(new SearchSourceBuilder().query(query)); + deleteByQueryHolder.dbqRequest.setIndicesOptions(IndicesOptions.lenientExpandOpen()); + deleteByQueryHolder.dbqRequest.setQuery(query); executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, deleteByQueryHolder.dbqRequest, ActionListener.wrap(r -> listener.onResponse(true), listener::onFailure)); } @@ -142,9 +140,9 @@ public class JobDataDeleter { DeleteByQueryHolder deleteByQueryHolder = new DeleteByQueryHolder(AnomalyDetectorsIndex.jobResultsAliasedName(jobId)); deleteByQueryHolder.dbqRequest.setRefresh(false); - deleteByQueryHolder.searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); + deleteByQueryHolder.dbqRequest.setIndicesOptions(IndicesOptions.lenientExpandOpen()); QueryBuilder qb = QueryBuilders.termQuery(Result.IS_INTERIM.getPreferredName(), true); - deleteByQueryHolder.searchRequest.source(new SearchSourceBuilder().query(new ConstantScoreQueryBuilder(qb))); + deleteByQueryHolder.dbqRequest.setQuery(new ConstantScoreQueryBuilder(qb)); try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { client.execute(DeleteByQueryAction.INSTANCE, deleteByQueryHolder.dbqRequest).get(); @@ -156,13 +154,11 @@ public class JobDataDeleter { // Wrapper to ensure safety private static class DeleteByQueryHolder { - private final SearchRequest searchRequest; private final DeleteByQueryRequest dbqRequest; private DeleteByQueryHolder(String index) { - // The search request has to be constructed and passed to the DeleteByQueryRequest before more details are set to it - searchRequest = new SearchRequest(index); - dbqRequest = new DeleteByQueryRequest(searchRequest); + dbqRequest = new DeleteByQueryRequest(); + dbqRequest.indices(index); dbqRequest.setSlices(5); dbqRequest.setAbortOnVersionConflict(false); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobStorageDeletionTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobStorageDeletionTask.java index 19cb42a220e..61ed8ed4e11 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobStorageDeletionTask.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobStorageDeletionTask.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; @@ -28,7 +27,6 @@ import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.DeleteByQueryRequest; -import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; @@ -95,12 +93,11 @@ public class JobStorageDeletionTask extends Task { ActionListener deleteCategorizerStateHandler = ActionListener.wrap( response -> { logger.info("Running DBQ on [" + indexName + "," + indexPattern + "] for job [" + jobId + "]"); - SearchRequest searchRequest = new SearchRequest(indexName, indexPattern); - DeleteByQueryRequest request = new DeleteByQueryRequest(searchRequest); + DeleteByQueryRequest request = new DeleteByQueryRequest(indexName, indexPattern); ConstantScoreQueryBuilder query = new ConstantScoreQueryBuilder(new TermQueryBuilder(Job.ID.getPreferredName(), jobId)); - searchRequest.source(new SearchSourceBuilder().query(query)); - searchRequest.indicesOptions(MlIndicesUtils.addIgnoreUnavailable(IndicesOptions.lenientExpandOpen())); + request.setQuery(query); + request.setIndicesOptions(MlIndicesUtils.addIgnoreUnavailable(IndicesOptions.lenientExpandOpen())); request.setSlices(5); request.setAbortOnVersionConflict(false); request.setRefresh(true); @@ -125,14 +122,13 @@ public class JobStorageDeletionTask extends Task { private void deleteQuantiles(String jobId, Client client, ActionListener finishedHandler) { // The quantiles type and doc ID changed in v5.5 so delete both the old and new format - SearchRequest searchRequest = new SearchRequest(AnomalyDetectorsIndex.jobStateIndexName()); - DeleteByQueryRequest request = new DeleteByQueryRequest(searchRequest); + DeleteByQueryRequest request = new DeleteByQueryRequest(AnomalyDetectorsIndex.jobStateIndexName()); // Just use ID here, not type, as trying to delete different types spams the logs with an exception stack trace IdsQueryBuilder query = new IdsQueryBuilder().addIds(Quantiles.documentId(jobId), // TODO: remove in 7.0 Quantiles.v54DocumentId(jobId)); - searchRequest.source(new SearchSourceBuilder().query(query)); - searchRequest.indicesOptions(MlIndicesUtils.addIgnoreUnavailable(IndicesOptions.lenientExpandOpen())); + request.setQuery(query); + request.setIndicesOptions(MlIndicesUtils.addIgnoreUnavailable(IndicesOptions.lenientExpandOpen())); request.setAbortOnVersionConflict(false); request.setRefresh(true); @@ -162,14 +158,13 @@ public class JobStorageDeletionTask extends Task { private void deleteCategorizerState(String jobId, Client client, int docNum, ActionListener finishedHandler) { // The categorizer state type and doc ID changed in v5.5 so delete both the old and new format - SearchRequest searchRequest = new 
SearchRequest(AnomalyDetectorsIndex.jobStateIndexName()); - DeleteByQueryRequest request = new DeleteByQueryRequest(searchRequest); + DeleteByQueryRequest request = new DeleteByQueryRequest(AnomalyDetectorsIndex.jobStateIndexName()); // Just use ID here, not type, as trying to delete different types spams the logs with an exception stack trace IdsQueryBuilder query = new IdsQueryBuilder().addIds(CategorizerState.documentId(jobId, docNum), // TODO: remove in 7.0 CategorizerState.v54DocumentId(jobId, docNum)); - searchRequest.source(new SearchSourceBuilder().query(query)); - searchRequest.indicesOptions(MlIndicesUtils.addIgnoreUnavailable(IndicesOptions.lenientExpandOpen())); + request.setQuery(query); + request.setIndicesOptions(MlIndicesUtils.addIgnoreUnavailable(IndicesOptions.lenientExpandOpen())); request.setAbortOnVersionConflict(false); request.setRefresh(true); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java index ad8b24e66c6..2d9afa833c3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.process.autodetect.output; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -58,17 +57,13 @@ public class FlushAcknowledgement implements ToXContentObject, Writeable { public FlushAcknowledgement(StreamInput in) throws IOException { id = in.readString(); - if (in.getVersion().after(Version.V_5_5_0)) { - lastFinalizedBucketEnd = new Date(in.readVLong()); - } + lastFinalizedBucketEnd = new Date(in.readVLong()); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); - if (out.getVersion().after(Version.V_5_5_0)) { - out.writeVLong(lastFinalizedBucketEnd.getTime()); - } + out.writeVLong(lastFinalizedBucketEnd.getTime()); } public String getId() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java index 03487500d8a..068b998dc25 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java @@ -143,7 +143,7 @@ public class ModelSnapshot implements ToXContentObject, Writeable { if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { minVersion = Version.readVersion(in); } else { - minVersion = Version.V_5_5_0; + minVersion = Version.CURRENT.minimumCompatibilityVersion(); } timestamp = in.readBoolean() ? new Date(in.readVLong()) : null; description = in.readOptionalString(); @@ -357,9 +357,8 @@ public class ModelSnapshot implements ToXContentObject, Writeable { private String jobId; // Stored snapshot documents created prior to 6.3.0 will have no - // value for min_version. We default it to 5.5.0 as there were - // no model changes between 5.5.0 and 6.3.0. 
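The deletions in Detector, Job, JobState and FlushAcknowledgement above, and in the results classes that follow, all rest on one fact: after the major-version bump the transport layer never negotiates a pre-6.x stream version, so every in.getVersion().before(Version.V_5_5_0) branch is dead code and every onOrAfter(V_5_5_0) check is always true. What remains is the standard symmetric gate for fields added in still-wire-compatible releases, as in the V_6_1_0 gate kept in Job (the else default shown is the assumed empty list):

    // Reader: peers older than 6.1.0 never sent the field, so substitute a default.
    if (in.getVersion().onOrAfter(Version.V_6_1_0)) {
        groups = Collections.unmodifiableList(in.readList(StreamInput::readString));
    } else {
        groups = Collections.emptyList();
    }

    // Writer: the condition must mirror the reader's exactly, otherwise the two
    // sides disagree on the stream layout and deserialization fails downstream.
    if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
        out.writeStringList(groups);
    }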
- private Version minVersion = Version.V_5_5_0; + // value for min_version. + private Version minVersion = Version.V_6_3_0; private Date timestamp; private String description; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java index 360bcfaaead..869cdcb437e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -163,10 +162,6 @@ public class AnomalyRecord implements ToXContentObject, Writeable { @SuppressWarnings("unchecked") public AnomalyRecord(StreamInput in) throws IOException { jobId = in.readString(); - // bwc for removed sequenceNum field - if (in.getVersion().before(Version.V_5_5_0)) { - in.readInt(); - } detectorIndex = in.readInt(); probability = in.readDouble(); byFieldName = in.readOptionalString(); @@ -201,10 +196,6 @@ public class AnomalyRecord implements ToXContentObject, Writeable { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); - // bwc for removed sequenceNum field - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeInt(0); - } out.writeInt(detectorIndex); out.writeDouble(probability); out.writeOptionalString(byFieldName); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java index 8a7fe2395b4..8280ee9f22e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java @@ -137,19 +137,11 @@ public class Bucket implements ToXContentObject, Writeable { anomalyScore = in.readDouble(); bucketSpan = in.readLong(); initialAnomalyScore = in.readDouble(); - // bwc for recordCount - if (in.getVersion().before(Version.V_5_5_0)) { - in.readInt(); - } records = in.readList(AnomalyRecord::new); eventCount = in.readLong(); isInterim = in.readBoolean(); bucketInfluencers = in.readList(BucketInfluencer::new); processingTimeMs = in.readLong(); - // bwc for perPartitionMaxProbability - if (in.getVersion().before(Version.V_5_5_0)) { - in.readGenericValue(); - } // bwc for perPartitionNormalization if (in.getVersion().before(Version.V_6_5_0)) { in.readList(Bucket::readOldPerPartitionNormalization); @@ -171,19 +163,11 @@ public class Bucket implements ToXContentObject, Writeable { out.writeDouble(anomalyScore); out.writeLong(bucketSpan); out.writeDouble(initialAnomalyScore); - // bwc for recordCount - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeInt(0); - } out.writeList(records); out.writeLong(eventCount); out.writeBoolean(isInterim); out.writeList(bucketInfluencers); out.writeLong(processingTimeMs); - // bwc for perPartitionMaxProbability - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeGenericValue(Collections.emptyMap()); - } // bwc for perPartitionNormalization if (out.getVersion().before(Version.V_6_5_0)) { out.writeList(Collections.emptyList()); diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java index 8b18562ec6d..38d76789a2e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -100,10 +99,6 @@ public class BucketInfluencer implements ToXContentObject, Writeable { isInterim = in.readBoolean(); timestamp = new Date(in.readLong()); bucketSpan = in.readLong(); - // bwc for removed sequenceNum field - if (in.getVersion().before(Version.V_5_5_0)) { - in.readInt(); - } } @Override @@ -117,10 +112,6 @@ public class BucketInfluencer implements ToXContentObject, Writeable { out.writeBoolean(isInterim); out.writeLong(timestamp.getTime()); out.writeLong(bucketSpan); - // bwc for removed sequenceNum field - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeInt(0); - } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java index 97ed643c44d..8ee49cb88d0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -97,10 +96,6 @@ public class Influencer implements ToXContentObject, Writeable { influencerScore = in.readDouble(); isInterim = in.readBoolean(); bucketSpan = in.readLong(); - // bwc for removed sequenceNum field - if (in.getVersion().before(Version.V_5_5_0)) { - in.readInt(); - } } @Override @@ -114,10 +109,6 @@ public class Influencer implements ToXContentObject, Writeable { out.writeDouble(influencerScore); out.writeBoolean(isInterim); out.writeLong(bucketSpan); - // bwc for removed sequenceNum field - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeInt(0); - } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java index c331d8b0437..9f066b6e98e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java @@ -109,20 +109,7 @@ public class ModelPlot implements ToXContentObject, Writeable { public ModelPlot(StreamInput in) throws IOException { jobId = in.readString(); - // timestamp isn't optional in v5.5 - if (in.getVersion().before(Version.V_5_5_0)) { - if (in.readBoolean()) { - timestamp = new Date(in.readLong()); - } else { - timestamp = new Date(); - } - } else { - timestamp = new Date(in.readLong()); - } - // bwc for removed id field - if (in.getVersion().before(Version.V_5_5_0)) { - 
in.readOptionalString(); - } + timestamp = new Date(in.readLong()); partitionFieldName = in.readOptionalString(); partitionFieldValue = in.readOptionalString(); overFieldName = in.readOptionalString(); @@ -138,11 +125,7 @@ public class ModelPlot implements ToXContentObject, Writeable { } else { actual = in.readOptionalDouble(); } - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - bucketSpan = in.readLong(); - } else { - bucketSpan = 0; - } + bucketSpan = in.readLong(); if (in.getVersion().onOrAfter(Version.V_6_1_0)) { detectorIndex = in.readInt(); } else { @@ -154,20 +137,7 @@ public class ModelPlot implements ToXContentObject, Writeable { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); - // timestamp isn't optional in v5.5 - if (out.getVersion().before(Version.V_5_5_0)) { - boolean hasTimestamp = timestamp != null; - out.writeBoolean(hasTimestamp); - if (hasTimestamp) { - out.writeLong(timestamp.getTime()); - } - } else { - out.writeLong(timestamp.getTime()); - } - // bwc for removed id field - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeOptionalString(null); - } + out.writeLong(timestamp.getTime()); out.writeOptionalString(partitionFieldName); out.writeOptionalString(partitionFieldValue); out.writeOptionalString(overFieldName); @@ -189,9 +159,7 @@ public class ModelPlot implements ToXContentObject, Writeable { } else { out.writeOptionalDouble(actual); } - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeLong(bucketSpan); - } + out.writeLong(bucketSpan); if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeInt(detectorIndex); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulator.java index fe987db48ce..91c95c707d4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulator.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulator.java @@ -8,7 +8,7 @@ package org.elasticsearch.xpack.core.ml.stats; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.search.aggregations.metrics.stats.Stats; +import org.elasticsearch.search.aggregations.metrics.Stats; import java.io.IOException; import java.util.HashMap; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupField.java index 134ce6c87b3..a784922228b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupField.java @@ -7,11 +7,11 @@ package org.elasticsearch.xpack.core.rollup; import org.elasticsearch.common.ParseField; import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; 
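The StatsAccumulator and RollupField hunks here are purely mechanical: in this release the metric aggregation classes were flattened out of their per-metric subpackages into the parent org.elasticsearch.search.aggregations.metrics package, so only import statements change. For example:

    // before: one subpackage per metric
    import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
    // after: flattened into the parent package
    import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;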
+import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import java.util.Arrays; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java index 50f79315085..7bbbf07e6dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java @@ -26,8 +26,8 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.xpack.core.rollup.RollupField; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; import java.io.IOException; @@ -174,7 +174,14 @@ public class GetRollupJobsAction extends Action { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(JOBS.getPreferredName(), jobs); + + // XContentBuilder does not support passing the params object for Iterables + builder.field(JOBS.getPreferredName()); + builder.startArray(); + for (JobWrapper job : jobs) { + job.toXContent(builder, params); + } + builder.endArray(); builder.endObject(); return builder; } @@ -204,20 +211,20 @@ public class GetRollupJobsAction extends Action { public static class JobWrapper implements Writeable, ToXContentObject { private final RollupJobConfig job; - private final RollupJobStats stats; + private final RollupIndexerJobStats stats; private final RollupJobStatus status; public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, a -> new JobWrapper((RollupJobConfig) a[0], - (RollupJobStats) a[1], (RollupJobStatus)a[2])); + (RollupIndexerJobStats) a[1], (RollupJobStatus)a[2])); static { PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> RollupJobConfig.fromXContent(p, null), CONFIG); - PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupJobStats.PARSER::apply, STATS); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupIndexerJobStats.PARSER::apply, STATS); PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupJobStatus.PARSER::apply, STATUS); } - public JobWrapper(RollupJobConfig job, RollupJobStats stats, RollupJobStatus status) { + public JobWrapper(RollupJobConfig job, RollupIndexerJobStats stats, RollupJobStatus status) { this.job = job; this.stats = stats; this.status = status; @@ -225,7 +232,7 @@ public class GetRollupJobsAction extends Action { public JobWrapper(StreamInput in) throws IOException { this.job = new RollupJobConfig(in); - this.stats = new RollupJobStats(in); + this.stats = new RollupIndexerJobStats(in); this.status = new RollupJobStatus(in); } @@ -240,7 +247,7 @@ public class GetRollupJobsAction extends Action { return job; } - public 
RollupJobStats getStats() { + public RollupIndexerJobStats getStats() { return stats; } @@ -254,7 +261,7 @@ public class GetRollupJobsAction extends Action { builder.field(CONFIG.getPreferredName()); job.toXContent(builder, params); builder.field(STATUS.getPreferredName(), status); - builder.field(STATS.getPreferredName(), stats); + builder.field(STATS.getPreferredName(), stats, params); builder.endObject(); return builder; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java index 1b8eb736084..054d08df999 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java @@ -11,15 +11,26 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.GroupConfig; +import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.MetricConfig; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; +import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; + +import static java.util.Collections.singletonMap; /** * Represents the Rollup capabilities for a specific job on a single rollup index @@ -42,52 +53,7 @@ public class RollupJobCaps implements Writeable, ToXContentObject { jobID = job.getId(); rollupIndex = job.getRollupIndex(); indexPattern = job.getIndexPattern(); - Map dateHistoAggCap = job.getGroupConfig().getDateHistogram().toAggCap(); - String dateField = job.getGroupConfig().getDateHistogram().getField(); - RollupFieldCaps fieldCaps = fieldCapLookup.get(dateField); - if (fieldCaps == null) { - fieldCaps = new RollupFieldCaps(); - } - fieldCaps.addAgg(dateHistoAggCap); - fieldCapLookup.put(dateField, fieldCaps); - - if (job.getGroupConfig().getHistogram() != null) { - Map histoAggCap = job.getGroupConfig().getHistogram().toAggCap(); - Arrays.stream(job.getGroupConfig().getHistogram().getFields()).forEach(field -> { - RollupFieldCaps caps = fieldCapLookup.get(field); - if (caps == null) { - caps = new RollupFieldCaps(); - } - caps.addAgg(histoAggCap); - fieldCapLookup.put(field, caps); - }); - } - - if (job.getGroupConfig().getTerms() != null) { - Map histoAggCap = job.getGroupConfig().getTerms().toAggCap(); - Arrays.stream(job.getGroupConfig().getTerms().getFields()).forEach(field -> { - RollupFieldCaps caps = fieldCapLookup.get(field); - if (caps == null) { - caps = new RollupFieldCaps(); - } - caps.addAgg(histoAggCap); - fieldCapLookup.put(field, caps); - }); - } - - if (job.getMetricsConfig().size() > 0) { - 
job.getMetricsConfig().forEach(metricConfig -> { - List> metrics = metricConfig.toAggCap(); - metrics.forEach(m -> { - RollupFieldCaps caps = fieldCapLookup.get(metricConfig.getField()); - if (caps == null) { - caps = new RollupFieldCaps(); - } - caps.addAgg(m); - fieldCapLookup.put(metricConfig.getField(), caps); - }); - }); - } + fieldCapLookup = createRollupFieldCaps(job); } public RollupJobCaps(StreamInput in) throws IOException { @@ -149,8 +115,8 @@ public class RollupJobCaps implements Writeable, ToXContentObject { RollupJobCaps that = (RollupJobCaps) other; return Objects.equals(this.jobID, that.jobID) - && Objects.equals(this.rollupIndex, that.rollupIndex) - && Objects.equals(this.fieldCapLookup, that.fieldCapLookup); + && Objects.equals(this.rollupIndex, that.rollupIndex) + && Objects.equals(this.fieldCapLookup, that.fieldCapLookup); } @Override @@ -158,6 +124,77 @@ public class RollupJobCaps implements Writeable, ToXContentObject { return Objects.hash(jobID, rollupIndex, fieldCapLookup); } + static Map createRollupFieldCaps(final RollupJobConfig rollupJobConfig) { + final Map fieldCapLookup = new HashMap<>(); + + final GroupConfig groupConfig = rollupJobConfig.getGroupConfig(); + if (groupConfig != null) { + // Create RollupFieldCaps for the date histogram + final DateHistogramGroupConfig dateHistogram = groupConfig.getDateHistogram(); + final Map dateHistogramAggCap = new HashMap<>(); + dateHistogramAggCap.put("agg", DateHistogramAggregationBuilder.NAME); + dateHistogramAggCap.put(DateHistogramGroupConfig.INTERVAL, dateHistogram.getInterval().toString()); + if (dateHistogram.getDelay() != null) { + dateHistogramAggCap.put(DateHistogramGroupConfig.DELAY, dateHistogram.getDelay().toString()); + } + dateHistogramAggCap.put(DateHistogramGroupConfig.TIME_ZONE, dateHistogram.getTimeZone()); + + final RollupFieldCaps dateHistogramFieldCaps = new RollupFieldCaps(); + dateHistogramFieldCaps.addAgg(dateHistogramAggCap); + fieldCapLookup.put(dateHistogram.getField(), dateHistogramFieldCaps); + + // Create RollupFieldCaps for the histogram + final HistogramGroupConfig histogram = groupConfig.getHistogram(); + if (histogram != null) { + final Map histogramAggCap = new HashMap<>(); + histogramAggCap.put("agg", HistogramAggregationBuilder.NAME); + histogramAggCap.put(HistogramGroupConfig.INTERVAL, histogram.getInterval()); + for (String field : histogram.getFields()) { + RollupFieldCaps caps = fieldCapLookup.get(field); + if (caps == null) { + caps = new RollupFieldCaps(); + } + caps.addAgg(histogramAggCap); + fieldCapLookup.put(field, caps); + } + } + + // Create RollupFieldCaps for the term + final TermsGroupConfig terms = groupConfig.getTerms(); + if (terms != null) { + final Map termsAggCap = singletonMap("agg", TermsAggregationBuilder.NAME); + for (String field : terms.getFields()) { + RollupFieldCaps caps = fieldCapLookup.get(field); + if (caps == null) { + caps = new RollupFieldCaps(); + } + caps.addAgg(termsAggCap); + fieldCapLookup.put(field, caps); + } + } + } + + // Create RollupFieldCaps for the metrics + final List metricsConfig = rollupJobConfig.getMetricsConfig(); + if (metricsConfig.size() > 0) { + metricsConfig.forEach(metricConfig -> { + final List> metrics = metricConfig.getMetrics().stream() + .map(metric -> singletonMap("agg", (Object) metric)) + .collect(Collectors.toList()); + + metrics.forEach(m -> { + RollupFieldCaps caps = fieldCapLookup.get(metricConfig.getField()); + if (caps == null) { + caps = new RollupFieldCaps(); + } + caps.addAgg(m); + 
fieldCapLookup.put(metricConfig.getField(), caps); + }); + }); + } + return Collections.unmodifiableMap(fieldCapLookup); + } + public static class RollupFieldCaps implements Writeable, ToXContentObject { private List> aggs = new ArrayList<>(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java index 77dfa1cbbb1..166322b9372 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java @@ -20,17 +20,11 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; -import org.elasticsearch.search.aggregations.bucket.composite.DateHistogramValuesSourceBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.xpack.core.rollup.RollupField; import org.joda.time.DateTimeZone; import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Objects; @@ -55,10 +49,10 @@ import static org.elasticsearch.common.xcontent.ObjectParser.ValueType; public class DateHistogramGroupConfig implements Writeable, ToXContentObject { static final String NAME = "date_histogram"; - private static final String INTERVAL = "interval"; + public static final String INTERVAL = "interval"; private static final String FIELD = "field"; public static final String TIME_ZONE = "time_zone"; - private static final String DELAY = "delay"; + public static final String DELAY = "delay"; private static final String DEFAULT_TIMEZONE = "UTC"; private static final ConstructingObjectParser PARSER; static { @@ -183,38 +177,6 @@ public class DateHistogramGroupConfig implements Writeable, ToXContentObject { return createRounding(interval.toString(), timeZone); } - /** - * This returns a set of aggregation builders which represent the configured - * set of date histograms. 
Used by the rollup indexer to iterate over historical data - */ - public List> toBuilders() { - DateHistogramValuesSourceBuilder vsBuilder = - new DateHistogramValuesSourceBuilder(RollupField.formatIndexerAggName(field, DateHistogramAggregationBuilder.NAME)); - vsBuilder.dateHistogramInterval(interval); - vsBuilder.field(field); - vsBuilder.timeZone(toDateTimeZone(timeZone)); - return Collections.singletonList(vsBuilder); - } - - /** - * @return A map representing this config object as a RollupCaps aggregation object - */ - public Map toAggCap() { - Map map = new HashMap<>(3); - map.put("agg", DateHistogramAggregationBuilder.NAME); - map.put(INTERVAL, interval.toString()); - if (delay != null) { - map.put(DELAY, delay.toString()); - } - map.put(TIME_ZONE, timeZone); - - return map; - } - - public Map getMetadata() { - return Collections.singletonMap(RollupField.formatMetaField(RollupField.INTERVAL), interval.toString()); - } - public void validateMappings(Map> fieldCapsResponse, ActionRequestValidationException validationException) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java index 0480050bf52..a22d022ee2d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java @@ -16,19 +16,13 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; -import org.elasticsearch.search.aggregations.bucket.composite.HistogramValuesSourceBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.xpack.core.rollup.RollupField; import java.io.IOException; import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -47,7 +41,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru public class HistogramGroupConfig implements Writeable, ToXContentObject { static final String NAME = "histogram"; - private static final String INTERVAL = "interval"; + public static final String INTERVAL = "interval"; private static final String FIELDS = "fields"; private static final ConstructingObjectParser PARSER; static { @@ -86,39 +80,6 @@ public class HistogramGroupConfig implements Writeable, ToXContentObject { return fields; } - /** - * This returns a set of aggregation builders which represent the configured - * set of histograms. 
Used by the rollup indexer to iterate over historical data - */ - public List> toBuilders() { - if (fields.length == 0) { - return Collections.emptyList(); - } - - return Arrays.stream(fields).map(f -> { - HistogramValuesSourceBuilder vsBuilder - = new HistogramValuesSourceBuilder(RollupField.formatIndexerAggName(f, HistogramAggregationBuilder.NAME)); - vsBuilder.interval(interval); - vsBuilder.field(f); - vsBuilder.missingBucket(true); - return vsBuilder; - }).collect(Collectors.toList()); - } - - /** - * @return A map representing this config object as a RollupCaps aggregation object - */ - public Map toAggCap() { - Map map = new HashMap<>(2); - map.put("agg", HistogramAggregationBuilder.NAME); - map.put(INTERVAL, interval); - return map; - } - - public Map getMetadata() { - return Collections.singletonMap(RollupField.formatMetaField(RollupField.INTERVAL), interval); - } - public void validateMappings(Map> fieldCapsResponse, ActionRequestValidationException validationException) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java index cc673c4ed0d..3a267e4cfa4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java @@ -16,22 +16,12 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; -import org.elasticsearch.search.aggregations.support.ValueType; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.xpack.core.rollup.RollupField; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -54,11 +44,11 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru public class MetricConfig implements Writeable, ToXContentObject { // TODO: replace these with an enum - private static final ParseField MIN = new ParseField("min"); - private static final ParseField MAX = new ParseField("max"); - private static final ParseField SUM = new ParseField("sum"); - private static final ParseField AVG = new ParseField("avg"); - private static final ParseField VALUE_COUNT = new ParseField("value_count"); + public static final ParseField MIN = new ParseField("min"); + public static final ParseField MAX = new ParseField("max"); + public static final ParseField SUM = new ParseField("sum"); + public static final ParseField AVG = new ParseField("avg"); + public static final ParseField VALUE_COUNT = new ParseField("value_count"); static final String NAME = "metrics"; private static final String FIELD = "field"; @@ -112,53 
+102,6 @@ public class MetricConfig implements Writeable, ToXContentObject { return metrics; } - /** - * This returns a set of aggregation builders which represent the configured - * set of metrics. Used by the rollup indexer to iterate over historical data - */ - public List toBuilders() { - if (metrics.size() == 0) { - return Collections.emptyList(); - } - - List aggs = new ArrayList<>(metrics.size()); - for (String metric : metrics) { - ValuesSourceAggregationBuilder.LeafOnly newBuilder; - if (metric.equals(MIN.getPreferredName())) { - newBuilder = new MinAggregationBuilder(RollupField.formatFieldName(field, MinAggregationBuilder.NAME, RollupField.VALUE)); - } else if (metric.equals(MAX.getPreferredName())) { - newBuilder = new MaxAggregationBuilder(RollupField.formatFieldName(field, MaxAggregationBuilder.NAME, RollupField.VALUE)); - } else if (metric.equals(AVG.getPreferredName())) { - // Avgs are sum + count - newBuilder = new SumAggregationBuilder(RollupField.formatFieldName(field, AvgAggregationBuilder.NAME, RollupField.VALUE)); - ValuesSourceAggregationBuilder.LeafOnly countBuilder - = new ValueCountAggregationBuilder( - RollupField.formatFieldName(field, AvgAggregationBuilder.NAME, RollupField.COUNT_FIELD), ValueType.NUMERIC); - countBuilder.field(field); - aggs.add(countBuilder); - } else if (metric.equals(SUM.getPreferredName())) { - newBuilder = new SumAggregationBuilder(RollupField.formatFieldName(field, SumAggregationBuilder.NAME, RollupField.VALUE)); - } else if (metric.equals(VALUE_COUNT.getPreferredName())) { - // TODO allow non-numeric value_counts. - // Hardcoding this is fine for now since the job validation guarantees that all metric fields are numerics - newBuilder = new ValueCountAggregationBuilder( - RollupField.formatFieldName(field, ValueCountAggregationBuilder.NAME, RollupField.VALUE), ValueType.NUMERIC); - } else { - throw new IllegalArgumentException("Unsupported metric type [" + metric + "]"); - } - newBuilder.field(field); - aggs.add(newBuilder); - } - return aggs; - } - - /** - * @return A map representing this config object as a RollupCaps aggregation object - */ - public List> toAggCap() { - return metrics.stream().map(metric -> Collections.singletonMap("agg", (Object)metric)).collect(Collectors.toList()); - } - public void validateMappings(Map> fieldCapsResponse, ActionRequestValidationException validationException) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java new file mode 100644 index 00000000000..87915671b79 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexing.IndexerJobStats; + +import java.io.IOException; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * The Rollup specialization of stats for the AsyncTwoPhaseIndexer. + * Note: instead of `documents_indexed`, this XContent shows `rollups_indexed` + */ +public class RollupIndexerJobStats extends IndexerJobStats { + private static ParseField NUM_PAGES = new ParseField("pages_processed"); + private static ParseField NUM_INPUT_DOCUMENTS = new ParseField("documents_processed"); + private static ParseField NUM_OUTPUT_DOCUMENTS = new ParseField("rollups_indexed"); + private static ParseField NUM_INVOCATIONS = new ParseField("trigger_count"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(NAME.getPreferredName(), + args -> new RollupIndexerJobStats((long) args[0], (long) args[1], (long) args[2], (long) args[3])); + + static { + PARSER.declareLong(constructorArg(), NUM_PAGES); + PARSER.declareLong(constructorArg(), NUM_INPUT_DOCUMENTS); + PARSER.declareLong(constructorArg(), NUM_OUTPUT_DOCUMENTS); + PARSER.declareLong(constructorArg(), NUM_INVOCATIONS); + } + + public RollupIndexerJobStats() { + super(); + } + + public RollupIndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations) { + super(numPages, numInputDocuments, numOuputDocuments, numInvocations); + } + + public RollupIndexerJobStats(StreamInput in) throws IOException { + super(in); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NUM_PAGES.getPreferredName(), numPages); + builder.field(NUM_INPUT_DOCUMENTS.getPreferredName(), numInputDocuments); + builder.field(NUM_OUTPUT_DOCUMENTS.getPreferredName(), numOuputDocuments); + builder.field(NUM_INVOCATIONS.getPreferredName(), numInvocations); + builder.endObject(); + return builder; + } + + public static RollupIndexerJobStats fromXContent(XContentParser parser) { + try { + return PARSER.parse(parser, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java deleted file mode 100644 index 06cfb520af5..00000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License.
- */ -package org.elasticsearch.xpack.core.rollup.job; - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Objects; - -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; - -/** - * This class holds the runtime statistics of a job. The stats are not used by any internal process - * and are only for external monitoring/reference. Statistics are not persisted with the job, so if the - * allocated task is shutdown/restarted on a different node all the stats will reset. - */ -public class RollupJobStats implements ToXContentObject, Writeable { - - public static final ParseField NAME = new ParseField("job_stats"); - - private static ParseField NUM_PAGES = new ParseField("pages_processed"); - private static ParseField NUM_DOCUMENTS = new ParseField("documents_processed"); - private static ParseField NUM_ROLLUPS = new ParseField("rollups_indexed"); - private static ParseField NUM_INVOCATIONS = new ParseField("trigger_count"); - - private long numPages = 0; - private long numDocuments = 0; - private long numRollups = 0; - private long numInvocations = 0; - - public static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>(NAME.getPreferredName(), - args -> new RollupJobStats((long) args[0], (long) args[1], (long) args[2], (long) args[3])); - - static { - PARSER.declareLong(constructorArg(), NUM_PAGES); - PARSER.declareLong(constructorArg(), NUM_DOCUMENTS); - PARSER.declareLong(constructorArg(), NUM_ROLLUPS); - PARSER.declareLong(constructorArg(), NUM_INVOCATIONS); - } - - public RollupJobStats() { - } - - public RollupJobStats(long numPages, long numDocuments, long numRollups, long numInvocations) { - this.numPages = numPages; - this.numDocuments = numDocuments; - this.numRollups = numRollups; - this.numInvocations = numInvocations; - } - - public RollupJobStats(StreamInput in) throws IOException { - this.numPages = in.readVLong(); - this.numDocuments = in.readVLong(); - this.numRollups = in.readVLong(); - this.numInvocations = in.readVLong(); - } - - public long getNumPages() { - return numPages; - } - - public long getNumDocuments() { - return numDocuments; - } - - public long getNumInvocations() { - return numInvocations; - } - - public long getNumRollups() { - return numRollups; - } - - public void incrementNumPages(long n) { - assert(n >= 0); - numPages += n; - } - - public void incrementNumDocuments(long n) { - assert(n >= 0); - numDocuments += n; - } - - public void incrementNumInvocations(long n) { - assert(n >= 0); - numInvocations += n; - } - - public void incrementNumRollups(long n) { - assert(n >= 0); - numRollups += n; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(numPages); - out.writeVLong(numDocuments); - out.writeVLong(numRollups); - out.writeVLong(numInvocations); - } - - public static RollupJobStats fromXContent(XContentParser parser) { - try { - return PARSER.parse(parser, null); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, 
Params params) throws IOException { - builder.startObject(); - builder.field(NUM_PAGES.getPreferredName(), numPages); - builder.field(NUM_DOCUMENTS.getPreferredName(), numDocuments); - builder.field(NUM_ROLLUPS.getPreferredName(), numRollups); - builder.field(NUM_INVOCATIONS.getPreferredName(), numInvocations); - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - - if (other == null || getClass() != other.getClass()) { - return false; - } - - RollupJobStats that = (RollupJobStats) other; - - return Objects.equals(this.numPages, that.numPages) - && Objects.equals(this.numDocuments, that.numDocuments) - && Objects.equals(this.numRollups, that.numRollups) - && Objects.equals(this.numInvocations, that.numInvocations); - } - - @Override - public int hashCode() { - return Objects.hash(numPages, numDocuments, numRollups, numInvocations); - } - -} - diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java index 640385c9c80..0a2f046907c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.indexing.IndexerState; import java.io.IOException; import java.util.HashMap; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java index 32507d57f32..fbc03984325 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java @@ -18,17 +18,11 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; -import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; -import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; -import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.xpack.core.rollup.RollupField; import java.io.IOException; import java.util.Arrays; -import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -80,29 +74,6 @@ public class TermsGroupConfig implements Writeable, ToXContentObject { return fields; } - /** - * This returns a set of aggregation builders which represent the configured - * set of date histograms. 
Used by the rollup indexer to iterate over historical data - */ - public List> toBuilders() { - return Arrays.stream(fields).map(f -> { - TermsValuesSourceBuilder vsBuilder - = new TermsValuesSourceBuilder(RollupField.formatIndexerAggName(f, TermsAggregationBuilder.NAME)); - vsBuilder.field(f); - vsBuilder.missingBucket(true); - return vsBuilder; - }).collect(Collectors.toList()); - } - - /** - * @return A map representing this config object as a RollupCaps aggregation object - */ - public Map toAggCap() { - Map map = new HashMap<>(1); - map.put("agg", TermsAggregationBuilder.NAME); - return map; - } - public void validateMappings(Map> fieldCapsResponse, ActionRequestValidationException validationException) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java index e405c5e4e00..66a2eb35898 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java @@ -3,8 +3,14 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.core.scheduler; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -13,6 +19,7 @@ import java.time.Clock; import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -88,13 +95,20 @@ public class SchedulerEngine { } private final Map schedules = ConcurrentCollections.newConcurrentMap(); - private final ScheduledExecutorService scheduler; private final Clock clock; + private final ScheduledExecutorService scheduler; + private final Logger logger; private final List listeners = new CopyOnWriteArrayList<>(); - public SchedulerEngine(Clock clock) { - this.clock = clock; - this.scheduler = Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory("trigger_engine_scheduler")); + public SchedulerEngine(final Settings settings, final Clock clock) { + this(settings, clock, LogManager.getLogger(SchedulerEngine.class)); + } + + SchedulerEngine(final Settings settings, final Clock clock, final Logger logger) { + this.clock = Objects.requireNonNull(clock, "clock"); + this.scheduler = Executors.newScheduledThreadPool( + 1, EsExecutors.daemonThreadFactory(Objects.requireNonNull(settings, "settings"), "trigger_engine_scheduler")); + this.logger = Objects.requireNonNull(logger, "logger"); } public void register(Listener listener) { @@ -143,10 +157,15 @@ public class SchedulerEngine { return schedules.size(); } - protected void notifyListeners(String name, long triggeredTime, long scheduledTime) { + protected void notifyListeners(final String name, final long triggeredTime, final long scheduledTime) { final Event event = new Event(name, triggeredTime, scheduledTime); - for (Listener listener : 
listeners) {
-            listener.triggered(event);
+        for (final Listener listener : listeners) {
+            try {
+                listener.triggered(event);
+            } catch (final Exception e) {
+                // do not allow exceptions to escape this method; we should continue to notify listeners and schedule the next run
+                logger.warn(new ParameterizedMessage("listener failed while handling triggered event [{}]", name), e);
+            }
         }
     }
@@ -168,8 +187,20 @@
         @Override
         public void run() {
-            long triggeredTime = clock.millis();
-            notifyListeners(name, triggeredTime, scheduledTime);
+            final long triggeredTime = clock.millis();
+            try {
+                notifyListeners(name, triggeredTime, scheduledTime);
+            } catch (final Throwable t) {
+                /*
+                 * Allowing the throwable to escape here will lead to it being caught in FutureTask#run and set as the outcome of this
+                 * task; however, we never inspect the outcomes of these scheduled tasks and so allowing the throwable to escape
+                 * unhandled here could lead to us losing fatal errors. Instead, we rely on ExceptionsHelper#maybeDieOnAnotherThread to
+                 * appropriately dispatch any error to the uncaught exception handler. We should never see an exception here as these do
+                 * not escape from SchedulerEngine#notifyListeners.
+                 */
+                ExceptionsHelper.maybeDieOnAnotherThread(t);
+                throw t;
+            }
             scheduleNextRun(triggeredTime);
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java
index 8d56221d78b..99788ac1de4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java
@@ -13,7 +13,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 
 import java.io.IOException;
 import java.util.Objects;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java
index 536464cb337..7f22f90351e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java
@@ -10,7 +10,7 @@ import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 
 import java.io.IOException;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java
index 96c9c817182..82863a6e8d1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java
@@ -167,7 +167,7 @@ public class PutRoleRequest extends
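The two layers of exception handling in the SchedulerEngine change above have distinct jobs: notifyListeners swallows per-listener exceptions so one failing listener cannot starve the others or cancel the schedule, while ActiveSchedule#run dispatches fatal errors before the executor's FutureTask silently captures them. A hedged sketch of the behavior this buys; the listener bodies are illustrative, and the Event accessor name is assumed from the Event class:

```java
import java.time.Clock;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.core.scheduler.SchedulerEngine;

class ListenerIsolationDemo {                        // illustrative harness
    public static void main(String[] args) {
        SchedulerEngine engine = new SchedulerEngine(Settings.EMPTY, Clock.systemUTC());
        engine.register(event -> { throw new IllegalStateException("boom"); }); // logged as a warning, not fatal
        engine.register(event -> System.out.println("still notified: " + event.getJobName()));
        // schedule jobs as usual; a throwing listener no longer cancels the next run
    }
}
```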
ActionRequest implements WriteRequest SUPPORTED_GRANT_TYPES = Collections.unmodifiableSet( + EnumSet.of(GrantType.PASSWORD, GrantType.REFRESH_TOKEN, GrantType.CLIENT_CREDENTIALS)); + private String grantType; private String username; private SecureString password; @@ -49,33 +84,58 @@ public final class CreateTokenRequest extends ActionRequest { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if ("password".equals(grantType)) { - if (Strings.isNullOrEmpty(username)) { - validationException = addValidationError("username is missing", validationException); - } - if (password == null || password.getChars() == null || password.getChars().length == 0) { - validationException = addValidationError("password is missing", validationException); - } - if (refreshToken != null) { - validationException = - addValidationError("refresh_token is not supported with the password grant_type", validationException); - } - } else if ("refresh_token".equals(grantType)) { - if (username != null) { - validationException = - addValidationError("username is not supported with the refresh_token grant_type", validationException); - } - if (password != null) { - validationException = - addValidationError("password is not supported with the refresh_token grant_type", validationException); - } - if (refreshToken == null) { - validationException = addValidationError("refresh_token is missing", validationException); + GrantType type = GrantType.fromString(grantType); + if (type != null) { + switch (type) { + case PASSWORD: + if (Strings.isNullOrEmpty(username)) { + validationException = addValidationError("username is missing", validationException); + } + if (password == null || password.getChars() == null || password.getChars().length == 0) { + validationException = addValidationError("password is missing", validationException); + } + if (refreshToken != null) { + validationException = + addValidationError("refresh_token is not supported with the password grant_type", validationException); + } + break; + case REFRESH_TOKEN: + if (username != null) { + validationException = + addValidationError("username is not supported with the refresh_token grant_type", validationException); + } + if (password != null) { + validationException = + addValidationError("password is not supported with the refresh_token grant_type", validationException); + } + if (refreshToken == null) { + validationException = addValidationError("refresh_token is missing", validationException); + } + break; + case CLIENT_CREDENTIALS: + if (username != null) { + validationException = + addValidationError("username is not supported with the client_credentials grant_type", validationException); + } + if (password != null) { + validationException = + addValidationError("password is not supported with the client_credentials grant_type", validationException); + } + if (refreshToken != null) { + validationException = addValidationError("refresh_token is not supported with the client_credentials grant_type", + validationException); + } + break; + default: + validationException = addValidationError("grant_type only supports the values: [" + + SUPPORTED_GRANT_TYPES.stream().map(GrantType::getValue).collect(Collectors.joining(", ")) + "]", + validationException); } } else { - validationException = addValidationError("grant_type only supports the values: [password, refresh_token]", validationException); + validationException = addValidationError("grant_type only supports the values: [" + + 
SUPPORTED_GRANT_TYPES.stream().map(GrantType::getValue).collect(Collectors.joining(", ")) + "]", + validationException); } - return validationException; } @@ -126,6 +186,11 @@ public final class CreateTokenRequest extends ActionRequest { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); + if (out.getVersion().before(Version.V_6_5_0) && GrantType.CLIENT_CREDENTIALS.getValue().equals(grantType)) { + throw new IllegalArgumentException("a request with the client_credentials grant_type cannot be sent to version [" + + out.getVersion() + "]"); + } + out.writeString(grantType); if (out.getVersion().onOrAfter(Version.V_6_2_0)) { out.writeOptionalString(username); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java index 1cb1029e820..30111a92431 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java @@ -59,8 +59,14 @@ public final class CreateTokenResponse extends ActionResponse implements ToXCont out.writeString(tokenString); out.writeTimeValue(expiresIn); out.writeOptionalString(scope); - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeString(refreshToken); + if (out.getVersion().onOrAfter(Version.V_6_5_0)) { + out.writeOptionalString(refreshToken); + } else if (out.getVersion().onOrAfter(Version.V_6_2_0)) { + if (refreshToken == null) { + out.writeString(""); + } else { + out.writeString(refreshToken); + } } } @@ -70,7 +76,9 @@ public final class CreateTokenResponse extends ActionResponse implements ToXCont tokenString = in.readString(); expiresIn = in.readTimeValue(); scope = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { + if (in.getVersion().onOrAfter(Version.V_6_5_0)) { + refreshToken = in.readOptionalString(); + } else if (in.getVersion().onOrAfter(Version.V_6_2_0)) { refreshToken = in.readString(); } } @@ -90,4 +98,20 @@ public final class CreateTokenResponse extends ActionResponse implements ToXCont } return builder.endObject(); } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CreateTokenResponse that = (CreateTokenResponse) o; + return Objects.equals(tokenString, that.tokenString) && + Objects.equals(expiresIn, that.expiresIn) && + Objects.equals(scope, that.scope) && + Objects.equals(refreshToken, that.refreshToken); + } + + @Override + public int hashCode() { + return Objects.hash(tokenString, expiresIn, scope, refreshToken); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java index c3e126b92ce..0cf7ace1103 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.xpack.core.security.action.user; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; 
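The CreateTokenResponse change above is the usual wire-compatibility dance: 6.5+ streams carry the refresh token as a true optional, while 6.2-6.4 streams still expect a non-null string, so null is smoothed over as "" on the way out. A sketch of how such a change is typically exercised, mirroring the BWC test pattern (the constructor arity and the concrete version constant are assumptions for illustration):

```java
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse;

class TokenBwcDemo {                                  // illustrative harness
    static void roundTrip() throws Exception {
        CreateTokenResponse response =
            new CreateTokenResponse("token", TimeValue.timeValueMinutes(20), null, null); // assumed ctor
        BytesStreamOutput out = new BytesStreamOutput();
        out.setVersion(Version.V_6_2_4);              // pretend the receiver is a 6.2.x node
        response.writeTo(out);                        // the null refresh token is written as ""
        StreamInput in = out.bytes().streamInput();
        in.setVersion(Version.V_6_2_4);               // the reader must assume the same wire version
        // ... read back via the matching-version readFrom
    }
}
```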
-import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java index 05b3af41de2..d7538c2a556 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.support.Validation; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java index 0da525b6ffc..666b79cfe5d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.xpack.core.security.action.user; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; import java.util.Collection; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java index dc43db0115e..4f5aed012cb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java @@ -109,7 +109,7 @@ public class HasPrivilegesRequest extends ActionRequest implements UserRequest { for (int i = 0; i < indexSize; i++) { indexPrivileges[i] = RoleDescriptor.IndicesPrivileges.createFrom(in); } - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { applicationPrivileges = in.readArray(ApplicationResourcePrivileges::createFrom, ApplicationResourcePrivileges[]::new); } } @@ -123,7 +123,7 @@ public class HasPrivilegesRequest extends ActionRequest implements UserRequest { for (RoleDescriptor.IndicesPrivileges priv : indexPrivileges) { priv.writeTo(out); } - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeArray(ApplicationResourcePrivileges::write, applicationPrivileges); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java index b0711fc1bc1..8cd8b510c64 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java @@ -66,7 +66,7 @@ public class HasPrivilegesResponse extends ActionResponse { super.readFrom(in); completeMatch = in.readBoolean(); index = readResourcePrivileges(in); - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { application = in.readMap(StreamInput::readString, HasPrivilegesResponse::readResourcePrivileges); } } @@ -87,7 +87,7 @@ public class HasPrivilegesResponse extends ActionResponse { super.writeTo(out); out.writeBoolean(completeMatch); writeResourcePrivileges(out, index); - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeMap(application, StreamOutput::writeString, HasPrivilegesResponse::writeResourcePrivileges); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java index e704259396a..f39c5682516 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java @@ -3,6 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.core.security.action.user; import org.elasticsearch.action.ActionRequest; @@ -31,7 +32,6 @@ public class PutUserRequest extends ActionRequest implements UserRequest, WriteR private String email; private Map metadata; private char[] passwordHash; - private char[] password; private boolean enabled = true; private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; @@ -50,9 +50,6 @@ public class PutUserRequest extends ActionRequest implements UserRequest, WriteR if (metadata != null && metadata.keySet().stream().anyMatch(s -> s.startsWith("_"))) { validationException = addValidationError("metadata keys may not start with [_]", validationException); } - if (password != null && passwordHash != null) { - validationException = addValidationError("only one of [password, passwordHash] can be provided", validationException); - } // we do not check for a password hash here since it is possible that the user exists and we don't want to update the password return validationException; } @@ -85,10 +82,6 @@ public class PutUserRequest extends ActionRequest implements UserRequest, WriteR this.enabled = enabled; } - public void password(@Nullable char[] password) { - this.password = password; - } - /** * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}, the default), wait for a refresh ( * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}). 
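With the transient password field gone, the remaining PutUserRequest validation is easy to exercise. A minimal sketch; the username(...) and metadata(...) setters are assumed to follow the void-setter pattern of enabled(boolean) shown above and are not part of this hunk:

```java
import java.util.Collections;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.xpack.core.security.action.user.PutUserRequest;

class MetadataValidationDemo {                        // illustrative harness
    public static void main(String[] args) {
        PutUserRequest request = new PutUserRequest();
        request.username("jdoe");                                       // assumed setter
        request.metadata(Collections.singletonMap("_internal", true));  // assumed setter
        ActionRequestValidationException e = request.validate();
        // e.validationErrors() contains "metadata keys may not start with [_]"
    }
}
```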
@@ -138,11 +131,6 @@ public class PutUserRequest extends ActionRequest implements UserRequest, WriteR return new String[] { username }; } - @Nullable - public char[] password() { - return password; - } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -161,9 +149,6 @@ public class PutUserRequest extends ActionRequest implements UserRequest, WriteR super.writeTo(out); out.writeString(username); writeCharArrayToStream(out, passwordHash); - if (password != null) { - throw new IllegalStateException("password cannot be serialized. it is only used for HL rest"); - } out.writeStringArray(roles); out.writeOptionalString(fullName); out.writeOptionalString(email); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java index 7dc958bbef9..eea804d81fe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java @@ -20,7 +20,7 @@ import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.support.Validation; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserResponse.java index 30d15e5a3fd..4d0e5fdfa4b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserResponse.java @@ -3,12 +3,13 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.core.security.action.user; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -17,7 +18,7 @@ import java.io.IOException; * Response when adding a user to the security index. Returns a * single boolean field for whether the user was created or updated. 
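The switch from ToXContentObject to ToXContentFragment means PutUserResponse no longer emits its own enclosing object; the caller owns the wrapper and can splice in sibling fields. A minimal sketch of the new calling convention (the extra field is illustrative):

```java
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.xpack.core.security.action.user.PutUserResponse;

class FragmentWrappingDemo {                          // illustrative harness
    static void render(PutUserResponse response) throws Exception {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();                                  // the caller opens the object now
        response.toXContent(builder, ToXContent.EMPTY_PARAMS);  // emits only "created": true|false
        builder.field("username", "jdoe");                      // illustrative sibling field
        builder.endObject();                                    // {"created":true,"username":"jdoe"}
    }
}
```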
*/ -public class PutUserResponse extends ActionResponse implements ToXContentObject { +public class PutUserResponse extends ActionResponse implements ToXContentFragment { private boolean created; @@ -32,12 +33,6 @@ public class PutUserResponse extends ActionResponse implements ToXContentObject return created; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject().field("created", created).endObject(); - return builder; - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -49,4 +44,9 @@ public class PutUserResponse extends ActionResponse implements ToXContentObject super.readFrom(in); this.created = in.readBoolean(); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field("created", created); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java index e72df007cf6..161d9d44999 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.xpack.core.security.user.InternalUserSerializationHelper; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; import java.util.Base64; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java index 08e01025e7e..0f073ef4ae3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.core.security.authc; import org.elasticsearch.common.Nullable; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.util.Objects; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java index d6f678a2dcb..736b9378e38 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java @@ -12,9 +12,11 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.transport.TransportMessage; import org.elasticsearch.xpack.core.XPackField; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.security.support.Exceptions.authenticationError; @@ -44,12 +46,42 @@ public class DefaultAuthenticationFailureHandler implements 
AuthenticationFailur
      * be sent as failure response.
      * @see Realm#getAuthenticationFailureHeaders()
      */
-    public DefaultAuthenticationFailureHandler(Map<String, List<String>> failureResponseHeaders) {
+    public DefaultAuthenticationFailureHandler(final Map<String, List<String>> failureResponseHeaders) {
         if (failureResponseHeaders == null || failureResponseHeaders.isEmpty()) {
-            failureResponseHeaders = Collections.singletonMap("WWW-Authenticate",
+            this.defaultFailureResponseHeaders = Collections.singletonMap("WWW-Authenticate",
                     Collections.singletonList("Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\""));
+        } else {
+            this.defaultFailureResponseHeaders = Collections.unmodifiableMap(failureResponseHeaders.entrySet().stream().collect(Collectors
+                    .toMap(entry -> entry.getKey(), entry -> {
+                        if (entry.getKey().equalsIgnoreCase("WWW-Authenticate")) {
+                            List<String> values = new ArrayList<>(entry.getValue());
+                            Collections.sort(values, (o1, o2) -> authSchemePriority(o1).compareTo(authSchemePriority(o2)));
+                            return Collections.unmodifiableList(values);
+                        } else {
+                            return Collections.unmodifiableList(entry.getValue());
+                        }
+                    })));
+        }
+    }
+
+    /**
+     * For a given 'WWW-Authenticate' header value, returns the priority based on
+     * the auth-scheme. A lower number denotes a more secure and preferred
+     * auth-scheme than a higher number.
+     *
+     * @param headerValue string starting with the auth-scheme name
+     * @return integer value denoting the priority of the given auth scheme.
+     */
+    private static Integer authSchemePriority(final String headerValue) {
+        if (headerValue.regionMatches(true, 0, "negotiate", 0, "negotiate".length())) {
+            return 0;
+        } else if (headerValue.regionMatches(true, 0, "bearer", 0, "bearer".length())) {
+            return 1;
+        } else if (headerValue.regionMatches(true, 0, "basic", 0, "basic".length())) {
+            return 2;
+        } else {
+            return 3;
         }
-        this.defaultFailureResponseHeaders = Collections.unmodifiableMap(failureResponseHeaders);
     }
 
     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java
index ae04d474f41..bc8869d5d83 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java
@@ -8,8 +8,10 @@ package org.elasticsearch.xpack.core.security.authc;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings;
 import org.elasticsearch.xpack.core.XPackField;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 
 import java.util.Collections;
 import java.util.HashMap;
@@ -146,6 +148,14 @@ public abstract class Realm implements Comparable<Realm> {
         return type + "/" + config.name;
     }
 
+    /**
+     * This is a no-op in the base class, but allows realms to be aware of what other realms are configured.
+     *
+     * @see DelegatedAuthorizationSettings
+     */
+    public void initialize(Iterable<Realm> realms, XPackLicenseState licenseState) {
+    }
+
     /**
      * A factory interface to construct a security realm.
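The effect of authSchemePriority above is that a client probing with several WWW-Authenticate challenges sees the strongest scheme first. A small self-contained demonstration of the ordering; the ranking is re-implemented inline because the real method is private, and the header values are illustrative:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.function.Function;

class ChallengeOrderDemo {                            // illustrative harness
    public static void main(String[] args) {
        // Same ranking as authSchemePriority: negotiate < bearer < basic < anything else.
        Function<String, Integer> priority = v ->
            v.regionMatches(true, 0, "negotiate", 0, 9) ? 0
                : v.regionMatches(true, 0, "bearer", 0, 6) ? 1
                : v.regionMatches(true, 0, "basic", 0, 5) ? 2
                : 3;
        List<String> values = new ArrayList<>(Arrays.asList(
            "Basic realm=\"security\"", "Bearer realm=\"security\"", "Negotiate"));
        values.sort(Comparator.comparing(priority::apply));
        System.out.println(values); // [Negotiate, Bearer realm="security", Basic realm="security"]
    }
}
```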
*/ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java index f7fabab2799..daf1775a80a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.security.SecurityExtension; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -181,7 +182,8 @@ public class RealmSettings { settingSet.add(TYPE_SETTING); settingSet.add(ENABLED_SETTING); settingSet.add(ORDER_SETTING); - final AbstractScopedSettings validator = new AbstractScopedSettings(settings, settingSet, Setting.Property.NodeScope) { }; + final AbstractScopedSettings validator = + new AbstractScopedSettings(settings, settingSet, Collections.emptySet(), Setting.Property.NodeScope) { }; try { validator.validate(settings, false); } catch (RuntimeException e) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java index c9868f448b4..5a228133073 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java @@ -19,6 +19,7 @@ public class ClientReservedRealm { case UsernamesField.KIBANA_NAME: case UsernamesField.LOGSTASH_NAME: case UsernamesField.BEATS_NAME: + case UsernamesField.APM_NAME: return XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings); default: return AnonymousUser.isAnonymousUsername(username, settings); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/kerberos/KerberosRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/kerberos/KerberosRealmSettings.java index 7524ef08c1e..656632a2ec6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/kerberos/KerberosRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/kerberos/KerberosRealmSettings.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import java.util.Set; @@ -44,7 +45,9 @@ public final class KerberosRealmSettings { * @return the valid set of {@link Setting}s for a {@value #TYPE} realm */ public static Set> getSettings() { - return Sets.newHashSet(HTTP_SERVICE_KEYTAB_PATH, CACHE_TTL_SETTING, CACHE_MAX_USERS_SETTING, SETTING_KRB_DEBUG_ENABLE, - SETTING_REMOVE_REALM_NAME); + final Set> settings = Sets.newHashSet(HTTP_SERVICE_KEYTAB_PATH, CACHE_TTL_SETTING, CACHE_MAX_USERS_SETTING, + SETTING_KRB_DEBUG_ENABLE, SETTING_REMOVE_REALM_NAME); + settings.addAll(DelegatedAuthorizationSettings.getSettings()); + return settings; } } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java index 0bb9f195af7..3f79c722be3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapMetaDataResolverSettings; import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.authc.support.mapper.CompositeRoleMapperSettings; import java.util.HashSet; @@ -37,6 +38,7 @@ public final class LdapRealmSettings { assert LDAP_TYPE.equals(type) : "type [" + type + "] is unknown. expected one of [" + AD_TYPE + ", " + LDAP_TYPE + "]"; settings.addAll(LdapSessionFactorySettings.getSettings()); settings.addAll(LdapUserSearchSessionFactorySettings.getSettings()); + settings.addAll(DelegatedAuthorizationSettings.getSettings()); } settings.addAll(LdapMetaDataResolverSettings.getSettings()); return settings; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java index a3539b30d3e..53af4938a8f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.security.authc.pki; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.authc.support.mapper.CompositeRoleMapperSettings; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; @@ -43,6 +44,7 @@ public final class PkiRealmSettings { settings.add(SSL_SETTINGS.truststoreAlgorithm); settings.add(SSL_SETTINGS.caPaths); + settings.addAll(DelegatedAuthorizationSettings.getSettings()); settings.addAll(CompositeRoleMapperSettings.getSettings()); return settings; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java index cf28b995127..e254cee1243 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.security.authc.saml; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.X509KeyPairSettings; @@ 
-89,6 +90,7 @@ public class SamlRealmSettings { set.addAll(DN_ATTRIBUTE.settings()); set.addAll(NAME_ATTRIBUTE.settings()); set.addAll(MAIL_ATTRIBUTE.settings()); + set.addAll(DelegatedAuthorizationSettings.getSettings()); return set; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DelegatedAuthorizationSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DelegatedAuthorizationSettings.java new file mode 100644 index 00000000000..b8384a76b41 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DelegatedAuthorizationSettings.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.security.authc.support; + +import org.elasticsearch.common.settings.Setting; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; + +/** + * Settings related to "Delegated Authorization" (aka Lookup Realms) + */ +public class DelegatedAuthorizationSettings { + + public static final Setting> AUTHZ_REALMS = Setting.listSetting("authorization_realms", + Collections.emptyList(), Function.identity(), Setting.Property.NodeScope); + + public static Collection> getSettings() { + return Collections.singleton(AUTHZ_REALMS); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java index 54fd8cc7974..69712a6f33d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -243,16 +243,11 @@ public class RoleDescriptor implements ToXContentObject { String[] runAs = in.readStringArray(); Map metadata = in.readMap(); - final Map transientMetadata; - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - transientMetadata = in.readMap(); - } else { - transientMetadata = Collections.emptyMap(); - } + final Map transientMetadata = in.readMap(); final ApplicationResourcePrivileges[] applicationPrivileges; final ConditionalClusterPrivilege[] conditionalClusterPrivileges; - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { applicationPrivileges = in.readArray(ApplicationResourcePrivileges::createFrom, ApplicationResourcePrivileges[]::new); conditionalClusterPrivileges = ConditionalClusterPrivileges.readArray(in); } else { @@ -273,10 +268,8 @@ public class RoleDescriptor implements ToXContentObject { } out.writeStringArray(descriptor.runAs); out.writeMap(descriptor.metadata); - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeMap(descriptor.transientMetadata); - } - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeMap(descriptor.transientMetadata); + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeArray(ApplicationResourcePrivileges::write, descriptor.applicationPrivileges); ConditionalClusterPrivileges.writeArray(out, descriptor.getConditionalClusterPrivileges()); } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java index 8559ab0703b..6d3864aa3eb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java @@ -35,6 +35,7 @@ import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -70,7 +71,11 @@ public final class FieldSubsetReader extends FilterLeafReader { super(in, new FilterDirectoryReader.SubReaderWrapper() { @Override public LeafReader wrap(LeafReader reader) { - return new FieldSubsetReader(reader, filter); + try { + return new FieldSubsetReader(reader, filter); + } catch (IOException e) { + throw new UncheckedIOException(e); + } } }); this.filter = filter; @@ -109,11 +114,13 @@ public final class FieldSubsetReader extends FilterLeafReader { private final FieldInfos fieldInfos; /** An automaton that only accepts authorized fields. */ private final CharacterRunAutomaton filter; + /** {@link Terms} cache with filtered stats for the {@link FieldNamesFieldMapper} field. */ + private final Terms fieldNamesFilterTerms; /** * Wrap a single segment, exposing a subset of its fields. */ - FieldSubsetReader(LeafReader in, CharacterRunAutomaton filter) { + FieldSubsetReader(LeafReader in, CharacterRunAutomaton filter) throws IOException { super(in); ArrayList filteredInfos = new ArrayList<>(); for (FieldInfo fi : in.getFieldInfos()) { @@ -123,6 +130,8 @@ public final class FieldSubsetReader extends FilterLeafReader { } fieldInfos = new FieldInfos(filteredInfos.toArray(new FieldInfo[filteredInfos.size()])); this.filter = filter; + final Terms fieldNameTerms = super.terms(FieldNamesFieldMapper.NAME); + this.fieldNamesFilterTerms = fieldNameTerms == null ? null : new FieldNamesTerms(fieldNameTerms); } /** returns true if this field is allowed. */ @@ -346,21 +355,14 @@ public final class FieldSubsetReader extends FilterLeafReader { } } - private Terms wrapTerms(Terms terms, String field) { + private Terms wrapTerms(Terms terms, String field) throws IOException { if (!hasField(field)) { return null; } else if (FieldNamesFieldMapper.NAME.equals(field)) { // for the _field_names field, fields for the document // are encoded as postings, where term is the field. // so we hide terms for fields we filter out. - if (terms != null) { - // check for null, in case term dictionary is not a ghostbuster - // So just because its in fieldinfos and "indexed=true" doesn't mean you can go grab a Terms for it. - // It just means at one point there was a document with that field indexed... - // The fields infos isn't updates/removed even if no docs refer to it - terms = new FieldNamesTerms(terms); - } - return terms; + return fieldNamesFilterTerms; } else { return terms; } @@ -371,9 +373,25 @@ public final class FieldSubsetReader extends FilterLeafReader { * representing fields that should not be visible in this reader. 
*/ class FieldNamesTerms extends FilterTerms { + final long size; + final long sumDocFreq; + final long sumTotalFreq; - FieldNamesTerms(Terms in) { + FieldNamesTerms(Terms in) throws IOException { super(in); + assert in.hasFreqs() == false; + // re-compute the stats for the field to take + // into account the filtered terms. + final TermsEnum e = iterator(); + long size = 0, sumDocFreq = 0, sumTotalFreq = 0; + while (e.next() != null) { + size ++; + sumDocFreq += e.docFreq(); + sumTotalFreq += e.totalTermFreq(); + } + this.size = size; + this.sumDocFreq = sumDocFreq; + this.sumTotalFreq = sumTotalFreq; } @Override @@ -381,27 +399,25 @@ public final class FieldSubsetReader extends FilterLeafReader { return new FieldNamesTermsEnum(in.iterator()); } - // we don't support field statistics (since we filter out terms) - // but this isn't really a big deal: _field_names is not used for ranking. - @Override - public int getDocCount() throws IOException { - return -1; + public long size() throws IOException { + return size; } @Override public long getSumDocFreq() throws IOException { - return -1; + return sumDocFreq; } @Override public long getSumTotalTermFreq() throws IOException { - return -1; + return sumTotalFreq; } @Override - public long size() throws IOException { - return -1; + public int getDocCount() throws IOException { + // it is costly to recompute this value so we assume that docCount == maxDoc. + return maxDoc(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java index e812f0cfc73..60b598a3a99 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java @@ -63,7 +63,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader; import org.elasticsearch.xpack.core.security.support.Exceptions; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; import java.util.ArrayList; @@ -183,7 +183,7 @@ public class SecurityIndexSearcherWrapper extends IndexSearcherWrapper { IndexSearcher indexSearcher = new IndexSearcherWrapper((DocumentSubsetDirectoryReader) directoryReader); indexSearcher.setQueryCache(indexSearcher.getQueryCache()); indexSearcher.setQueryCachingPolicy(indexSearcher.getQueryCachingPolicy()); - indexSearcher.setSimilarity(indexSearcher.getSimilarity(true)); + indexSearcher.setSimilarity(indexSearcher.getSimilarity()); return indexSearcher; } return searcher; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java index 068c722c778..6c52d3e75dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java @@ -6,6 +6,7 @@ package 
org.elasticsearch.xpack.core.security.authz.privilege; import org.apache.lucene.util.automaton.Automaton; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; @@ -41,6 +42,7 @@ public final class ClusterPrivilege extends Privilege { private static final Automaton MANAGE_IDX_TEMPLATE_AUTOMATON = patterns("indices:admin/template/*"); private static final Automaton MANAGE_INGEST_PIPELINE_AUTOMATON = patterns("cluster:admin/ingest/pipeline/*"); private static final Automaton MANAGE_ROLLUP_AUTOMATON = patterns("cluster:admin/xpack/rollup/*", "cluster:monitor/xpack/rollup/*"); + private static final Automaton MANAGE_CCR_AUTOMATON = patterns("cluster:admin/xpack/ccr/*", ClusterStateAction.NAME); public static final ClusterPrivilege NONE = new ClusterPrivilege("none", Automatons.EMPTY); public static final ClusterPrivilege ALL = new ClusterPrivilege("all", ALL_CLUSTER_AUTOMATON); @@ -60,6 +62,7 @@ public final class ClusterPrivilege extends Privilege { public static final ClusterPrivilege MANAGE_SECURITY = new ClusterPrivilege("manage_security", MANAGE_SECURITY_AUTOMATON); public static final ClusterPrivilege MANAGE_SAML = new ClusterPrivilege("manage_saml", MANAGE_SAML_AUTOMATON); public static final ClusterPrivilege MANAGE_PIPELINE = new ClusterPrivilege("manage_pipeline", "cluster:admin/ingest/pipeline/*"); + public static final ClusterPrivilege MANAGE_CCR = new ClusterPrivilege("manage_ccr", MANAGE_CCR_AUTOMATON); public static final Predicate ACTION_MATCHER = ClusterPrivilege.ALL.predicate(); @@ -80,6 +83,7 @@ public final class ClusterPrivilege extends Privilege { .put("manage_saml", MANAGE_SAML) .put("manage_pipeline", MANAGE_PIPELINE) .put("manage_rollup", MANAGE_ROLLUP) + .put("manage_ccr", MANAGE_CCR) .immutableMap(); private static final ConcurrentHashMap, ClusterPrivilege> CACHE = new ConcurrentHashMap<>(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java index 723dff61679..779f2765f48 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -55,6 +55,7 @@ public final class IndexPrivilege extends Privilege { private static final Automaton VIEW_METADATA_AUTOMATON = patterns(GetAliasesAction.NAME, AliasesExistAction.NAME, GetIndexAction.NAME, IndicesExistsAction.NAME, GetFieldMappingsAction.NAME + "*", GetMappingsAction.NAME, ClusterSearchShardsAction.NAME, TypesExistsAction.NAME, ValidateQueryAction.NAME + "*", GetSettingsAction.NAME); + private static final Automaton CREATE_FOLLOW_INDEX_AUTOMATON = patterns("indices:admin/xpack/ccr/create_and_follow_index"); public static final IndexPrivilege NONE = new IndexPrivilege("none", Automatons.EMPTY); public static final IndexPrivilege ALL = new IndexPrivilege("all", ALL_AUTOMATON); @@ -69,6 +70,7 @@ public final class IndexPrivilege extends Privilege { public static final IndexPrivilege DELETE_INDEX = new IndexPrivilege("delete_index", DELETE_INDEX_AUTOMATON); public static final IndexPrivilege CREATE_INDEX = new IndexPrivilege("create_index", CREATE_INDEX_AUTOMATON); public static final 
IndexPrivilege VIEW_METADATA = new IndexPrivilege("view_index_metadata", VIEW_METADATA_AUTOMATON); + public static final IndexPrivilege CREATE_FOLLOW_INDEX = new IndexPrivilege("create_follow_index", CREATE_FOLLOW_INDEX_AUTOMATON); private static final Map VALUES = MapBuilder.newMapBuilder() .put("none", NONE) @@ -84,6 +86,7 @@ public final class IndexPrivilege extends Privilege { .put("delete_index", DELETE_INDEX) .put("view_index_metadata", VIEW_METADATA) .put("read_cross_cluster", READ_CROSS_CLUSTER) + .put("create_follow_index", CREATE_FOLLOW_INDEX) .immutableMap(); public static final Predicate ACTION_MATCHER = ALL.predicate(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 0c593436365..22cb1c357c6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -112,6 +112,8 @@ public class ReservedRolesStore { null, MetadataUtils.DEFAULT_RESERVED_METADATA)) .put(UsernamesField.BEATS_ROLE, new RoleDescriptor(UsernamesField.BEATS_ROLE, new String[] { "monitor", MonitoringBulkAction.NAME}, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put(UsernamesField.APM_ROLE, new RoleDescriptor(UsernamesField.APM_ROLE, + new String[] { "monitor", MonitoringBulkAction.NAME}, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) .put("machine_learning_user", new RoleDescriptor("machine_learning_user", new String[] { "monitor_ml" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".ml-anomalies*", ".ml-notifications").privileges("view_index_metadata", "read").build() }, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java index f828a82d95f..36b480c29c7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java @@ -12,6 +12,7 @@ import io.netty.channel.ChannelOutboundHandlerAdapter; import io.netty.channel.ChannelPromise; import io.netty.handler.ssl.SslHandler; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; @@ -19,6 +20,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.netty4.Netty4Transport; @@ -27,7 +29,10 @@ import org.elasticsearch.xpack.core.security.transport.SSLExceptionHelper; import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.SSLService; +import 
javax.net.ssl.SNIHostName; +import javax.net.ssl.SNIServerName; import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; import java.net.InetSocketAddress; import java.net.SocketAddress; import java.util.Collections; @@ -106,8 +111,8 @@ public class SecurityNetty4Transport extends Netty4Transport { } @Override - protected ChannelHandler getClientChannelInitializer() { - return new SecurityClientChannelInitializer(); + protected ChannelHandler getClientChannelInitializer(DiscoveryNode node) { + return new SecurityClientChannelInitializer(node); } @Override @@ -167,16 +172,28 @@ public class SecurityNetty4Transport extends Netty4Transport { private class SecurityClientChannelInitializer extends ClientChannelInitializer { private final boolean hostnameVerificationEnabled; + private final SNIHostName serverName; - SecurityClientChannelInitializer() { + SecurityClientChannelInitializer(DiscoveryNode node) { this.hostnameVerificationEnabled = sslEnabled && sslConfiguration.verificationMode().isHostnameVerificationEnabled(); + String configuredServerName = node.getAttributes().get("server_name"); + if (configuredServerName != null) { + try { + serverName = new SNIHostName(configuredServerName); + } catch (IllegalArgumentException e) { + throw new ConnectTransportException(node, "invalid DiscoveryNode server_name [" + configuredServerName + "]", e); + } + } else { + serverName = null; + } } @Override protected void initChannel(Channel ch) throws Exception { super.initChannel(ch); if (sslEnabled) { - ch.pipeline().addFirst(new ClientSslHandlerInitializer(sslConfiguration, sslService, hostnameVerificationEnabled)); + ch.pipeline().addFirst(new ClientSslHandlerInitializer(sslConfiguration, sslService, hostnameVerificationEnabled, + serverName)); } } } @@ -186,11 +203,14 @@ public class SecurityNetty4Transport extends Netty4Transport { private final boolean hostnameVerificationEnabled; private final SSLConfiguration sslConfiguration; private final SSLService sslService; + private final SNIServerName serverName; - private ClientSslHandlerInitializer(SSLConfiguration sslConfiguration, SSLService sslService, boolean hostnameVerificationEnabled) { + private ClientSslHandlerInitializer(SSLConfiguration sslConfiguration, SSLService sslService, boolean hostnameVerificationEnabled, + SNIServerName serverName) { this.sslConfiguration = sslConfiguration; this.hostnameVerificationEnabled = hostnameVerificationEnabled; this.sslService = sslService; + this.serverName = serverName; } @Override @@ -207,6 +227,11 @@ public class SecurityNetty4Transport extends Netty4Transport { } sslEngine.setUseClientMode(true); + if (serverName != null) { + SSLParameters sslParameters = sslEngine.getSSLParameters(); + sslParameters.setServerNames(Collections.singletonList(serverName)); + sslEngine.setSSLParameters(sslParameters); + } ctx.pipeline().replace(this, "ssl", new SslHandler(sslEngine)); super.connect(ctx, remoteAddress, localAddress, promise); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/APMSystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/APMSystemUser.java new file mode 100644 index 00000000000..c26b66875e6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/APMSystemUser.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.Version; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +/** + * Built in user for APM server internals. Currently used for APM server monitoring. + */ +public class APMSystemUser extends User { + + public static final String NAME = UsernamesField.APM_NAME; + public static final String ROLE_NAME = UsernamesField.APM_ROLE; + public static final Version DEFINED_SINCE = Version.V_6_5_0; + public static final BuiltinUserInfo USER_INFO = new BuiltinUserInfo(NAME, ROLE_NAME, DEFINED_SINCE); + + public APMSystemUser(boolean enabled) { + super(NAME, new String[]{ ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java index ae6f41c3a1b..36354ff58b3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.support.MetadataUtils; import java.util.Collections; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java index 9db64da97a5..dfa437fa8d2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.security.user; import org.elasticsearch.Version; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.support.MetadataUtils; /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java index c58f86ea422..ec618a4f482 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.support.MetadataUtils; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java index c0b45aea57c..fa41828a7bb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java @@ -7,7 +7,6 @@ package 
org.elasticsearch.xpack.core.security.user; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.protocol.xpack.security.User; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java index 3e816aa54bc..8dfa149987d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.support.MetadataUtils; /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java index 047758177fb..88381482ef3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.Version; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.support.MetadataUtils; /** @@ -16,8 +14,6 @@ public class LogstashSystemUser extends User { public static final String NAME = UsernamesField.LOGSTASH_NAME; public static final String ROLE_NAME = UsernamesField.LOGSTASH_ROLE; - public static final Version DEFINED_SINCE = Version.V_5_2_0; - public static final BuiltinUserInfo USER_INFO = new BuiltinUserInfo(NAME, ROLE_NAME, DEFINED_SINCE); public LogstashSystemUser(boolean enabled) { super(NAME, new String[]{ ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java index 1c7ac129d17..4569c2a68a0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.authz.privilege.SystemPrivilege; import java.util.function.Predicate; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/User.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java similarity index 85% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/User.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java index 42e957ecf2d..028b14f882a 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/User.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java @@ -1,25 +1,10 @@ /* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. */ +package org.elasticsearch.xpack.core.security.user; -package org.elasticsearch.protocol.xpack.security; - -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -199,12 +184,7 @@ public class User implements ToXContentObject { boolean hasInnerUser = input.readBoolean(); if (hasInnerUser) { User innerUser = readFrom(input); - if (input.getVersion().onOrBefore(Version.V_5_4_0)) { - // backcompat: runas user was read first, so reverse outer and inner - return new User(innerUser, outerUser); - } else { - return new User(outerUser, innerUser); - } + return new User(outerUser, innerUser); } else { return outerUser; } @@ -221,11 +201,6 @@ public class User implements ToXContentObject { if (user.authenticatedUser == null) { // no backcompat necessary, since there is no inner user writeUser(user, output); - } else if (output.getVersion().onOrBefore(Version.V_5_4_0)) { - // backcompat: write runas user as the "inner" user - writeUser(user.authenticatedUser, output); - output.writeBoolean(true); - writeUser(user, output); } else { writeUser(user, output); output.writeBoolean(true); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java index 3b691b927b4..bd886567ed1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java @@ -20,6 +20,8 @@ public final class UsernamesField { public static final String LOGSTASH_ROLE = "logstash_system"; public static final String BEATS_NAME = "beats_system"; public static final String BEATS_ROLE = "beats_system"; + public static final String APM_NAME = "apm_system"; + public static final String APM_ROLE = "apm_system"; private UsernamesField() {} } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java index e98df7fb50a..906d3548377 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.protocol.xpack.security.User; - 
/** * internal user that manages xpack security. Has all cluster/indices permissions. */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java index fe50b1b9c88..38c9fe84aa9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/DefaultJDKTrustConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/DefaultJDKTrustConfig.java index ff818bb09f5..0a4c0552f69 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/DefaultJDKTrustConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/DefaultJDKTrustConfig.java @@ -16,6 +16,10 @@ import javax.net.ssl.X509ExtendedTrustManager; import java.io.IOException; import java.nio.file.Path; import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.CertificateException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -34,7 +38,7 @@ class DefaultJDKTrustConfig extends TrustConfig { @Override X509ExtendedTrustManager createTrustManager(@Nullable Environment environment) { try { - return CertParsingUtils.trustManager(null, TrustManagerFactory.getDefaultAlgorithm()); + return CertParsingUtils.trustManager(getSystemTrustStore(), TrustManagerFactory.getDefaultAlgorithm()); } catch (Exception e) { throw new ElasticsearchException("failed to initialize a TrustManagerFactory", e); } @@ -81,4 +85,20 @@ class DefaultJDKTrustConfig extends TrustConfig { return new CombiningTrustConfig(Arrays.asList(INSTANCE, trustConfig)); } } + + /** + * When a PKCS#11 token is used as the system default keystore/truststore, we need to pass the keystore + * password when loading, even for reading certificates only (as opposed to e.g. JKS keystores, where + * we only need to pass the password for reading private key entries).
+ * + * @return the KeyStore used as truststore for PKCS#11, initialized with the password, or null otherwise + */ + private KeyStore getSystemTrustStore() throws KeyStoreException, CertificateException, NoSuchAlgorithmException, IOException { + if (System.getProperty("javax.net.ssl.trustStoreType", "").equalsIgnoreCase("PKCS11")) { + KeyStore keyStore = KeyStore.getInstance("PKCS11"); + keyStore.load(null, System.getProperty("javax.net.ssl.trustStorePassword", "").toCharArray()); + return keyStore; + } + return null; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java index a3814a76a3e..421b30baac7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java @@ -58,6 +58,7 @@ public class PemUtils { private static final String OPENSSL_EC_FOOTER = "-----END EC PRIVATE KEY-----"; private static final String OPENSSL_EC_PARAMS_HEADER = "-----BEGIN EC PARAMETERS-----"; private static final String OPENSSL_EC_PARAMS_FOOTER = "-----END EC PARAMETERS-----"; + private static final String HEADER = "-----BEGIN"; private PemUtils() { throw new IllegalStateException("Utility class should not be instantiated"); @@ -74,6 +75,9 @@ public class PemUtils { public static PrivateKey readPrivateKey(Path keyPath, Supplier<char[]> passwordSupplier) { try (BufferedReader bReader = Files.newBufferedReader(keyPath, StandardCharsets.UTF_8)) { String line = bReader.readLine(); + while (null != line && line.startsWith(HEADER) == false) { + line = bReader.readLine(); + } if (null == line) { throw new IllegalStateException("Error parsing Private Key from: " + keyPath.toString() + ". File is empty or does not contain a PEM header"); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfiguration.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfiguration.java index 731d59a3ac0..48dba65a3a6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfiguration.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfiguration.java @@ -192,7 +192,7 @@ public final class SSLConfiguration { if (global != null) { return global.keyConfig(); } - if (System.getProperty("javax.net.ssl.keyStore") != null) { + if (System.getProperty("javax.net.ssl.keyStore") != null && System.getProperty("javax.net.ssl.keyStore").equals("NONE") == false) { // TODO: we should not support loading a keystore from sysprops...
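// Editor's note (assumption, not part of the original patch): the "NONE" sentinel skipped
// above follows the JSSE convention for PKCS#11 providers, where javax.net.ssl.keyStore is
// set to the literal string "NONE" because the token is not a file on disk; without this
// check the code would try to open a keystore at a path literally named "NONE".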
try (SecureString keystorePassword = new SecureString(System.getProperty("javax.net.ssl.keyStorePassword", ""))) { return new StoreKeyConfig(System.getProperty("javax.net.ssl.keyStore"), KeyStore.getDefaultType(), keystorePassword, @@ -233,7 +233,8 @@ public final class SSLConfiguration { String trustStoreAlgorithm = SETTINGS_PARSER.truststoreAlgorithm.get(settings); String trustStoreType = getKeyStoreType(SETTINGS_PARSER.truststoreType, settings, trustStorePath); return new StoreTrustConfig(trustStorePath, trustStoreType, trustStorePassword, trustStoreAlgorithm); - } else if (global == null && System.getProperty("javax.net.ssl.trustStore") != null) { + } else if (global == null && System.getProperty("javax.net.ssl.trustStore") != null + && System.getProperty("javax.net.ssl.trustStore").equals("NONE") == false) { try (SecureString truststorePassword = new SecureString(System.getProperty("javax.net.ssl.trustStorePassword", ""))) { return new StoreTrustConfig(System.getProperty("javax.net.ssl.trustStore"), KeyStore.getDefaultType(), truststorePassword, System.getProperty("ssl.TrustManagerFactory.algorithm", TrustManagerFactory.getDefaultAlgorithm())); diff --git a/x-pack/plugin/core/src/main/resources/monitoring-beats.json b/x-pack/plugin/core/src/main/resources/monitoring-beats.json index 07756ba2602..d23db9a11a4 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-beats.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-beats.json @@ -224,6 +224,274 @@ } } }, + "apm-server": { + "properties": { + "server": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "concurrent": { + "properties": { + "wait": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "toolarge": { + "type": "long" + }, + "validate": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "queue": { + "type": "long" + }, + "closed": { + "type": "long" + }, + "forbidden": { + "type": "long" + }, + "concurrency": { + "type": "long" + }, + "unauthorized": { + "type": "long" + }, + "decode": { + "type": "long" + }, + "method": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "ok": { + "type": "long" + }, + "accepted": { + "type": "long" + }, + "count": { + "type": "long" + } + } + } + } + } + } + }, + "decoder": { + "properties": { + "deflate": { + "properties": { + "content-length": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "gzip": { + "properties": { + "content-length": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "uncompressed": { + "properties": { + "content-length": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "reader": { + "properties": { + "size": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "missing-content-length": { + "properties": { + "count": { + "type": "long" + } + } + } + } + + }, + "processor": { + "properties": { + "metric": { + "properties": { + "decoding": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "validation": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "transformations": { + "type": "long" + } + } + }, + "sourcemap": { + "properties": { + "counter": { + "type": "long" + }, + "decoding": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + 
"type": "long" + } + } + }, + "validation": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + } + } + }, + "transaction": { + "properties": { + "decoding": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "validation": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "transformations": { + "type": "long" + }, + "transactions": { + "type": "long" + }, + "spans": { + "type": "long" + }, + "stacktraces": { + "type": "long" + }, + "frames": { + "type": "long" + } + } + }, + "error": { + "properties": { + "decoding": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "validation": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "transformations": { + "type": "long" + }, + "errors": { + "type": "long" + }, + "stacktraces": { + "type": "long" + }, + "frames": { + "type": "long" + } + } + } + } + } + } + }, "libbeat": { "properties": { "config": { diff --git a/x-pack/plugin/core/src/main/resources/security-index-template.json b/x-pack/plugin/core/src/main/resources/security-index-template.json index dd17baf0474..bac5930c0d5 100644 --- a/x-pack/plugin/core/src/main/resources/security-index-template.json +++ b/x-pack/plugin/core/src/main/resources/security-index-template.json @@ -4,7 +4,7 @@ "settings" : { "number_of_shards" : 1, "number_of_replicas" : 0, - "auto_expand_replicas" : "0-all", + "auto_expand_replicas" : "0-1", "index.priority": 1000, "index.format": 6, "analysis" : { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java new file mode 100644 index 00000000000..58ca42c7f68 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java @@ -0,0 +1,414 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.action.XPackInfoAction; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.argThat; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public final class RemoteClusterLicenseCheckerTests extends ESTestCase { + + public void testIsNotRemoteIndex() { + assertFalse(RemoteClusterLicenseChecker.isRemoteIndex("local-index")); + } + + public void testIsRemoteIndex() { + assertTrue(RemoteClusterLicenseChecker.isRemoteIndex("remote-cluster:remote-index")); + } + + public void testNoRemoteIndex() { + final List<String> indices = Arrays.asList("local-index1", "local-index2"); + assertFalse(RemoteClusterLicenseChecker.containsRemoteIndex(indices)); + } + + public void testRemoteIndex() { + final List<String> indices = Arrays.asList("local-index", "remote-cluster:remote-index"); + assertTrue(RemoteClusterLicenseChecker.containsRemoteIndex(indices)); + } + + public void testNoRemoteIndices() { + final List<String> indices = Collections.singletonList("local-index"); + assertThat(RemoteClusterLicenseChecker.remoteIndices(indices), is(empty())); + } + + public void testRemoteIndices() { + final List<String> indices = Arrays.asList("local-index1", "remote-cluster1:index1", "local-index2", "remote-cluster2:index1"); + assertThat( + RemoteClusterLicenseChecker.remoteIndices(indices), + containsInAnyOrder("remote-cluster1:index1", "remote-cluster2:index1")); + } + + public void testNoRemoteClusterAliases() { + final List<String> indices = Arrays.asList("local-index1", "local-index2"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), empty()); + } + + public void testOneRemoteClusterAlias() { + final List<String> indices = Arrays.asList("local-index1", "remote-cluster1:remote-index1"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), contains("remote-cluster1")); + } + + public void testMoreThanOneRemoteClusterAlias() { + final List<String> indices = Arrays.asList("remote-cluster1:remote-index1", "local-index1",
"remote-cluster2:remote-index1"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), contains("remote-cluster1", "remote-cluster2")); + } + + public void testDuplicateRemoteClusterAlias() { + final List indices = Arrays.asList( + "remote-cluster1:remote-index1", "local-index1", "remote-cluster2:index1", "remote-cluster2:remote-index2"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), contains("remote-cluster1", "remote-cluster2")); + } + + public void testCheckRemoteClusterLicensesGivenCompatibleLicenses() { + final AtomicInteger index = new AtomicInteger(); + final List responses = new ArrayList<>(); + + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final List remoteClusterAliases = Arrays.asList("valid1", "valid2", "valid3"); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); + final AtomicReference licenseCheck = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + licenseCheck.set(response); + } + + @Override + public void onFailure(final Exception e) { + fail(e.getMessage()); + } + + })); + + verify(client, times(3)).execute(same(XPackInfoAction.INSTANCE), any(), any()); + assertNotNull(licenseCheck.get()); + assertTrue(licenseCheck.get().isSuccess()); + } + + public void testCheckRemoteClusterLicensesGivenIncompatibleLicense() { + final AtomicInteger index = new AtomicInteger(); + final List remoteClusterAliases = Arrays.asList("good", "cluster-with-basic-license", "good2"); + final List responses = new ArrayList<>(); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createBasicLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); + final AtomicReference licenseCheck = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + licenseCheck.set(response); + } + + 
@Override + public void onFailure(final Exception e) { + fail(e.getMessage()); + } + + })); + + verify(client, times(2)).execute(same(XPackInfoAction.INSTANCE), any(), any()); + assertNotNull(licenseCheck.get()); + assertFalse(licenseCheck.get().isSuccess()); + assertThat(licenseCheck.get().remoteClusterLicenseInfo().clusterAlias(), equalTo("cluster-with-basic-license")); + assertThat(licenseCheck.get().remoteClusterLicenseInfo().licenseInfo().getType(), equalTo("BASIC")); + } + + public void testCheckRemoteClusterLicensesGivenNonExistentCluster() { + final AtomicInteger index = new AtomicInteger(); + final List<XPackInfoResponse> responses = new ArrayList<>(); + + final List<String> remoteClusterAliases = Arrays.asList("valid1", "valid2", "valid3"); + final String failingClusterAlias = randomFrom(remoteClusterAliases); + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClientThatThrowsOnGetRemoteClusterClient(threadPool, failingClusterAlias); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener<XPackInfoResponse> listener = + (ActionListener<XPackInfoResponse>) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); + final AtomicReference<Exception> exception = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener<RemoteClusterLicenseChecker.LicenseCheck>() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + fail(); + } + + @Override + public void onFailure(final Exception e) { + exception.set(e); + } + + })); + + assertNotNull(exception.get()); + assertThat(exception.get(), instanceOf(ElasticsearchException.class)); + assertThat(exception.get().getMessage(), equalTo("could not determine the license type for cluster [" + failingClusterAlias + "]")); + assertNotNull(exception.get().getCause()); + assertThat(exception.get().getCause(), instanceOf(IllegalArgumentException.class)); + } + + public void testRemoteClusterLicenseCallUsesSystemContext() throws InterruptedException { + final ThreadPool threadPool = new TestThreadPool(getTestName()); + + try { + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + assertTrue(threadPool.getThreadContext().isSystemContext()); + @SuppressWarnings("unchecked") ActionListener<XPackInfoResponse> listener = + (ActionListener<XPackInfoResponse>) invocationMock.getArguments()[2]; + listener.onResponse(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); + + final List<String> remoteClusterAliases = Collections.singletonList("valid"); + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, doubleInvocationProtectingListener(ActionListener.wrap(() -> {}))); + + verify(client, times(1)).execute(same(XPackInfoAction.INSTANCE), any(), any()); + } finally { + terminate(threadPool); + } + } + + 
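+ // Illustrative sketch (editorial addition, not part of this patch): how a feature that
+ // searches remote indices might consult RemoteClusterLicenseChecker before running. The
+ // method name and the listener wiring are assumptions; the checker constructor and the
+ // helpers (containsRemoteIndex, remoteClusterAliases, buildErrorMessage) are the ones
+ // exercised by the tests in this file.
+ private void ensureRemoteLicenses(final Client client, final List<String> indices, final ActionListener<Void> listener) {
+     if (RemoteClusterLicenseChecker.containsRemoteIndex(indices) == false) {
+         listener.onResponse(null); // nothing remote to check
+         return;
+     }
+     final RemoteClusterLicenseChecker checker =
+             new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode);
+     checker.checkRemoteClusterLicenses(
+             RemoteClusterLicenseChecker.remoteClusterAliases(indices),
+             ActionListener.wrap(
+                     licenseCheck -> {
+                         if (licenseCheck.isSuccess()) {
+                             listener.onResponse(null);
+                         } else {
+                             // buildErrorMessage names the offending cluster and its license mode
+                             listener.onFailure(new ElasticsearchException(RemoteClusterLicenseChecker.buildErrorMessage(
+                                     "Feature", licenseCheck.remoteClusterLicenseInfo(),
+                                     RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)));
+                         }
+                     },
+                     listener::onFailure));
+ }
+ 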
public void testListenerIsExecutedWithCallingContext() throws InterruptedException { + final AtomicInteger index = new AtomicInteger(); + final List<XPackInfoResponse> responses = new ArrayList<>(); + + final ThreadPool threadPool = new TestThreadPool(getTestName()); + + try { + final List<String> remoteClusterAliases = Arrays.asList("valid1", "valid2", "valid3"); + final Client client; + final boolean failure = randomBoolean(); + if (failure) { + client = createMockClientThatThrowsOnGetRemoteClusterClient(threadPool, randomFrom(remoteClusterAliases)); + } else { + client = createMockClient(threadPool); + } + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener<XPackInfoResponse> listener = + (ActionListener<XPackInfoResponse>) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); + + final AtomicBoolean listenerInvoked = new AtomicBoolean(); + threadPool.getThreadContext().putHeader("key", "value"); + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener<RemoteClusterLicenseChecker.LicenseCheck>() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + if (failure) { + fail(); + } + assertThat(threadPool.getThreadContext().getHeader("key"), equalTo("value")); + assertFalse(threadPool.getThreadContext().isSystemContext()); + listenerInvoked.set(true); + } + + @Override + public void onFailure(final Exception e) { + if (failure == false) { + fail(); + } + assertThat(threadPool.getThreadContext().getHeader("key"), equalTo("value")); + assertFalse(threadPool.getThreadContext().isSystemContext()); + listenerInvoked.set(true); + } + + })); + + assertTrue(listenerInvoked.get()); + } finally { + terminate(threadPool); + } + } + + public void testBuildErrorMessageForActiveCompatibleLicense() { + final XPackInfoResponse.LicenseInfo platinumLicense = createPlatinumLicenseResponse(); + final RemoteClusterLicenseChecker.RemoteClusterLicenseInfo info = + new RemoteClusterLicenseChecker.RemoteClusterLicenseInfo("platinum-cluster", platinumLicense); + final AssertionError e = expectThrows( + AssertionError.class, + () -> RemoteClusterLicenseChecker.buildErrorMessage("", info, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)); + assertThat(e, hasToString(containsString("license must be incompatible to build error message"))); + } + + public void testBuildErrorMessageForIncompatibleLicense() { + final XPackInfoResponse.LicenseInfo basicLicense = createBasicLicenseResponse(); + final RemoteClusterLicenseChecker.RemoteClusterLicenseInfo info = + new RemoteClusterLicenseChecker.RemoteClusterLicenseInfo("basic-cluster", basicLicense); + assertThat( + RemoteClusterLicenseChecker.buildErrorMessage("Feature", info, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial), + equalTo("the license mode [BASIC] on cluster [basic-cluster] does not enable [Feature]")); + } + + public void testBuildErrorMessageForInactiveLicense() { + final XPackInfoResponse.LicenseInfo expiredLicense = createExpiredLicenseResponse(); + final
RemoteClusterLicenseChecker.RemoteClusterLicenseInfo info = + new RemoteClusterLicenseChecker.RemoteClusterLicenseInfo("expired-cluster", expiredLicense); + assertThat( + RemoteClusterLicenseChecker.buildErrorMessage("Feature", info, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial), + equalTo("the license on cluster [expired-cluster] is not active")); + } + + private ActionListener<RemoteClusterLicenseChecker.LicenseCheck> doubleInvocationProtectingListener( + final ActionListener<RemoteClusterLicenseChecker.LicenseCheck> listener) { + final AtomicBoolean listenerInvoked = new AtomicBoolean(); + return new ActionListener<RemoteClusterLicenseChecker.LicenseCheck>() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + if (listenerInvoked.compareAndSet(false, true) == false) { + fail("listener invoked twice"); + } + listener.onResponse(response); + } + + @Override + public void onFailure(final Exception e) { + if (listenerInvoked.compareAndSet(false, true) == false) { + fail("listener invoked twice"); + } + listener.onFailure(e); + } + + }; + } + + private ThreadPool createMockThreadPool() { + final ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + return threadPool; + } + + private Client createMockClient(final ThreadPool threadPool) { + return createMockClient(threadPool, client -> when(client.getRemoteClusterClient(anyString())).thenReturn(client)); + } + + private Client createMockClientThatThrowsOnGetRemoteClusterClient(final ThreadPool threadPool, final String clusterAlias) { + return createMockClient( + threadPool, + client -> { + when(client.getRemoteClusterClient(clusterAlias)).thenThrow(new IllegalArgumentException()); + when(client.getRemoteClusterClient(argThat(not(clusterAlias)))).thenReturn(client); + }); + } + + private Client createMockClient(final ThreadPool threadPool, final Consumer<Client> finish) { + final Client client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + finish.accept(client); + return client; + } + + private XPackInfoResponse.LicenseInfo createPlatinumLicenseResponse() { + return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.ACTIVE, randomNonNegativeLong()); + } + + private XPackInfoResponse.LicenseInfo createBasicLicenseResponse() { + return new XPackInfoResponse.LicenseInfo("uid", "BASIC", "BASIC", LicenseStatus.ACTIVE, randomNonNegativeLong()); + } + + private XPackInfoResponse.LicenseInfo createExpiredLicenseResponse() { + return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.EXPIRED, randomNonNegativeLong()); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index bb21ddbd1a1..c2cb5af1305 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -229,7 +229,7 @@ public class XPackLicenseStateTests extends ESTestCase { public void testOldTrialDefaultsSecurityOn() { XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); - licenseState.update(TRIAL, true, rarely() ? null : VersionUtils.randomVersionBetween(random(), Version.V_5_6_0, Version.V_6_2_4)); + licenseState.update(TRIAL, true, rarely() ?
null : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_2_4)); assertThat(licenseState.isSecurityEnabled(), is(true)); assertThat(licenseState.isAuthAllowed(), is(true)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java new file mode 100644 index 00000000000..fac99959c53 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.XPackInfoResponse.BuildInfo; +import org.elasticsearch.protocol.xpack.XPackInfoResponse.LicenseInfo; +import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo; +import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo.FeatureSet; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Predicate; +import java.io.IOException; + +public class XPackInfoResponseTests extends AbstractStreamableXContentTestCase<XPackInfoResponse> { + @Override + protected XPackInfoResponse doParseInstance(XContentParser parser) throws IOException { + return XPackInfoResponse.fromXContent(parser); + } + + @Override + protected XPackInfoResponse createBlankInstance() { + return new XPackInfoResponse(); + } + + @Override + protected Predicate<String> getRandomFieldsExcludeFilter() { + return path -> path.equals("features") + || (path.startsWith("features") && path.endsWith("native_code_info")); + } + + @Override + protected ToXContent.Params getToXContentParams() { + Map<String, String> params = new HashMap<>(); + if (randomBoolean()) { + params.put("human", randomBoolean() ? "true" : "false"); + } + if (randomBoolean()) { + params.put("categories", "_none"); + } + return new ToXContent.MapParams(params); + } + + @Override + protected XPackInfoResponse createTestInstance() { + return new XPackInfoResponse( + randomBoolean() ? null : randomBuildInfo(), + randomBoolean() ? null : randomLicenseInfo(), + randomBoolean() ?
null : randomFeatureSetsInfo()); + } + + @Override + protected XPackInfoResponse mutateInstance(XPackInfoResponse response) { + @SuppressWarnings("unchecked") + Function<XPackInfoResponse, XPackInfoResponse> mutator = randomFrom( + r -> new XPackInfoResponse( + mutateBuildInfo(r.getBuildInfo()), + r.getLicenseInfo(), + r.getFeatureSetsInfo()), + r -> new XPackInfoResponse( + r.getBuildInfo(), + mutateLicenseInfo(r.getLicenseInfo()), + r.getFeatureSetsInfo()), + r -> new XPackInfoResponse( + r.getBuildInfo(), + r.getLicenseInfo(), + mutateFeatureSetsInfo(r.getFeatureSetsInfo()))); + return mutator.apply(response); + } + + private BuildInfo randomBuildInfo() { + return new BuildInfo( + randomAlphaOfLength(10), + randomAlphaOfLength(15)); + } + + private BuildInfo mutateBuildInfo(BuildInfo buildInfo) { + if (buildInfo == null) { + return randomBuildInfo(); + } + return null; + } + + private LicenseInfo randomLicenseInfo() { + return new LicenseInfo( + randomAlphaOfLength(10), + randomAlphaOfLength(4), + randomAlphaOfLength(5), + randomFrom(LicenseStatus.values()), + randomLong()); + } + + private LicenseInfo mutateLicenseInfo(LicenseInfo licenseInfo) { + if (licenseInfo == null) { + return randomLicenseInfo(); + } + return null; + } + + private FeatureSetsInfo randomFeatureSetsInfo() { + int size = between(0, 10); + Set<FeatureSet> featureSets = new HashSet<>(size); + while (featureSets.size() < size) { + featureSets.add(randomFeatureSet()); + } + return new FeatureSetsInfo(featureSets); + } + + private FeatureSetsInfo mutateFeatureSetsInfo(FeatureSetsInfo featureSetsInfo) { + if (featureSetsInfo == null) { + return randomFeatureSetsInfo(); + } + return null; + } + + private FeatureSet randomFeatureSet() { + return new FeatureSet( + randomAlphaOfLength(5), + randomBoolean() ? null : randomAlphaOfLength(20), + randomBoolean(), + randomBoolean(), + randomNativeCodeInfo()); + } + + private Map<String, Object> randomNativeCodeInfo() { + if (randomBoolean()) { + return null; + } + int size = between(0, 10); + Map<String, Object> nativeCodeInfo = new HashMap<>(size); + while (nativeCodeInfo.size() < size) { + nativeCodeInfo.put(randomAlphaOfLength(5), randomAlphaOfLength(5)); + } + return nativeCodeInfo; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/common/ProtocolUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/common/ProtocolUtilsTests.java new file mode 100644 index 00000000000..c4e29d7c230 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/common/ProtocolUtilsTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.protocol.xpack.common; + +import org.elasticsearch.test.ESTestCase; + +import java.util.HashMap; +import java.util.Map; + +public class ProtocolUtilsTests extends ESTestCase { + + public void testMapStringEqualsAndHash() { + assertTrue(ProtocolUtils.equals(null, null)); + assertFalse(ProtocolUtils.equals(null, new HashMap<>())); + assertFalse(ProtocolUtils.equals(new HashMap<>(), null)); + + Map<String, String[]> a = new HashMap<>(); + a.put("foo", new String[] { "a", "b" }); + a.put("bar", new String[] { "b", "c" }); + + Map<String, String[]> b = new HashMap<>(); + b.put("foo", new String[] { "a", "b" }); + + assertFalse(ProtocolUtils.equals(a, b)); + assertFalse(ProtocolUtils.equals(b, a)); + + b.put("bar", new String[] { "c", "b" }); + + assertFalse(ProtocolUtils.equals(a, b)); + assertFalse(ProtocolUtils.equals(b, a)); + + b.put("bar", new String[] { "b", "c" }); + + assertTrue(ProtocolUtils.equals(a, b)); + assertTrue(ProtocolUtils.equals(b, a)); + assertEquals(ProtocolUtils.hashCode(a), ProtocolUtils.hashCode(b)); + + b.put("baz", new String[] { "b", "c" }); + + assertFalse(ProtocolUtils.equals(a, b)); + assertFalse(ProtocolUtils.equals(b, a)); + + a.put("non", null); + + assertFalse(ProtocolUtils.equals(a, b)); + assertFalse(ProtocolUtils.equals(b, a)); + + b.put("non", null); + b.remove("baz"); + + assertTrue(ProtocolUtils.equals(a, b)); + assertTrue(ProtocolUtils.equals(b, a)); + assertEquals(ProtocolUtils.hashCode(a), ProtocolUtils.hashCode(b)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java new file mode 100644 index 00000000000..9f0f4145695 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class GraphExploreResponseTests extends AbstractXContentTestCase<GraphExploreResponse> { + + @Override + protected GraphExploreResponse createTestInstance() { + return createInstance(0); + } + private static GraphExploreResponse createInstance(int numFailures) { + int numItems = randomIntBetween(4, 128); + boolean timedOut = randomBoolean(); + boolean showDetails = randomBoolean(); + long overallTookInMillis = randomNonNegativeLong(); + Map<Vertex.VertexId, Vertex> vertices = new HashMap<>(); + Map<Connection.ConnectionId, Connection> connections = new HashMap<>(); + ShardOperationFailedException [] failures = new ShardOperationFailedException [numFailures]; + for (int i = 0; i < failures.length; i++) { + failures[i] = new ShardSearchFailure(new ElasticsearchException("an error")); + } + + //Create random set of vertices + for (int i = 0; i < numItems; i++) { + Vertex v = new Vertex("field1", randomAlphaOfLength(5), randomDouble(), 0, + showDetails?randomIntBetween(100, 200):0, + showDetails?randomIntBetween(1, 100):0); + vertices.put(v.getId(), v); + } + + //Wire up half the vertices randomly + Vertex[] vs = vertices.values().toArray(new Vertex[vertices.size()]); + for (int i = 0; i < numItems/2; i++) { + Vertex v1 = vs[randomIntBetween(0, vs.length-1)]; + Vertex v2 = vs[randomIntBetween(0, vs.length-1)]; + if(v1 != v2) { + Connection conn = new Connection(v1, v2, randomDouble(), randomLongBetween(1, 10)); + connections.put(conn.getId(), conn); + } + } + return new GraphExploreResponse(overallTookInMillis, timedOut, failures, vertices, connections, showDetails); + } + + + private static GraphExploreResponse createTestInstanceWithFailures() { + return createInstance(randomIntBetween(1, 128)); + } + + @Override + protected GraphExploreResponse doParseInstance(XContentParser parser) throws IOException { + return GraphExploreResponse.fromXContext(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected String[] getShuffleFieldsExceptions() { + return new String[]{"vertices", "connections"}; + } + + protected Predicate<String> getRandomFieldsExcludeFilterWhenResultHasErrors() { + return field -> field.startsWith("responses"); + } + + @Override + protected void assertEqualInstances(GraphExploreResponse expectedInstance, GraphExploreResponse newInstance) { + assertThat(newInstance.getTook(), equalTo(expectedInstance.getTook())); + assertThat(newInstance.isTimedOut(), equalTo(expectedInstance.isTimedOut())); + + Connection[] newConns = newInstance.getConnections().toArray(new Connection[0]); + Connection[] expectedConns = expectedInstance.getConnections().toArray(new Connection[0]); + assertArrayEquals(expectedConns, newConns); + + Vertex[] newVertices = newInstance.getVertices().toArray(new Vertex[0]); + Vertex[] expectedVertices = expectedInstance.getVertices().toArray(new Vertex[0]); + assertArrayEquals(expectedVertices, newVertices); + + ShardOperationFailedException[] newFailures =
newInstance.getShardFailures(); + ShardOperationFailedException[] expectedFailures = expectedInstance.getShardFailures(); + assertEquals(expectedFailures.length, newFailures.length); + + } + + /** + * Test parsing {@link GraphExploreResponse} with inner failures as they don't support asserting on xcontent equivalence, given + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier< GraphExploreResponse> instanceSupplier = GraphExploreResponseTests::createTestInstanceWithFailures; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. + boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent( + NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, getShuffleFieldsExceptions(), + getRandomFieldsExcludeFilterWhenResultHasErrors(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java new file mode 100644 index 00000000000..7149477d007 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import java.io.IOException; + +import org.elasticsearch.test.ESTestCase; + +public class LicenseStatusTests extends ESTestCase { + public void testSerialization() throws IOException { + LicenseStatus status = randomFrom(LicenseStatus.values()); + assertSame(status, copyWriteable(status, writableRegistry(), LicenseStatus::readFrom)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java new file mode 100644 index 00000000000..a09fd6fb99b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java new file mode 100644 index 00000000000..a09fd6fb99b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Predicate; + +public class PutLicenseResponseTests extends AbstractStreamableXContentTestCase<PutLicenseResponse> { + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate<String> getRandomFieldsExcludeFilter() { + // The structure of the response is such that unknown fields inside acknowledge cannot be supported since they + // are treated as messages from new services + return p -> p.startsWith("acknowledge"); + } + + @Override + protected PutLicenseResponse createTestInstance() { + boolean acknowledged = randomBoolean(); + LicensesStatus status = randomFrom(LicensesStatus.VALID, LicensesStatus.INVALID, LicensesStatus.EXPIRED); + String messageHeader; + Map<String, String[]> ackMessages; + if (randomBoolean()) { + messageHeader = randomAlphaOfLength(10); + ackMessages = randomAckMessages(); + } else { + messageHeader = null; + ackMessages = Collections.emptyMap(); + } + + return new PutLicenseResponse(acknowledged, status, messageHeader, ackMessages); + } + + private static Map<String, String[]> randomAckMessages() { + int nFeatures = randomIntBetween(1, 5); + + Map<String, String[]> ackMessages = new HashMap<>(); + + for (int i = 0; i < nFeatures; i++) { + String feature = randomAlphaOfLengthBetween(9, 15); + int nMessages = randomIntBetween(1, 5); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = randomAlphaOfLengthBetween(10, 30); + } + ackMessages.put(feature, messages); + } + + return ackMessages; + } + + @Override + protected PutLicenseResponse doParseInstance(XContentParser parser) throws IOException { + return PutLicenseResponse.fromXContent(parser); + } + + @Override + protected PutLicenseResponse createBlankInstance() { + return new PutLicenseResponse(); + } + + @Override + protected PutLicenseResponse mutateInstance(PutLicenseResponse response) { + @SuppressWarnings("unchecked") + Function<PutLicenseResponse, PutLicenseResponse> mutator = randomFrom( + r -> new PutLicenseResponse( + r.isAcknowledged() == false, + r.status(), + r.acknowledgeHeader(), + r.acknowledgeMessages()), + r -> new PutLicenseResponse( + r.isAcknowledged(), + mutateStatus(r.status()), + r.acknowledgeHeader(), + r.acknowledgeMessages()), + r -> { + if (r.acknowledgeMessages().isEmpty()) { + return new PutLicenseResponse( + r.isAcknowledged(), + r.status(), + randomAlphaOfLength(10), + randomAckMessages() + ); + } else { + return new PutLicenseResponse(r.isAcknowledged(), r.status()); + } + } + ); + return mutator.apply(response); + } + + private LicensesStatus mutateStatus(LicensesStatus status) { + return randomValueOtherThan(status, () -> randomFrom(LicensesStatus.values())); + } + +}
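The mutateInstance override drives the base class's equality checks: every mutator flips exactly one property, and the framework asserts that the mutant no longer equals the original. The same contract reduced to plain Java, with a hypothetical Ack type in place of PutLicenseResponse:

    import java.util.Arrays;
    import java.util.function.UnaryOperator;

    public class MutateSketch {
        static final class Ack {
            final boolean acknowledged;
            final String header;
            Ack(boolean acknowledged, String header) { this.acknowledged = acknowledged; this.header = header; }
            @Override public boolean equals(Object o) {
                if (!(o instanceof Ack)) return false;
                Ack other = (Ack) o;
                return acknowledged == other.acknowledged
                        && (header == null ? other.header == null : header.equals(other.header));
            }
            @Override public int hashCode() { return (acknowledged ? 1 : 0) * 31 + (header == null ? 0 : header.hashCode()); }
        }

        public static void main(String[] args) {
            Ack original = new Ack(true, "header");
            // Each mutator changes exactly one property, mirroring the lambdas passed to randomFrom above.
            UnaryOperator<Ack> flipAck = a -> new Ack(!a.acknowledged, a.header);
            UnaryOperator<Ack> newHeader = a -> new Ack(a.acknowledged, a.header + "-changed");
            for (UnaryOperator<Ack> mutator : Arrays.asList(flipAck, newHeader)) {
                if (mutator.apply(original).equals(original)) {
                    throw new AssertionError("mutation was not detected by equals()");
                }
            }
            System.out.println("all mutations detected");
        }
    }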
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java new file mode 100644 index 00000000000..0e09a05fb96 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +public class IndexUpgradeInfoRequestTests extends AbstractWireSerializingTestCase<IndexUpgradeInfoRequest> { + @Override + protected IndexUpgradeInfoRequest createTestInstance() { + int indexCount = randomInt(4); + String[] indices = new String[indexCount]; + for (int i = 0; i < indexCount; i++) { + indices[i] = randomAlphaOfLength(10); + } + IndexUpgradeInfoRequest request = new IndexUpgradeInfoRequest(indices); + if (randomBoolean()) { + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + } + return request; + } + + @Override + protected Writeable.Reader<IndexUpgradeInfoRequest> instanceReader() { + return IndexUpgradeInfoRequest::new; + } + + public void testNullIndices() { + expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest((String[]) null)); + expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest().indices((String[]) null)); + } +}
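testNullIndices pins down fail-fast behaviour: both the constructor and the indices(...) setter should throw on a null array instead of letting the null reach serialization. A minimal stand-in for that guard (hypothetical class; the real request presumably delegates to Objects.requireNonNull):

    import java.util.Objects;

    public class GuardSketch {
        private String[] indices = new String[0];

        // Reject null up front so a bad request never makes it onto the wire.
        public GuardSketch indices(String... indices) {
            this.indices = Objects.requireNonNull(indices, "indices must not be null");
            return this;
        }

        public static void main(String[] args) {
            try {
                new GuardSketch().indices((String[]) null);
                throw new AssertionError("expected a NullPointerException");
            } catch (NullPointerException expected) {
                System.out.println("rejected null as expected: " + expected.getMessage());
            }
        }
    }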
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java new file mode 100644 index 00000000000..57f01a4454e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; + +public class IndexUpgradeInfoResponseTests extends AbstractStreamableXContentTestCase<IndexUpgradeInfoResponse> { + @Override + protected IndexUpgradeInfoResponse doParseInstance(XContentParser parser) { + return IndexUpgradeInfoResponse.fromXContent(parser); + } + + @Override + protected IndexUpgradeInfoResponse createBlankInstance() { + return new IndexUpgradeInfoResponse(); + } + + @Override + protected IndexUpgradeInfoResponse createTestInstance() { + return randomIndexUpgradeInfoResponse(randomIntBetween(0, 10)); + } + + private static IndexUpgradeInfoResponse randomIndexUpgradeInfoResponse(int numIndices) { + Map<String, UpgradeActionRequired> actions = new HashMap<>(); + for (int i = 0; i < numIndices; i++) { + actions.put(randomAlphaOfLength(5), randomFrom(UpgradeActionRequired.values())); + } + return new IndexUpgradeInfoResponse(actions); + } + + @Override + protected IndexUpgradeInfoResponse mutateInstance(IndexUpgradeInfoResponse instance) { + if (instance.getActions().size() == 0) { + return randomIndexUpgradeInfoResponse(1); + } + Map<String, UpgradeActionRequired> actions = new HashMap<>(instance.getActions()); + if (randomBoolean()) { + Iterator<Map.Entry<String, UpgradeActionRequired>> iterator = actions.entrySet().iterator(); + iterator.next(); + iterator.remove(); + } else { + actions.put(randomAlphaOfLength(5), randomFrom(UpgradeActionRequired.values())); + } + return new IndexUpgradeInfoResponse(actions); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java new file mode 100644 index 00000000000..209bc790a8c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class DeleteWatchResponseTests extends AbstractXContentTestCase<DeleteWatchResponse> { + + @Override + protected DeleteWatchResponse createTestInstance() { + String id = randomAlphaOfLength(10); + long version = randomLongBetween(1, 10); + boolean found = randomBoolean(); + return new DeleteWatchResponse(id, version, found); + } + + @Override + protected DeleteWatchResponse doParseInstance(XContentParser parser) throws IOException { + return DeleteWatchResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java new file mode 100644 index 00000000000..1fc2f61b684 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class PutWatchResponseTests extends AbstractXContentTestCase { + + @Override + protected PutWatchResponse createTestInstance() { + String id = randomAlphaOfLength(10); + long version = randomLongBetween(1, 10); + boolean created = randomBoolean(); + return new PutWatchResponse(id, version, created); + } + + @Override + protected PutWatchResponse doParseInstance(XContentParser parser) throws IOException { + return PutWatchResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index e0029362003..f0db64d3271 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -33,7 +33,9 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.TokenizerFactory; +import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.ingest.Processor; @@ -44,6 +46,7 @@ import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.NetworkPlugin; @@ -70,6 +73,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; +import java.util.Optional; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Predicate; @@ -80,7 +85,7 @@ import java.util.stream.Collectors; import static java.util.stream.Collectors.toList; public class LocalStateCompositeXPackPlugin extends XPackPlugin implements ScriptPlugin, ActionPlugin, IngestPlugin, NetworkPlugin, - ClusterPlugin, DiscoveryPlugin, MapperPlugin, AnalysisPlugin, PersistentTaskPlugin { + ClusterPlugin, DiscoveryPlugin, MapperPlugin, AnalysisPlugin, PersistentTaskPlugin, EnginePlugin { private XPackLicenseState licenseState; private SSLService sslService; @@ -246,8 +251,8 @@ public class LocalStateCompositeXPackPlugin extends XPackPlugin implements Scrip @Override - public List getContexts() { - List contexts = new ArrayList<>(); + public List> getContexts() { + List> contexts = new ArrayList<>(); contexts.addAll(super.getContexts()); filterPlugins(ScriptPlugin.class).stream().forEach(p -> contexts.addAll(p.getContexts())); return contexts; @@ -391,6 +396,20 @@ public class LocalStateCompositeXPackPlugin extends 
XPackPlugin implements Scrip IOUtils.close(plugins); } + @Override + public Optional getEngineFactory(IndexSettings indexSettings) { + List> enginePlugins = filterPlugins(EnginePlugin.class).stream() + .map(p -> p.getEngineFactory(indexSettings)) + .collect(Collectors.toList()); + if (enginePlugins.size() == 0) { + return Optional.empty(); + } else if (enginePlugins.size() == 1) { + return enginePlugins.stream().findFirst().get(); + } else { + throw new IllegalStateException("Only one EngineFactory plugin allowed"); + } + } + private List filterPlugins(Class type) { return plugins.stream().filter(x -> type.isAssignableFrom(x.getClass())).map(p -> ((T)p)) .collect(Collectors.toList()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadataTests.java new file mode 100644 index 00000000000..5227c04962a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadataTests.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ccr; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Predicate; + +public class AutoFollowMetadataTests extends AbstractSerializingTestCase { + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return s -> true; + } + + @Override + protected AutoFollowMetadata doParseInstance(XContentParser parser) throws IOException { + return AutoFollowMetadata.fromXContent(parser); + } + + @Override + protected AutoFollowMetadata createTestInstance() { + int numEntries = randomIntBetween(0, 32); + Map configs = new HashMap<>(numEntries); + Map> followedLeaderIndices = new HashMap<>(numEntries); + for (int i = 0; i < numEntries; i++) { + List leaderPatterns = Arrays.asList(generateRandomStringArray(4, 4, false)); + AutoFollowMetadata.AutoFollowPattern autoFollowPattern = + new AutoFollowMetadata.AutoFollowPattern(leaderPatterns, randomAlphaOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), randomNonNegativeLong(), randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(500)); + configs.put(Integer.toString(i), autoFollowPattern); + followedLeaderIndices.put(Integer.toString(i), Arrays.asList(generateRandomStringArray(4, 4, false))); + } + return new AutoFollowMetadata(configs, followedLeaderIndices); + } + + @Override + protected Writeable.Reader instanceReader() { + return AutoFollowMetadata::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java new file mode 100644 index 00000000000..2662e05570c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java 
@@ -0,0 +1,143 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexing; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchResponseSections; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; + +public class AsyncTwoPhaseIndexerTests extends ESTestCase { + + AtomicBoolean isFinished = new AtomicBoolean(false); + + private class MockIndexer extends AsyncTwoPhaseIndexer { + + // test the execution order + private int step; + + protected MockIndexer(Executor executor, AtomicReference initialState, Integer initialPosition) { + super(executor, initialState, initialPosition, new MockJobStats()); + } + + @Override + protected String getJobId() { + return "mock"; + } + + @Override + protected IterationResult doProcess(SearchResponse searchResponse) { + assertThat(step, equalTo(3)); + ++step; + return new IterationResult(Collections.emptyList(), 3, true); + } + + @Override + protected SearchRequest buildSearchRequest() { + assertThat(step, equalTo(1)); + ++step; + return null; + } + + @Override + protected void onStartJob(long now) { + assertThat(step, equalTo(0)); + ++step; + } + + @Override + protected void doNextSearch(SearchRequest request, ActionListener nextPhase) { + assertThat(step, equalTo(2)); + ++step; + final SearchResponseSections sections = new SearchResponseSections(new SearchHits(new SearchHit[0], 0, 0), null, null, false, + null, null, 1); + + nextPhase.onResponse(new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null)); + } + + @Override + protected void doNextBulk(BulkRequest request, ActionListener nextPhase) { + fail("should not be called"); + } + + @Override + protected void doSaveState(IndexerState state, Integer position, Runnable next) { + assertThat(step, equalTo(4)); + ++step; + next.run(); + } + + @Override + protected void onFailure(Exception exc) { + fail(exc.getMessage()); + } + + @Override + protected void onFinish() { + assertThat(step, equalTo(5)); + ++step; + isFinished.set(true); + } + + @Override + protected void onAbort() { + } + + public int getStep() { + return step; + } + + } + + private static class MockJobStats extends IndexerJobStats { + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return null; + } + } + + public void testStateMachine() throws InterruptedException { + AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + final ExecutorService executor = Executors.newFixedThreadPool(1); + + try { + + MockIndexer indexer = 
new MockIndexer(executor, state, 2); + indexer.start(); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); + assertThat(indexer.getPosition(), equalTo(2)); + ESTestCase.awaitBusy(() -> isFinished.get()); + assertThat(indexer.getStep(), equalTo(6)); + assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); + assertThat(indexer.getStats().getNumPages(), equalTo(1L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); + assertTrue(indexer.abort()); + } finally { + executor.shutdownNow(); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/IndexerStateEnumTests.java similarity index 98% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/IndexerStateEnumTests.java index ec17a37e23b..329800c2f1a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/IndexerStateEnumTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.rollup.job; +package org.elasticsearch.xpack.core.indexing; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java new file mode 100644 index 00000000000..05ba0e7f306 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.test.AbstractStreamableTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.startsWith; + +public class FindFileStructureActionRequestTests extends AbstractStreamableTestCase<FindFileStructureAction.Request> { + + @Override + protected FindFileStructureAction.Request createTestInstance() { + + FindFileStructureAction.Request request = new FindFileStructureAction.Request(); + + if (randomBoolean()) { + request.setLinesToSample(randomIntBetween(10, 2000)); + } + request.setSample(new BytesArray(randomByteArrayOfLength(randomIntBetween(1000, 20000)))); + + return request; + } + + @Override + protected FindFileStructureAction.Request createBlankInstance() { + return new FindFileStructureAction.Request(); + } + + public void testValidateLinesToSample() { + + FindFileStructureAction.Request request = new FindFileStructureAction.Request(); + request.setLinesToSample(randomFrom(-1, 0)); + request.setSample(new BytesArray("foo\n")); + + ActionRequestValidationException e = request.validate(); + assertNotNull(e); + assertThat(e.getMessage(), startsWith("Validation Failed: ")); + assertThat(e.getMessage(), containsString(" lines_to_sample must be positive if specified")); + } + + public void testValidateSample() { + + FindFileStructureAction.Request request = new FindFileStructureAction.Request(); + if (randomBoolean()) { + request.setSample(BytesArray.EMPTY); + } + + ActionRequestValidationException e = request.validate(); + assertNotNull(e); + assertThat(e.getMessage(), startsWith("Validation Failed: ")); + assertThat(e.getMessage(), containsString(" sample must be specified")); + } +}
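Both validation tests expect one ActionRequestValidationException whose message aggregates every problem, which is why they match with containsString rather than strict equality. The accumulate-then-report pattern, stripped down to plain Java with illustrative names:

    import java.util.ArrayList;
    import java.util.List;

    public class ValidationSketch {
        Integer linesToSample;     // null means "not specified"
        byte[] sample = new byte[0];

        // Collect every problem instead of throwing on the first one,
        // as FindFileStructureAction.Request#validate does.
        List<String> validate() {
            List<String> errors = new ArrayList<>();
            if (linesToSample != null && linesToSample <= 0) {
                errors.add("lines_to_sample must be positive if specified");
            }
            if (sample == null || sample.length == 0) {
                errors.add("sample must be specified");
            }
            return errors;
        }

        public static void main(String[] args) {
            ValidationSketch request = new ValidationSketch();
            request.linesToSample = 0; // invalid on purpose
            System.out.println("Validation Failed: " + request.validate()); // both messages reported
        }
    }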
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionResponseTests.java new file mode 100644 index 00000000000..706ee44a4fd --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionResponseTests.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructureTests; + +public class FindFileStructureActionResponseTests extends AbstractStreamableTestCase<FindFileStructureAction.Response> { + + @Override + protected FindFileStructureAction.Response createTestInstance() { + return new FindFileStructureAction.Response(FileStructureTests.createTestFileStructure()); + } + + @Override + protected FindFileStructureAction.Response createBlankInstance() { + return new FindFileStructureAction.Response(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionRequestTests.java index 913618de38b..edf3f73a8af 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionRequestTests.java @@ -6,9 +6,13 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.tasks.Task; import org.elasticsearch.test.AbstractStreamableTestCase; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction.Request; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + public class GetJobStatsActionRequestTests extends AbstractStreamableTestCase<Request> { @Override @@ -23,4 +27,9 @@ public class GetJobStatsActionRequestTests extends AbstractStreamableTestCase<Request> { + + @Override + protected FieldStats createTestInstance() { + return createTestFieldStats(); + } + + static FieldStats createTestFieldStats() { + + long count = randomIntBetween(1, 100000); + int cardinality = randomIntBetween(1, (int) count); + + Double minValue = null; + Double maxValue = null; + Double meanValue = null; + Double medianValue = null; + boolean isMetric = randomBoolean(); + if (isMetric) { + minValue = randomDouble(); + maxValue = randomDouble(); + meanValue = randomDouble(); + medianValue = randomDouble(); + } + + List<Map<String, Object>> topHits = new ArrayList<>(); + for (int i = 0; i < Math.min(10, cardinality); ++i) { + Map<String, Object> topHit = new LinkedHashMap<>(); + if (isMetric) { + topHit.put("value", randomDouble()); + } else { + topHit.put("value", randomAlphaOfLength(20)); + } + topHit.put("count", randomIntBetween(1, cardinality)); + topHits.add(topHit); + } + + return new FieldStats(count, cardinality, minValue, maxValue, meanValue, medianValue, topHits); + } + + @Override + protected Writeable.Reader<FieldStats> instanceReader() { + return FieldStats::new; + } + + @Override + protected FieldStats doParseInstance(XContentParser parser) { + return FieldStats.PARSER.apply(parser, null); + } +} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructureTests.java similarity index 58% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructureTests.java index 738928ed28a..6dcf6751965 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTests.java +++
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructureTests.java @@ -3,10 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.core.ml.filestructurefinder; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import java.nio.charset.Charset; import java.util.Arrays; @@ -16,13 +18,18 @@ import java.util.Locale; import java.util.Map; import java.util.TreeMap; -public class LogStructureTests extends AbstractXContentTestCase { +public class FileStructureTests extends AbstractSerializingTestCase { - protected LogStructure createTestInstance() { + @Override + protected FileStructure createTestInstance() { + return createTestFileStructure(); + } - LogStructure.Format format = randomFrom(EnumSet.allOf(LogStructure.Format.class)); + public static FileStructure createTestFileStructure() { - LogStructure.Builder builder = new LogStructure.Builder(format); + FileStructure.Format format = randomFrom(EnumSet.allOf(FileStructure.Format.class)); + + FileStructure.Builder builder = new FileStructure.Builder(format); int numLinesAnalyzed = randomIntBetween(2, 10000); builder.setNumLinesAnalyzed(numLinesAnalyzed); @@ -43,14 +50,12 @@ public class LogStructureTests extends AbstractXContentTestCase { builder.setExcludeLinesPattern(randomAlphaOfLength(100)); } - if (format.isSeparatedValues() || (format.supportsNesting() && randomBoolean())) { + if (format == FileStructure.Format.DELIMITED || (format.supportsNesting() && randomBoolean())) { builder.setInputFields(Arrays.asList(generateRandomStringArray(10, 10, false, false))); } - if (format.isSeparatedValues()) { + if (format == FileStructure.Format.DELIMITED) { builder.setHasHeaderRow(randomBoolean()); - if (rarely()) { - builder.setSeparator(format.separator()); - } + builder.setDelimiter(randomFrom(',', '\t', ';', '|')); } if (format.isSemiStructured()) { builder.setGrokPattern(randomAlphaOfLength(100)); @@ -68,16 +73,31 @@ public class LogStructureTests extends AbstractXContentTestCase { } builder.setMappings(mappings); + if (randomBoolean()) { + Map fieldStats = new TreeMap<>(); + for (String field : generateRandomStringArray(5, 20, false, false)) { + fieldStats.put(field, FieldStatsTests.createTestFieldStats()); + } + builder.setFieldStats(fieldStats); + } + builder.setExplanation(Arrays.asList(generateRandomStringArray(10, 150, false, false))); return builder.build(); } - protected LogStructure doParseInstance(XContentParser parser) { - return LogStructure.PARSER.apply(parser, null).build(); + @Override + protected Writeable.Reader instanceReader() { + return FileStructure::new; } - protected boolean supportsUnknownFields() { - return false; + @Override + protected FileStructure doParseInstance(XContentParser parser) { + return FileStructure.PARSER.apply(parser, null).build(); + } + + @Override + protected ToXContent.Params getToXContentParams() { + return new ToXContent.MapParams(Collections.singletonMap(FileStructure.EXPLAIN, "true")); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java index 88d9b07816d..7e53478533e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -39,7 +39,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -479,19 +478,6 @@ public class JobTests extends AbstractSerializingTestCase { assertThat(e.getMessage(), equalTo(Messages.getMessage(Messages.JOB_CONFIG_TIME_FIELD_NOT_ALLOWED_IN_ANALYSIS_CONFIG))); } - public void testGetCompatibleJobTypes_givenVersionBefore_V_5_4() { - assertThat(Job.getCompatibleJobTypes(Version.V_5_0_0).isEmpty(), is(true)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_3_0).isEmpty(), is(true)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_3_2).isEmpty(), is(true)); - } - - public void testGetCompatibleJobTypes_givenVersionAfter_V_5_4() { - assertThat(Job.getCompatibleJobTypes(Version.V_5_4_0), contains(Job.ANOMALY_DETECTOR_JOB_TYPE)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_4_0).size(), equalTo(1)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_5_0), contains(Job.ANOMALY_DETECTOR_JOB_TYPE)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_5_0).size(), equalTo(1)); - } - public void testInvalidCreateTimeSettings() { Job.Builder builder = new Job.Builder("invalid-settings"); builder.setModelSnapshotId("snapshot-foo"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulatorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulatorTests.java index bd2df0823ae..87d2acff9e3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulatorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulatorTests.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.core.ml.stats; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.search.aggregations.metrics.stats.Stats; +import org.elasticsearch.search.aggregations.metrics.Stats; import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.HashMap; import java.util.Map; @@ -157,4 +157,4 @@ public class StatsAccumulatorTests extends AbstractWireSerializingTestCase instanceReader() { return StatsAccumulator::new; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java index 6b8846def72..415e1a00a60 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java @@ -125,6 +125,20 @@ public class DateHistogramGroupConfigSerializingTests extends AbstractSerializin assertThat(e.validationErrors().size(), equalTo(0)); } + public void testValidateWeek() { + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock 
fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap("date", fieldCaps)); + + DateHistogramGroupConfig config = new DateHistogramGroupConfig("my_field", new DateHistogramInterval("1w"), null, null); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().size(), equalTo(0)); + } + /** * Tests that a DateHistogramGroupConfig can be serialized/deserialized correctly after * the timezone was changed from DateTimeZone to String. diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java index a0df63bc38d..1ab6e6a55d4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.rollup.job; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.action.GetRollupJobsAction; @@ -40,7 +41,8 @@ public class JobWrapperSerializingTests extends AbstractSerializingTestCase { + + @Override + protected RollupIndexerJobStats createTestInstance() { + return randomStats(); + } + + @Override + protected Writeable.Reader instanceReader() { + return RollupIndexerJobStats::new; + } + + @Override + protected RollupIndexerJobStats doParseInstance(XContentParser parser) { + return RollupIndexerJobStats.fromXContent(parser); + } + + public static RollupIndexerJobStats randomStats() { + return new RollupIndexerJobStats(randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong()); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java deleted file mode 100644 index 0091b21dc40..00000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.core.rollup.job; - -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; - -public class RollupJobStatsTests extends AbstractSerializingTestCase { - - @Override - protected RollupJobStats createTestInstance() { - return randomStats(); - } - - @Override - protected Writeable.Reader instanceReader() { - return RollupJobStats::new; - } - - @Override - protected RollupJobStats doParseInstance(XContentParser parser) { - return RollupJobStats.fromXContent(parser); - } - - public static RollupJobStats randomStats() { - return new RollupJobStats(randomNonNegativeLong(), randomNonNegativeLong(), - randomNonNegativeLong(), randomNonNegativeLong()); - } -} - diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java index 2c802a7e41d..f46bda788bf 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.rollup.job; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.indexing.IndexerState; import java.util.HashMap; import java.util.Map; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfigSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfigSerializingTests.java index ccdd616df7b..b0e33579eb3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfigSerializingTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfigSerializingTests.java @@ -9,19 +9,16 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomTermsGroupConfig; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class TermsGroupConfigSerializingTests extends AbstractSerializingTestCase { @@ -77,62 +74,4 @@ public class TermsGroupConfigSerializingTests extends AbstractSerializingTestCas assertThat(e.validationErrors().get(0), equalTo("The field referenced by a terms group must be a [numeric] or " + "[keyword/text] type, but found [geo_point] for field [my_field]")); } - - public void testValidateFieldMatchingNotAggregatable() { - ActionRequestValidationException e = new ActionRequestValidationException(); - Map> responseMap = new HashMap<>(); - - // Have to mock fieldcaps because the ctor's aren't 
public... - FieldCapabilities fieldCaps = mock(FieldCapabilities.class); - when(fieldCaps.isAggregatable()).thenReturn(false); - responseMap.put("my_field", Collections.singletonMap(getRandomType(), fieldCaps)); - - TermsGroupConfig config = new TermsGroupConfig("my_field"); - config.validateMappings(responseMap, e); - assertThat(e.validationErrors().get(0), equalTo("The field [my_field] must be aggregatable across all indices, but is not.")); - } - - public void testValidateMatchingField() { - ActionRequestValidationException e = new ActionRequestValidationException(); - Map> responseMap = new HashMap<>(); - String type = getRandomType(); - - // Have to mock fieldcaps because the ctor's aren't public... - FieldCapabilities fieldCaps = mock(FieldCapabilities.class); - when(fieldCaps.isAggregatable()).thenReturn(true); - responseMap.put("my_field", Collections.singletonMap(type, fieldCaps)); - - TermsGroupConfig config = new TermsGroupConfig("my_field"); - config.validateMappings(responseMap, e); - if (e.validationErrors().size() != 0) { - fail(e.getMessage()); - } - - List> builders = config.toBuilders(); - assertThat(builders.size(), equalTo(1)); - } - - private String getRandomType() { - int n = randomIntBetween(0,8); - if (n == 0) { - return "keyword"; - } else if (n == 1) { - return "text"; - } else if (n == 2) { - return "long"; - } else if (n == 3) { - return "integer"; - } else if (n == 4) { - return "short"; - } else if (n == 5) { - return "float"; - } else if (n == 6) { - return "double"; - } else if (n == 7) { - return "scaled_float"; - } else if (n == 8) { - return "half_float"; - } - return "long"; - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngineTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngineTests.java new file mode 100644 index 00000000000..5ab7b805cc1 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngineTests.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.scheduler; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.mockito.ArgumentCaptor; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.any; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Matchers.argThat; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; + +public class SchedulerEngineTests extends ESTestCase { + + public void testListenersThrowingExceptionsDoNotCauseOtherListenersToBeSkipped() throws InterruptedException { + final Logger mockLogger = mock(Logger.class); + final SchedulerEngine engine = new SchedulerEngine(Settings.EMPTY, Clock.systemUTC(), mockLogger); + try { + final List> listeners = new ArrayList<>(); + final int numberOfListeners = randomIntBetween(1, 32); + int numberOfFailingListeners = 0; + final CountDownLatch latch = new CountDownLatch(numberOfListeners); + + for (int i = 0; i < numberOfListeners; i++) { + final AtomicBoolean trigger = new AtomicBoolean(); + final SchedulerEngine.Listener listener; + if (randomBoolean()) { + listener = event -> { + if (trigger.compareAndSet(false, true)) { + latch.countDown(); + } else { + fail("listener invoked twice"); + } + }; + } else { + numberOfFailingListeners++; + listener = event -> { + if (trigger.compareAndSet(false, true)) { + // we count down the latch after this exception is caught and mock logged in SchedulerEngine#notifyListeners + throw new RuntimeException(getTestName()); + } else { + fail("listener invoked twice"); + } + }; + doAnswer(invocationOnMock -> { + // this happens after the listener has been notified, threw an exception, and then mock logged the exception + latch.countDown(); + return null; + }).when(mockLogger).warn(argThat(any(ParameterizedMessage.class)), argThat(any(RuntimeException.class))); + } + listeners.add(Tuple.tuple(listener, trigger)); + } + + // randomize the order and register the listeners + Collections.shuffle(listeners, random()); + listeners.stream().map(Tuple::v1).forEach(engine::register); + + final AtomicBoolean scheduled = new AtomicBoolean(); + engine.add(new SchedulerEngine.Job( + getTestName(), + (startTime, now) -> { + // only allow one triggering of the listeners + if (scheduled.compareAndSet(false, true)) { + return 0; + } else { + return -1; + } + })); + + latch.await(); + + // now check that every listener was invoked + assertTrue(listeners.stream().map(Tuple::v2).allMatch(AtomicBoolean::get)); + if (numberOfFailingListeners > 0) { + assertFailedListenerLogMessage(mockLogger, numberOfFailingListeners); + } + verifyNoMoreInteractions(mockLogger); + } finally { + engine.stop(); + } + } + + public void testListenersThrowingExceptionsDoNotCauseNextScheduledTaskToBeSkipped() throws InterruptedException { + final Logger mockLogger = mock(Logger.class); + final SchedulerEngine 
engine = new SchedulerEngine(Settings.EMPTY, Clock.systemUTC(), mockLogger); + try { + final List> listeners = new ArrayList<>(); + final int numberOfListeners = randomIntBetween(1, 32); + final int numberOfSchedules = randomIntBetween(1, 32); + final CountDownLatch listenersLatch = new CountDownLatch(numberOfSchedules * numberOfListeners); + for (int i = 0; i < numberOfListeners; i++) { + final AtomicInteger triggerCount = new AtomicInteger(); + final SchedulerEngine.Listener listener = event -> { + if (triggerCount.incrementAndGet() <= numberOfSchedules) { + listenersLatch.countDown(); + throw new RuntimeException(getTestName()); + } else { + fail("listener invoked more than [" + numberOfSchedules + "] times"); + } + }; + listeners.add(Tuple.tuple(listener, triggerCount)); + engine.register(listener); + } + + // latch for each invocation of nextScheduledTimeAfter, once for each scheduled run, and then a final time when we disable + final CountDownLatch latch = new CountDownLatch(1 + numberOfSchedules); + engine.add(new SchedulerEngine.Job( + getTestName(), + (startTime, now) -> { + if (latch.getCount() >= 2) { + latch.countDown(); + return 0; + } else if (latch.getCount() == 1) { + latch.countDown(); + return -1; + } else { + throw new AssertionError("nextScheduledTimeAfter invoked more than the expected number of times"); + } + })); + + listenersLatch.await(); + assertTrue(listeners.stream().map(Tuple::v2).allMatch(count -> count.get() == numberOfSchedules)); + latch.await(); + assertFailedListenerLogMessage(mockLogger, numberOfSchedules * numberOfListeners); + verifyNoMoreInteractions(mockLogger); + } finally { + engine.stop(); + } + } + + private void assertFailedListenerLogMessage(Logger mockLogger, int times) { + final ArgumentCaptor messageCaptor = ArgumentCaptor.forClass(ParameterizedMessage.class); + final ArgumentCaptor throwableCaptor = ArgumentCaptor.forClass(Throwable.class); + verify(mockLogger, times(times)).warn(messageCaptor.capture(), throwableCaptor.capture()); + for (final ParameterizedMessage message : messageCaptor.getAllValues()) { + assertThat(message.getFormat(), equalTo("listener failed while handling triggered event [{}]")); + assertThat(message.getParameters(), arrayWithSize(1)); + assertThat(message.getParameters()[0], equalTo(getTestName())); + } + for (final Throwable throwable : throwableCaptor.getAllValues()) { + assertThat(throwable, instanceOf(RuntimeException.class)); + assertThat(throwable.getMessage(), equalTo(getTestName())); + } + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java index ae458cbb2f5..a68a522f024 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java @@ -58,11 +58,17 @@ public class PutRoleRequestTests extends ESTestCase { final PutRoleRequest original = buildRandomRequest(); final BytesStreamOutput out = new BytesStreamOutput(); + if (randomBoolean()) { + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT); + logger.info("Serializing with version {}", version); + out.setVersion(version); + } original.writeTo(out); final PutRoleRequest copy = new PutRoleRequest(); final NamedWriteableRegistry registry = new 
NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); StreamInput in = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(out.bytes())), registry); + in.setVersion(out.getVersion()); copy.readFrom(in); assertThat(copy.roleDescriptor(), equalTo(original.roleDescriptor())); @@ -72,7 +78,7 @@ public class PutRoleRequestTests extends ESTestCase { final PutRoleRequest original = buildRandomRequest(); final BytesStreamOutput out = new BytesStreamOutput(); - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_0, Version.V_6_3_2); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_2); out.setVersion(version); original.writeTo(out); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/CreateTokenRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestTests.java similarity index 78% rename from x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/CreateTokenRequestTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestTests.java index 44045263284..bd23198e8ea 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/CreateTokenRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.security.action.token; +package org.elasticsearch.xpack.core.security.action.token; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.settings.SecureString; @@ -20,7 +20,7 @@ public class CreateTokenRequestTests extends ESTestCase { ActionRequestValidationException ve = request.validate(); assertNotNull(ve); assertEquals(1, ve.validationErrors().size()); - assertThat(ve.validationErrors().get(0), containsString("[password, refresh_token]")); + assertThat(ve.validationErrors().get(0), containsString("[password, refresh_token, client_credentials]")); assertThat(ve.validationErrors().get(0), containsString("grant_type")); request.setGrantType("password"); @@ -72,5 +72,19 @@ public class CreateTokenRequestTests extends ESTestCase { assertNotNull(ve); assertEquals(1, ve.validationErrors().size()); assertThat(ve.validationErrors(), hasItem("refresh_token is missing")); + + request.setGrantType("client_credentials"); + ve = request.validate(); + assertNull(ve); + + request.setUsername(randomAlphaOfLengthBetween(1, 32)); + request.setPassword(new SecureString(randomAlphaOfLengthBetween(1, 32).toCharArray())); + request.setRefreshToken(randomAlphaOfLengthBetween(1, 32)); + ve = request.validate(); + assertNotNull(ve); + assertEquals(3, ve.validationErrors().size()); + assertThat(ve.validationErrors(), hasItem(containsString("username is not supported"))); + assertThat(ve.validationErrors(), hasItem(containsString("password is not supported"))); + assertThat(ve.validationErrors(), hasItem(containsString("refresh_token is not supported"))); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java new file mode 100644 index 00000000000..b784310fdb2 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.token; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; + +public class CreateTokenResponseTests extends ESTestCase { + + public void testSerialization() throws Exception { + CreateTokenResponse response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), + randomBoolean() ? null : "FULL", randomAlphaOfLengthBetween(1, 10)); + try (BytesStreamOutput output = new BytesStreamOutput()) { + response.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + CreateTokenResponse serialized = new CreateTokenResponse(); + serialized.readFrom(input); + assertEquals(response, serialized); + } + } + + response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), + randomBoolean() ? null : "FULL", null); + try (BytesStreamOutput output = new BytesStreamOutput()) { + response.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + CreateTokenResponse serialized = new CreateTokenResponse(); + serialized.readFrom(input); + assertEquals(response, serialized); + } + } + } + + public void testSerializationToPre62Version() throws Exception { + CreateTokenResponse response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), + randomBoolean() ? null : "FULL", randomBoolean() ? null : randomAlphaOfLengthBetween(1, 10)); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_1_4); + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(version); + response.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + input.setVersion(version); + CreateTokenResponse serialized = new CreateTokenResponse(); + serialized.readFrom(input); + assertNull(serialized.getRefreshToken()); + assertEquals(response.getTokenString(), serialized.getTokenString()); + assertEquals(response.getExpiresIn(), serialized.getExpiresIn()); + assertEquals(response.getScope(), serialized.getScope()); + } + } + } + + public void testSerializationToPost62Pre65Version() throws Exception { + CreateTokenResponse response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), + randomBoolean() ? 
null : "FULL", randomAlphaOfLengthBetween(1, 10)); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_2_0, Version.V_6_4_0); + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(version); + response.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + input.setVersion(version); + CreateTokenResponse serialized = new CreateTokenResponse(); + serialized.readFrom(input); + assertEquals(response, serialized); + } + } + + // no refresh token + response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), + randomBoolean() ? null : "FULL", null); + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(version); + response.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + input.setVersion(version); + CreateTokenResponse serialized = new CreateTokenResponse(); + serialized.readFrom(input); + assertEquals("", serialized.getRefreshToken()); + assertEquals(response.getTokenString(), serialized.getTokenString()); + assertEquals(response.getExpiresIn(), serialized.getExpiresIn()); + assertEquals(response.getScope(), serialized.getScope()); + } + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java index f458311e685..a6706542e96 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; @@ -28,9 +29,10 @@ import static org.hamcrest.Matchers.nullValue; public class HasPrivilegesRequestTests extends ESTestCase { - public void testSerializationV7() throws IOException { + public void testSerializationV64OrLater() throws IOException { final HasPrivilegesRequest original = randomRequest(); - final HasPrivilegesRequest copy = serializeAndDeserialize(original, Version.V_7_0_0_alpha1); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT); + final HasPrivilegesRequest copy = serializeAndDeserialize(original, version); assertThat(copy.username(), equalTo(original.username())); assertThat(copy.clusterPrivileges(), equalTo(original.clusterPrivileges())); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponseTests.java new file mode 100644 index 00000000000..89c58945bad --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponseTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.security.action.user;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.VersionUtils;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class HasPrivilegesResponseTests extends ESTestCase {
+
+    public void testSerializationV64OrLater() throws IOException {
+        final HasPrivilegesResponse original = randomResponse();
+        final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT);
+        final HasPrivilegesResponse copy = serializeAndDeserialize(original, version);
+
+        assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch()));
+//        assertThat(copy.getClusterPrivileges(), equalTo(original.getClusterPrivileges()));
+        assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges()));
+        assertThat(copy.getApplicationPrivileges(), equalTo(original.getApplicationPrivileges()));
+    }
+
+    public void testSerializationV63() throws IOException {
+        final HasPrivilegesResponse original = randomResponse();
+        final HasPrivilegesResponse copy = serializeAndDeserialize(original, Version.V_6_3_0);
+
+        assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch()));
+//        assertThat(copy.getClusterPrivileges(), equalTo(original.getClusterPrivileges()));
+        assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges()));
+        assertThat(copy.getApplicationPrivileges(), equalTo(Collections.emptyMap()));
+    }
+
+    private HasPrivilegesResponse serializeAndDeserialize(HasPrivilegesResponse original, Version version) throws IOException {
+        logger.info("Test serialize/deserialize with version {}", version);
+        final BytesStreamOutput out = new BytesStreamOutput();
+        out.setVersion(version);
+        original.writeTo(out);
+
+        final HasPrivilegesResponse copy = new HasPrivilegesResponse();
+        final StreamInput in = out.bytes().streamInput();
+        in.setVersion(version);
+        copy.readFrom(in);
+        assertThat(in.read(), equalTo(-1));
+        return copy;
+    }
+
+    private HasPrivilegesResponse randomResponse() {
+        final Map<String, Boolean> cluster = new HashMap<>();
+        for (String priv : randomArray(1, 6, String[]::new, () -> randomAlphaOfLengthBetween(3, 12))) {
+            cluster.put(priv, randomBoolean());
+        }
+        final Collection<HasPrivilegesResponse.ResourcePrivileges> index = randomResourcePrivileges();
+        final Map<String, Collection<HasPrivilegesResponse.ResourcePrivileges>> application = new HashMap<>();
+        for (String app : randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 6).toLowerCase(Locale.ROOT))) {
+            application.put(app, randomResourcePrivileges());
+        }
+        return new HasPrivilegesResponse(randomBoolean(), cluster, index, application);
+    }
+
+    private Collection<HasPrivilegesResponse.ResourcePrivileges> randomResourcePrivileges() {
+        final Collection<HasPrivilegesResponse.ResourcePrivileges> list = new ArrayList<>();
+        for (String resource : randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(2, 6))) {
+            final Map<String, Boolean> privileges = new HashMap<>();
+            for (String priv : randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(3, 8))) {
+                privileges.put(priv, randomBoolean());
+            }
+            list.add(new HasPrivilegesResponse.ResourcePrivileges(resource, privileges));
+        }
+        return list;
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java
index 2598461c372..15593f0b82e 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java
@@ -50,7 +50,7 @@ public class DefaultAuthenticationFailureHandlerTests extends ESTestCase {
         if (testDefault) {
             assertWWWAuthenticateWithSchemes(ese, basicAuthScheme);
         } else {
-            assertWWWAuthenticateWithSchemes(ese, basicAuthScheme, bearerAuthScheme);
+            assertWWWAuthenticateWithSchemes(ese, bearerAuthScheme, basicAuthScheme);
         }
     }
@@ -83,12 +83,12 @@ public class DefaultAuthenticationFailureHandlerTests extends ESTestCase {
         assertThat(ese.getHeader("WWW-Authenticate"), is(notNullValue()));
         assertThat(ese, is(sameInstance(cause)));
         if (withAuthenticateHeader == false) {
-            assertWWWAuthenticateWithSchemes(ese, basicAuthScheme, bearerAuthScheme, negotiateAuthScheme);
+            assertWWWAuthenticateWithSchemes(ese, negotiateAuthScheme, bearerAuthScheme, basicAuthScheme);
         } else {
             if (selectedScheme.contains("Negotiate ")) {
                 assertWWWAuthenticateWithSchemes(ese, selectedScheme);
             } else {
-                assertWWWAuthenticateWithSchemes(ese, basicAuthScheme, bearerAuthScheme, negotiateAuthScheme);
+                assertWWWAuthenticateWithSchemes(ese, negotiateAuthScheme, bearerAuthScheme, basicAuthScheme);
             }
         }
         assertThat(ese.getMessage(), equalTo("unauthorized"));
@@ -102,11 +102,30 @@ public class DefaultAuthenticationFailureHandlerTests extends ESTestCase {
             assertThat(ese, is(notNullValue()));
             assertThat(ese.getHeader("WWW-Authenticate"), is(notNullValue()));
             assertThat(ese.getMessage(), equalTo("error attempting to authenticate request"));
-            assertWWWAuthenticateWithSchemes(ese, basicAuthScheme, bearerAuthScheme, negotiateAuthScheme);
+            assertWWWAuthenticateWithSchemes(ese, negotiateAuthScheme, bearerAuthScheme, basicAuthScheme);
         }
     }

+    public void testSortsWWWAuthenticateHeaderValues() {
+        final String basicAuthScheme = "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\"";
+        final String bearerAuthScheme = "Bearer realm=\"" + XPackField.SECURITY + "\"";
+        final String negotiateAuthScheme = randomFrom("Negotiate", "Negotiate Ijoijksdk");
+        final Map<String, List<String>> failureResponseHeaders = new HashMap<>();
+        final List<String> supportedSchemes = Arrays.asList(basicAuthScheme, bearerAuthScheme, negotiateAuthScheme);
+        Collections.shuffle(supportedSchemes, random());
+        failureResponseHeaders.put("WWW-Authenticate", supportedSchemes);
+        final DefaultAuthenticationFailureHandler failureHandler = new DefaultAuthenticationFailureHandler(failureResponseHeaders);
+
+        final ElasticsearchSecurityException ese = failureHandler.exceptionProcessingRequest(Mockito.mock(RestRequest.class), null,
+            new ThreadContext(Settings.builder().build()));
+
+        assertThat(ese, is(notNullValue()));
+        assertThat(ese.getHeader("WWW-Authenticate"), is(notNullValue()));
+        assertThat(ese.getMessage(), equalTo("error attempting to authenticate request"));
+        assertWWWAuthenticateWithSchemes(ese, negotiateAuthScheme, bearerAuthScheme, basicAuthScheme);
+    }
+
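The serialization tests in this patch all follow one round-trip idiom: pin the same wire Version on the output and the input stream, deserialize, and assert that fields the older version does not know about come back empty. Forgetting to pin the version on the input side is exactly what the `in.setVersion(out.getVersion())` line added to PutRoleRequestTests above fixes. A minimal sketch of the idiom, with `MyResponse` standing in as a hypothetical Writeable class (imports as in the surrounding tests):

    // serialize as if sending to a 6.3 node
    BytesStreamOutput out = new BytesStreamOutput();
    out.setVersion(Version.V_6_3_0);
    original.writeTo(out);

    // deserialize with the same version pinned on the input stream
    StreamInput in = out.bytes().streamInput();
    in.setVersion(Version.V_6_3_0);
    MyResponse copy = new MyResponse();   // hypothetical Writeable response
    copy.readFrom(in);
    assertThat(in.read(), equalTo(-1));   // the whole payload was consumed

    private void assertWWWAuthenticateWithSchemes(final ElasticsearchSecurityException ese, final String...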
schemes) { assertThat(ese.getHeader("WWW-Authenticate").size(), is(schemes.length)); assertThat(ese.getHeader("WWW-Authenticate"), contains(schemes)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java index dca2f37f3f2..bd6ac12ee3c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java @@ -108,14 +108,14 @@ public class DocumentSubsetReaderTests extends ESTestCase { new TermQuery(new Term("field", "value1")))); assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1)); TopDocs result = indexSearcher.search(new MatchAllDocsQuery(), 1); - assertThat(result.totalHits, equalTo(1L)); + assertThat(result.totalHits.value, equalTo(1L)); assertThat(result.scoreDocs[0].doc, equalTo(0)); indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, new TermQuery(new Term("field", "value2")))); assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1)); result = indexSearcher.search(new MatchAllDocsQuery(), 1); - assertThat(result.totalHits, equalTo(1L)); + assertThat(result.totalHits.value, equalTo(1L)); assertThat(result.scoreDocs[0].doc, equalTo(1)); // this doc has been marked as deleted: @@ -123,13 +123,13 @@ public class DocumentSubsetReaderTests extends ESTestCase { new TermQuery(new Term("field", "value3")))); assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(0)); result = indexSearcher.search(new MatchAllDocsQuery(), 1); - assertThat(result.totalHits, equalTo(0L)); + assertThat(result.totalHits.value, equalTo(0L)); indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, new TermQuery(new Term("field", "value4")))); assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1)); result = indexSearcher.search(new MatchAllDocsQuery(), 1); - assertThat(result.totalHits, equalTo(1L)); + assertThat(result.totalHits.value, equalTo(1L)); assertThat(result.scoreDocs[0].doc, equalTo(3)); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java index e71b0e5e8bd..d2f7d7bdb96 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java @@ -78,7 +78,7 @@ import static org.hamcrest.Matchers.equalTo; /** Simple tests for this filterreader */ public class FieldSubsetReaderTests extends ESTestCase { - + /** * test filtering two string fields */ @@ -86,16 +86,16 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "test", Field.Store.NO)); doc.add(new StringField("fieldB", "test", Field.Store.NO)); iw.addDocument(doc); - + // open reader DirectoryReader ir = 
FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); Set seenFields = new HashSet<>(); @@ -105,11 +105,11 @@ public class FieldSubsetReaderTests extends ESTestCase { assertEquals(Collections.singleton("fieldA"), seenFields); assertNotNull(segmentReader.terms("fieldA")); assertNull(segmentReader.terms("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two int points */ @@ -181,25 +181,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StoredField("fieldA", "testA")); doc.add(new StoredField("fieldB", "testB")); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals("testA", d2.get("fieldA")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two stored fields (binary) */ @@ -207,25 +207,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StoredField("fieldA", new BytesRef("testA"))); doc.add(new StoredField("fieldB", new BytesRef("testB"))); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals(new BytesRef("testA"), d2.getBinaryValue("fieldA")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two stored fields (int) */ @@ -233,25 +233,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StoredField("fieldA", 1)); doc.add(new StoredField("fieldB", 2)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals(1, d2.getField("fieldA").numericValue()); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two stored fields (long) */ @@ -259,25 +259,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StoredField("fieldA", 1L)); doc.add(new StoredField("fieldB", 2L)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Document d2 = ir.document(0); assertEquals(1, 
d2.getFields().size()); assertEquals(1L, d2.getField("fieldA").numericValue()); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two stored fields (float) */ @@ -285,25 +285,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StoredField("fieldA", 1F)); doc.add(new StoredField("fieldB", 2F)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals(1F, d2.getField("fieldA").numericValue()); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two stored fields (double) */ @@ -311,25 +311,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StoredField("fieldA", 1D)); doc.add(new StoredField("fieldB", 2D)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals(1D, d2.getField("fieldA").numericValue()); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two vector fields */ @@ -337,7 +337,7 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); FieldType ft = new FieldType(StringField.TYPE_NOT_STORED); @@ -345,10 +345,10 @@ public class FieldSubsetReaderTests extends ESTestCase { doc.add(new Field("fieldA", "testA", ft)); doc.add(new Field("fieldB", "testB", ft)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Fields vectors = ir.getTermVectors(0); Set seenFields = new HashSet<>(); @@ -356,11 +356,11 @@ public class FieldSubsetReaderTests extends ESTestCase { seenFields.add(field); } assertEquals(Collections.singleton("fieldA"), seenFields); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two text fields */ @@ -368,25 +368,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new TextField("fieldA", "test", Field.Store.NO)); doc.add(new TextField("fieldB", "test", Field.Store.NO)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); assertNotNull(segmentReader.getNormValues("fieldA")); 
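The dozens of near-identical hunks in FieldSubsetReaderTests are trailing-whitespace cleanups; the substantive pattern every one of these tests exercises is the same, condensed here from calls already present in this file (no new API, just the recurring shape):

    // index a document with two fields, then wrap the reader with an automaton
    // that accepts only "fieldA"; the wrapped reader hides "fieldB" everywhere:
    // terms, stored fields, doc values, term vectors, norms and field infos
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
    Document doc = new Document();
    doc.add(new StringField("fieldA", "test", Field.Store.NO));
    doc.add(new StringField("fieldB", "test", Field.Store.NO));
    iw.addDocument(doc);
    DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw),
            new CharacterRunAutomaton(Automata.makeString("fieldA")));
    TestUtil.checkReader(ir);
    IOUtils.close(ir, iw, dir);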
assertNull(segmentReader.getNormValues("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two numeric dv fields */ @@ -394,16 +394,16 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new NumericDocValuesField("fieldA", 1)); doc.add(new NumericDocValuesField("fieldB", 2)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); NumericDocValues values = segmentReader.getNumericDocValues("fieldA"); @@ -411,11 +411,11 @@ public class FieldSubsetReaderTests extends ESTestCase { assertTrue(values.advanceExact(0)); assertEquals(1, values.longValue()); assertNull(segmentReader.getNumericDocValues("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two binary dv fields */ @@ -423,16 +423,16 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new BinaryDocValuesField("fieldA", new BytesRef("testA"))); doc.add(new BinaryDocValuesField("fieldB", new BytesRef("testB"))); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); BinaryDocValues values = segmentReader.getBinaryDocValues("fieldA"); @@ -444,7 +444,7 @@ public class FieldSubsetReaderTests extends ESTestCase { TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two sorted dv fields */ @@ -452,16 +452,16 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new SortedDocValuesField("fieldA", new BytesRef("testA"))); doc.add(new SortedDocValuesField("fieldB", new BytesRef("testB"))); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); SortedDocValues values = segmentReader.getSortedDocValues("fieldA"); @@ -469,11 +469,11 @@ public class FieldSubsetReaderTests extends ESTestCase { assertTrue(values.advanceExact(0)); assertEquals(new BytesRef("testA"), values.binaryValue()); assertNull(segmentReader.getSortedDocValues("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two sortedset dv fields */ @@ -481,16 +481,16 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new SortedSetDocValuesField("fieldA", new BytesRef("testA"))); doc.add(new SortedSetDocValuesField("fieldB", new 
BytesRef("testB"))); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); SortedSetDocValues dv = segmentReader.getSortedSetDocValues("fieldA"); @@ -500,11 +500,11 @@ public class FieldSubsetReaderTests extends ESTestCase { assertEquals(SortedSetDocValues.NO_MORE_ORDS, dv.nextOrd()); assertEquals(new BytesRef("testA"), dv.lookupOrd(0)); assertNull(segmentReader.getSortedSetDocValues("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two sortednumeric dv fields */ @@ -512,16 +512,16 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new SortedNumericDocValuesField("fieldA", 1)); doc.add(new SortedNumericDocValuesField("fieldB", 2)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); SortedNumericDocValues dv = segmentReader.getSortedNumericDocValues("fieldA"); @@ -530,11 +530,11 @@ public class FieldSubsetReaderTests extends ESTestCase { assertEquals(1, dv.docValueCount()); assertEquals(1, dv.nextValue()); assertNull(segmentReader.getSortedNumericDocValues("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test we have correct fieldinfos metadata */ @@ -542,27 +542,27 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "test", Field.Store.NO)); doc.add(new StringField("fieldB", "test", Field.Store.NO)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); FieldInfos infos = segmentReader.getFieldInfos(); assertEquals(1, infos.size()); assertNotNull(infos.fieldInfo("fieldA")); assertNull(infos.fieldInfo("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test special handling for _source field. 
*/ @@ -570,7 +570,7 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "testA", Field.Store.NO)); @@ -578,16 +578,16 @@ public class FieldSubsetReaderTests extends ESTestCase { byte bytes[] = "{\"fieldA\":\"testA\", \"fieldB\":\"testB\"}".getBytes(StandardCharsets.UTF_8); doc.add(new StoredField(SourceFieldMapper.NAME, bytes, 0, bytes.length)); iw.addDocument(doc); - + // open reader Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", SourceFieldMapper.NAME)); DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals("{\"fieldA\":\"testA\"}", d2.getBinaryValue(SourceFieldMapper.NAME).utf8ToString()); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } @@ -741,7 +741,7 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "test", Field.Store.NO)); @@ -749,37 +749,37 @@ public class FieldSubsetReaderTests extends ESTestCase { doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldA", Field.Store.NO)); doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldB", Field.Store.NO)); iw.addDocument(doc); - + // open reader Set fields = new HashSet<>(); fields.add("fieldA"); Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", FieldNamesFieldMapper.NAME)); DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); Terms terms = segmentReader.terms(FieldNamesFieldMapper.NAME); TermsEnum termsEnum = terms.iterator(); assertEquals(new BytesRef("fieldA"), termsEnum.next()); assertNull(termsEnum.next()); - - // seekExact + + // seekExact termsEnum = terms.iterator(); assertTrue(termsEnum.seekExact(new BytesRef("fieldA"))); assertFalse(termsEnum.seekExact(new BytesRef("fieldB"))); - - // seekCeil + + // seekCeil termsEnum = terms.iterator(); assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef("fieldA"))); assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("field0000"))); assertEquals(new BytesRef("fieldA"), termsEnum.term()); assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("fieldAAA"))); assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("fieldB"))); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test special handling for _field_names field (three fields, to exercise termsenum better) */ @@ -787,7 +787,7 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "test", Field.Store.NO)); @@ -797,11 +797,11 @@ public class FieldSubsetReaderTests extends ESTestCase { doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldB", Field.Store.NO)); doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldC", 
Field.Store.NO)); iw.addDocument(doc); - + // open reader Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", "fieldC", FieldNamesFieldMapper.NAME)); DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); - + // see only two fields LeafReader segmentReader = ir.leaves().get(0).reader(); Terms terms = segmentReader.terms(FieldNamesFieldMapper.NAME); @@ -809,24 +809,24 @@ public class FieldSubsetReaderTests extends ESTestCase { assertEquals(new BytesRef("fieldA"), termsEnum.next()); assertEquals(new BytesRef("fieldC"), termsEnum.next()); assertNull(termsEnum.next()); - - // seekExact + + // seekExact termsEnum = terms.iterator(); assertTrue(termsEnum.seekExact(new BytesRef("fieldA"))); assertFalse(termsEnum.seekExact(new BytesRef("fieldB"))); assertTrue(termsEnum.seekExact(new BytesRef("fieldC"))); - - // seekCeil + + // seekCeil termsEnum = terms.iterator(); assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef("fieldA"))); assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("fieldB"))); assertEquals(new BytesRef("fieldC"), termsEnum.term()); assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("fieldD"))); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test _field_names where a field is permitted, but doesn't exist in the segment. */ @@ -834,7 +834,7 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "test", Field.Store.NO)); @@ -842,27 +842,27 @@ public class FieldSubsetReaderTests extends ESTestCase { doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldA", Field.Store.NO)); doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldB", Field.Store.NO)); iw.addDocument(doc); - + // open reader Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", "fieldC", FieldNamesFieldMapper.NAME)); DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); Terms terms = segmentReader.terms(FieldNamesFieldMapper.NAME); - - // seekExact + + // seekExact TermsEnum termsEnum = terms.iterator(); assertFalse(termsEnum.seekExact(new BytesRef("fieldC"))); - - // seekCeil + + // seekCeil termsEnum = terms.iterator(); assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("fieldC"))); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test where _field_names does not exist */ @@ -870,25 +870,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "test", Field.Store.NO)); doc.add(new StringField("fieldB", "test", Field.Store.NO)); iw.addDocument(doc); - + // open reader Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", SourceFieldMapper.NAME)); DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); assertNull(segmentReader.terms(FieldNamesFieldMapper.NAME)); - + TestUtil.checkReader(ir); IOUtils.close(ir, 
iw, dir); } - + /** test that core cache key (needed for NRT) is working */ public void testCoreCacheKey() throws Exception { Directory dir = newDirectory(); @@ -896,7 +896,7 @@ public class FieldSubsetReaderTests extends ESTestCase { iwc.setMaxBufferedDocs(100); iwc.setMergePolicy(NoMergePolicy.INSTANCE); IndexWriter iw = new IndexWriter(dir, iwc); - + // add two docs, id:0 and id:1 Document doc = new Document(); Field idField = new StringField("id", "", Field.Store.NO); @@ -905,7 +905,7 @@ public class FieldSubsetReaderTests extends ESTestCase { iw.addDocument(doc); idField.setStringValue("1"); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("id"))); assertEquals(2, ir.numDocs()); @@ -914,17 +914,17 @@ public class FieldSubsetReaderTests extends ESTestCase { // delete id:0 and reopen iw.deleteDocuments(new Term("id", "0")); DirectoryReader ir2 = DirectoryReader.openIfChanged(ir); - + // we should have the same cache key as before assertEquals(1, ir2.numDocs()); assertEquals(1, ir2.leaves().size()); assertSame(ir.leaves().get(0).reader().getCoreCacheHelper().getKey(), ir2.leaves().get(0).reader().getCoreCacheHelper().getKey()); - + TestUtil.checkReader(ir); IOUtils.close(ir, ir2, iw, dir); } - + /** * test filtering the only vector fields */ @@ -932,7 +932,7 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); FieldType ft = new FieldType(StringField.TYPE_NOT_STORED); @@ -940,17 +940,17 @@ public class FieldSubsetReaderTests extends ESTestCase { doc.add(new Field("fieldA", "testA", ft)); doc.add(new StringField("fieldB", "testB", Field.Store.NO)); // no vectors iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldB"))); - + // sees no fields assertNull(ir.getTermVectors(0)); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering an index with no fields */ @@ -959,10 +959,10 @@ public class FieldSubsetReaderTests extends ESTestCase { IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); iw.addDocument(new Document()); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see no fields LeafReader segmentReader = ir.leaves().get(0).reader(); Set seenFields = new HashSet<>(); @@ -971,14 +971,14 @@ public class FieldSubsetReaderTests extends ESTestCase { } assertEquals(0, seenFields.size()); assertNull(segmentReader.terms("foo")); - + // see no vectors assertNull(segmentReader.getTermVectors(0)); - + // see no stored fields Document document = segmentReader.document(0); assertEquals(0, document.getFields().size()); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java index c26968ce54a..e364b0a7e8a 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java
@@ -76,7 +76,7 @@ import org.elasticsearch.test.IndexSettingsModule;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.junit.After;
 import org.junit.Before;
 import org.mockito.ArgumentCaptor;
@@ -232,7 +232,7 @@ public class SecurityIndexSearcherWrapperUnitTests extends ESTestCase {
                 new SecurityIndexSearcherWrapper(indexSettings, null, null, threadContext, licenseState, scriptService);
         IndexSearcher result = securityIndexSearcherWrapper.wrap(indexSearcher);
         assertThat(result, not(sameInstance(indexSearcher)));
-        assertThat(result.getSimilarity(true), sameInstance(indexSearcher.getSimilarity(true)));
+        assertThat(result.getSimilarity(), sameInstance(indexSearcher.getSimilarity()));
         bitsetFilterCache.close();
     }
@@ -270,7 +270,8 @@ public class SecurityIndexSearcherWrapperUnitTests extends ESTestCase {
         iw.close();
         DirectoryReader directoryReader = DirectoryReader.open(directory);
         IndexSearcher searcher = new IndexSearcher(directoryReader);
-        Weight weight = searcher.createNormalizedWeight(new TermQuery(new Term("field2", "value1")), false);
+        Weight weight = searcher.createWeight(new TermQuery(new Term("field2", "value1")),
+                org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES, 1f);

         LeafReaderContext leaf = directoryReader.leaves().get(0);
@@ -545,8 +546,8 @@ public class SecurityIndexSearcherWrapperUnitTests extends ESTestCase {
         }

         @Override
-        public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
-            return new CreateScorerOnceWeight(query.createWeight(searcher, needsScores, boost));
+        public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.ScoreMode scoreMode, float boost) throws IOException {
+            return new CreateScorerOnceWeight(query.createWeight(searcher, scoreMode, boost));
         }

         @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java
index 6857a48784b..c4c95211d4c 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java
@@ -127,4 +127,13 @@ public class PrivilegeTests extends ESTestCase {
         assertThat(predicate.test("indices:admin/seq_no/global_checkpoint_sync[p]"), is(true));
         assertThat(predicate.test("indices:admin/seq_no/global_checkpoint_sync[r]"), is(true));
     }
+
+    public void testManageCcrPrivilege() {
+        Predicate<String> predicate = ClusterPrivilege.MANAGE_CCR.predicate();
+        assertThat(predicate.test("cluster:admin/xpack/ccr/follow_index"), is(true));
+        assertThat(predicate.test("cluster:admin/xpack/ccr/unfollow_index"), is(true));
+        assertThat(predicate.test("cluster:admin/xpack/ccr/brand_new_api"), is(true));
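The new manage_ccr test relies on cluster privilege predicates being pattern-based rather than enumerated, which is why an action name that does not exist yet ("brand_new_api") still matches. A rough sketch of that behaviour; the wildcard pattern shown is an assumption for illustration, not taken from this patch:

    // hypothetical pattern: MANAGE_CCR is assumed to cover an action-name
    // wildcard, so current and future actions under cluster:admin/xpack/ccr/
    // match without any code change
    Predicate<String> manageCcr = Automatons.predicate("cluster:admin/xpack/ccr/*");
    assert manageCcr.test("cluster:admin/xpack/ccr/follow_index");
    assert manageCcr.test("cluster:admin/xpack/ccr/brand_new_api"); // not yet a real API

+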
assertThat(predicate.test("cluster:admin/xpack/whatever"), is(false)); + } + } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 9cb5e25c5b8..9972fc7b74b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -94,6 +94,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCa import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; import org.elasticsearch.xpack.core.security.user.SystemUser; @@ -147,6 +148,7 @@ public class ReservedRolesStoreTests extends ESTestCase { assertThat(ReservedRolesStore.isReserved(XPackUser.ROLE_NAME), is(true)); assertThat(ReservedRolesStore.isReserved(LogstashSystemUser.ROLE_NAME), is(true)); assertThat(ReservedRolesStore.isReserved(BeatsSystemUser.ROLE_NAME), is(true)); + assertThat(ReservedRolesStore.isReserved(APMSystemUser.ROLE_NAME), is(true)); } public void testIngestAdminRole() { @@ -628,6 +630,30 @@ public class ReservedRolesStoreTests extends ESTestCase { is(false)); } + public void testAPMSystemRole() { + final TransportRequest request = mock(TransportRequest.class); + + RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor(APMSystemUser.ROLE_NAME); + assertNotNull(roleDescriptor); + assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); + + Role APMSystemRole = Role.builder(roleDescriptor, null).build(); + assertThat(APMSystemRole.cluster().check(ClusterHealthAction.NAME, request), is(true)); + assertThat(APMSystemRole.cluster().check(ClusterStateAction.NAME, request), is(true)); + assertThat(APMSystemRole.cluster().check(ClusterStatsAction.NAME, request), is(true)); + assertThat(APMSystemRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); + assertThat(APMSystemRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); + assertThat(APMSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); + assertThat(APMSystemRole.cluster().check(MonitoringBulkAction.NAME, request), is(true)); + + assertThat(APMSystemRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); + + assertThat(APMSystemRole.indices().allowedIndicesMatcher(IndexAction.NAME).test("foo"), is(false)); + assertThat(APMSystemRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".reporting"), is(false)); + assertThat(APMSystemRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), + is(false)); + } + public void testMachineLearningAdminRole() { final TransportRequest request = mock(TransportRequest.class); diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/security/UserTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/UserTests.java similarity index 52% rename from 
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/security/UserTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/UserTests.java index 2e3c67131df..02813f53b8c 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/security/UserTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/UserTests.java @@ -1,23 +1,9 @@ /* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. */ - -package org.elasticsearch.protocol.xpack.security; +package org.elasticsearch.xpack.core.security.user; import org.elasticsearch.test.ESTestCase; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/PemUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/PemUtilsTests.java index b82275a8833..3134d42ce36 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/PemUtilsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/PemUtilsTests.java @@ -32,6 +32,16 @@ public class PemUtilsTests extends ESTestCase { assertThat(privateKey, equalTo(key)); } + public void testReadPKCS8RsaKeyWithBagAttrs() throws Exception { + Key key = getKeyFromKeystore("RSA"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath + ("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_with_bagattrs.pem"), ""::toCharArray); + assertThat(privateKey, notNullValue()); + assertThat(privateKey, equalTo(key)); + } + public void testReadPKCS8DsaKey() throws Exception { Key key = getKeyFromKeystore("DSA"); assertThat(key, notNullValue()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java index 3e36550e46f..df25b2fa126 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java @@ -78,7 +78,6 @@ public class SSLConfigurationReloaderTests extends ESTestCase { /** * Tests reloading a keystore that is used in the KeyManager of SSLContext */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32124") public void testReloadingKeyStore() throws Exception { assumeFalse("Can't run in a FIPS JVM", inFipsJvm()); final Path tempDir = 
createTempDir(); @@ -192,7 +191,6 @@ public class SSLConfigurationReloaderTests extends ESTestCase { * Tests the reloading of SSLContext when the trust store is modified. The same store is used as a TrustStore (for the * reloadable SSLContext used in the HTTPClient) and as a KeyStore for the MockWebServer */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32124") public void testReloadingTrustStore() throws Exception { assumeFalse("Can't run in a FIPS JVM", inFipsJvm()); Path tempDir = createTempDir(); @@ -479,7 +477,9 @@ public class SSLConfigurationReloaderTests extends ESTestCase { try (InputStream is = Files.newInputStream(keyStorePath)) { keyStore.load(is, keyStorePass.toCharArray()); } - final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, keyStorePass.toCharArray()) + // TODO Revisit TLS1.2 pinning when TLS1.3 is fully supported + // https://github.com/elastic/elasticsearch/issues/32276 + final SSLContext sslContext = new SSLContextBuilder().useProtocol("TLSv1.2").loadKeyMaterial(keyStore, keyStorePass.toCharArray()) .build(); MockWebServer server = new MockWebServer(sslContext, false); server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); @@ -493,7 +493,9 @@ public class SSLConfigurationReloaderTests extends ESTestCase { keyStore.load(null, password.toCharArray()); keyStore.setKeyEntry("testnode_ec", PemUtils.readPrivateKey(keyPath, password::toCharArray), password.toCharArray(), CertParsingUtils.readCertificates(Collections.singletonList(certPath))); - final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, password.toCharArray()) + // TODO Revisit TLS1.2 pinning when TLS1.3 is fully supported + // https://github.com/elastic/elasticsearch/issues/32276 + final SSLContext sslContext = new SSLContextBuilder().useProtocol("TLSv1.2").loadKeyMaterial(keyStore, password.toCharArray()) .build(); MockWebServer server = new MockWebServer(sslContext, false); server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); diff --git a/x-pack/plugin/core/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_with_bagattrs.pem b/x-pack/plugin/core/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_with_bagattrs.pem new file mode 100644 index 00000000000..ce8299cd070 --- /dev/null +++ b/x-pack/plugin/core/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_with_bagattrs.pem @@ -0,0 +1,32 @@ +Bag Attributes + friendlyName: testnode_rsa + localKeyID: 54 69 6D 65 20 31 35 32 35 33 33 36 38 32 39 33 39 37 +Key Attributes: +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDesZnVBuxbT4y7 +KtIuYx8MUq0sGQgVbxXSBG66sWDU9Qoo1HUyra0xXCONgRMBT9RjSIpk7OOC9g8q +ENNgFO179YdHVkrgJhW/tNBf+C0VAb+B79zu7SwtyH2nt9t378dmItL+sERkMiiG ++BS/O+cDz44hifDiS7Eqj/mJugAhLjWSUyD+UBObxXvUsxjryKeG3vX9mRCgAcqB +xH3PjI1i9DVaoobwMbwpE5eW2WXexOspuXnMmGfrrR6z/VmdHqe/C3rGdJOX+Y0c +yOR+/Vuzisn+nLeo/GJx2hIif8rKiNRyAdUXfx+4DLYJBN2NUbl9aP2LP6ZC8ubf +6qwhhB0XAgMBAAECggEBAKuzP6qSNfaJNTayY2/EmRHFRSP1ANiV17sgE8f6L3DC +pdypQtuaMSkXo4nc9SxTwqvyKFJ8m0ZENZj3dCJmwFyNCIqmLAD7HFW9MdRs40WJ +HYEv0aaeUyvRo6CHD74/r/w96XTZr0GZssmtyUFRDGNRyoJter7gIW9xprLcKHFr +YTmdaAXbOm5W/K3844EBouTYzYnZYWQjB3jT/g5dIic3AtLb5YfGlpaXXb74xTOU +BqY1uKonGiDCh0aXXRl2Ucyre6FWslNNy4cAAXm6/5GT6iMo7wDXQftvtyK2IszP +IFcOG6xcAaJjgZ5wvM3ch0qNhQi4vL7c4Bm5JS9meoECgYEA88ItaVrfm2osX/6/ +fA8wYxxYU5RQRyOgLuzBXoRkISynLJaLVj2gFOQxVQeUK++xK6R182RQatOJcWFT 
+WwmIL3CchCwnnXgPvMc51iFKY94DbdvrRatP8c5sSk7IQlpS3aVa7f7DCqexggr5 +3PYysuiLirL+n9I1oZiUxpsS6/cCgYEA6eCcDshQzb7UQfWy//BRMp7u6DDuq+54 +38kJIFsPX0/CGyWsiFYEac8VH7jaGof99j7Zuebeb50TX57ZCBEK2LaHe474ggkY +GGSoo3VWBn44A1P5ADaRGRwJ4/u79qAg0ldnyxFHWtW+Wbn11DoOg40rl+DOnFBJ +W+bWJn4az+ECgYEAzWduDt5lmLfiRs4LG4ZNFudWwq8y6o9ptsEIvRXArnfLM3Z0 +Waq6T4Bu1aD6Sf/EAuul/QAmB67TnbgOnqMsoBU7vuDaTQZT9JbI9Ni+r+Lwbs2n +tuCCEFgKxp8Wf1tPgriJJA3O2xauLNAE9x57YGk21Ry6FYD0coR5sdYRHscCgYEA +lGQM4Fw82K5RoqAwOK/T9RheYTha1v/x9ZtqjPr53/GNKQhYVhCtsCzSLFRvHhJX +EpyCLK/NRmgVWMBC2BloFmSJxd3K00bN4PxM+5mBQZFoHMR04qu8mH/vzpV0h2DG +Mm9+zZti+MFRi0CwNz2248T4ed8LeKaARS1LhxTQEkECgYBFsPNkfGWyP4zsgzFs +3tMgXnIgl3Lh+vnEIzVakASf3RZrSucJhA713u5L9YB64wPdVJp4YZIoEmHebP9J +Jt1f9ghcWk6ffUVBQJPmWuRbB/BU8SI+kgtf50Jnizbfm5qoQEt2UdGUbwU3P1+t +z4SnBvIZ3b2inN+Hwdm5onOBlw== +-----END PRIVATE KEY----- diff --git a/x-pack/plugin/deprecation/build.gradle b/x-pack/plugin/deprecation/build.gradle index 3746287d615..d89eb62e884 100644 --- a/x-pack/plugin/deprecation/build.gradle +++ b/x-pack/plugin/deprecation/build.gradle @@ -10,7 +10,7 @@ esplugin { archivesBaseName = 'x-pack-deprecation' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" } run { diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 0f54784a33f..d496eea2f0d 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -7,10 +7,7 @@ package org.elasticsearch.xpack.deprecation; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; @@ -23,153 +20,9 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; public class IndexDeprecationChecksTests extends ESTestCase { - - private static void assertSettingsAndIssue(String key, String value, DeprecationIssue expected) { - IndexMetaData indexMetaData = IndexMetaData.builder("test") - .settings(settings(Version.V_5_6_0) - .put(key, value)) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - List issues = DeprecationInfoAction.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); - assertEquals(singletonList(expected), issues); - } - - public void testCoerceBooleanDeprecation() throws IOException { - XContentBuilder mapping = XContentFactory.jsonBuilder(); - mapping.startObject(); { - mapping.startObject("properties"); { - mapping.startObject("my_boolean"); { - mapping.field("type", "boolean"); - } - mapping.endObject(); - mapping.startObject("my_object"); { - mapping.startObject("properties"); { - mapping.startObject("my_inner_boolean"); { - mapping.field("type", "boolean"); - } - mapping.endObject(); - mapping.startObject("my_text"); { - mapping.field("type", "text"); - 
mapping.startObject("fields"); { - mapping.startObject("raw"); { - mapping.field("type", "boolean"); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - - IndexMetaData indexMetaData = IndexMetaData.builder("test") - .putMapping("testBooleanCoercion", Strings.toString(mapping)) - .settings(settings(Version.V_5_6_0)) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - - DeprecationIssue expected = new DeprecationIssue(DeprecationIssue.Level.INFO, - "Coercion of boolean fields", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_mappings_changes.html#_coercion_of_boolean_fields", - "[[type: testBooleanCoercion, field: my_boolean], [type: testBooleanCoercion, field: my_inner_boolean]," + - " [type: testBooleanCoercion, field: my_text, multifield: raw]]"); - List issues = DeprecationInfoAction.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); - assertEquals(singletonList(expected), issues); - } - - public void testMatchMappingTypeCheck() throws IOException { - XContentBuilder mapping = XContentFactory.jsonBuilder(); - mapping.startObject(); { - mapping.startArray("dynamic_templates"); - { - mapping.startObject(); - { - mapping.startObject("integers"); - { - mapping.field("match_mapping_type", "UNKNOWN_VALUE"); - mapping.startObject("mapping"); - { - mapping.field("type", "integer"); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endArray(); - } - mapping.endObject(); - - IndexMetaData indexMetaData = IndexMetaData.builder("test") - .putMapping("test", Strings.toString(mapping)) - .settings(settings(Version.V_5_6_0)) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - - DeprecationIssue expected = new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "Unrecognized match_mapping_type options not silently ignored", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_mappings_changes.html#_unrecognized_literal_match_mapping_type_literal_options_not_silently_ignored", - "[type: test, dynamicFieldDefinitionintegers, unknown match_mapping_type[UNKNOWN_VALUE]]"); - List issues = DeprecationInfoAction.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); - assertEquals(singletonList(expected), issues); - } - - public void testBaseSimilarityDefinedCheck() { - assertSettingsAndIssue("index.similarity.base.type", "classic", - new DeprecationIssue(DeprecationIssue.Level.WARNING, - "The base similarity is now ignored as coords and query normalization have been removed." + - "If provided, this setting will be ignored and issue a deprecation warning", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_settings_changes.html#_similarity_settings", null)); - } - - public void testIndexStoreTypeCheck() { - assertSettingsAndIssue("index.store.type", "niofs", - new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "The default index.store.type has been removed. 
If you were using it, " + - "we advise that you simply remove it from your index settings and Elasticsearch" + - "will use the best store implementation for your operating system.", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_settings_changes.html#_store_settings", null)); - } - public void testStoreThrottleSettingsCheck() { - assertSettingsAndIssue("index.store.throttle.max_bytes_per_sec", "32", - new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "index.store.throttle settings are no longer recognized. these settings should be removed", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_settings_changes.html#_store_throttling_settings", - "present settings: [index.store.throttle.max_bytes_per_sec]")); - assertSettingsAndIssue("index.store.throttle.type", "none", - new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "index.store.throttle settings are no longer recognized. these settings should be removed", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_settings_changes.html#_store_throttling_settings", - "present settings: [index.store.throttle.type]")); - } - - public void testSharedFileSystemSettingsCheck() { - assertSettingsAndIssue("index.shared_filesystem", "true", - new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "[index.shared_filesystem] setting should be removed", - "https://www.elastic.co/guide/en/elasticsearch/reference/6.0/" + - "breaking_60_indices_changes.html#_shadow_replicas_have_been_removed", null)); - } - public void testDelimitedPayloadFilterCheck() throws IOException { Settings settings = settings( - VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1))) + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1))) .put("index.analysis.filter.my_delimited_payload_filter.type", "delimited_payload_filter") .put("index.analysis.filter.my_delimited_payload_filter.delimiter", "^") .put("index.analysis.filter.my_delimited_payload_filter.encoding", "identity").build(); @@ -183,4 +36,4 @@ public class IndexDeprecationChecksTests extends ESTestCase { List issues = DeprecationInfoAction.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); assertEquals(singletonList(expected), issues); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/graph/build.gradle b/x-pack/plugin/graph/build.gradle index 2b0f592b720..069bfa5fbbe 100644 --- a/x-pack/plugin/graph/build.gradle +++ b/x-pack/plugin/graph/build.gradle @@ -10,7 +10,8 @@ esplugin { archivesBaseName = 'x-pack-graph' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java index 4eb136040e9..25f2511fbc0 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java @@ -24,6 
+24,15 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.protocol.xpack.graph.Connection; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.Vertex; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder; @@ -39,16 +48,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.graph.action.Connection; -import org.elasticsearch.xpack.core.graph.action.Connection.ConnectionId; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest.TermBoost; -import org.elasticsearch.xpack.core.graph.action.GraphExploreResponse; -import org.elasticsearch.xpack.core.graph.action.Hop; -import org.elasticsearch.xpack.core.graph.action.Vertex; -import org.elasticsearch.xpack.core.graph.action.Vertex.VertexId; -import org.elasticsearch.xpack.core.graph.action.VertexRequest; import java.util.ArrayList; import java.util.HashMap; diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java index 3f11d0c72bd..778eb261a07 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java @@ -12,14 +12,14 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest.TermBoost; -import org.elasticsearch.xpack.core.graph.action.Hop; -import org.elasticsearch.xpack.core.graph.action.VertexRequest; import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; diff --git a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/test/GraphTests.java 
b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/test/GraphTests.java index 5bebef3d2d4..a58d8e8a8b0 100644 --- a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/test/GraphTests.java +++ b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/test/GraphTests.java @@ -17,6 +17,11 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.ScriptQueryBuilder; import org.elasticsearch.license.LicenseService; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.Vertex; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -24,12 +29,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.graph.Graph; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; import org.elasticsearch.xpack.core.graph.action.GraphExploreRequestBuilder; -import org.elasticsearch.xpack.core.graph.action.GraphExploreResponse; -import org.elasticsearch.xpack.core.graph.action.Hop; -import org.elasticsearch.xpack.core.graph.action.Vertex; -import org.elasticsearch.xpack.core.graph.action.VertexRequest; import java.util.Collection; import java.util.Collections; diff --git a/x-pack/plugin/logstash/build.gradle b/x-pack/plugin/logstash/build.gradle index 2e158a90ac7..1057a1c8526 100644 --- a/x-pack/plugin/logstash/build.gradle +++ b/x-pack/plugin/logstash/build.gradle @@ -10,9 +10,9 @@ esplugin { archivesBaseName = 'x-pack-logstash' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') - } run { diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index 3602e1b359e..5996458537a 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -40,13 +40,15 @@ compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try, compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // This should not be here testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') // ml deps compile project(':libs:grok') + compile "com.ibm.icu:icu4j:${versions.icu4j}" compile "net.sf.supercsv:super-csv:${versions.supercsv}" nativeBundle "org.elasticsearch.ml:ml-cpp:${project.version}@zip" testCompile 'org.ini4j:ini4j:0.5.2' @@ -103,9 +105,19 @@ task internalClusterTest(type: RandomizedTestingTask, include '**/*IT.class' systemProperty 
'es.set.netty.runtime.available.processors', 'false' } + check.dependsOn internalClusterTest internalClusterTest.mustRunAfter test +// add all sub-projects of the qa sub-project +gradle.projectsEvaluated { + project.subprojects + .find { it.path == project.path + ":qa" } + .subprojects + .findAll { it.path.startsWith(project.path + ":qa") } + .each { check.dependsOn it.check } +} + // also add an "alias" task to make typing on the command line easier task icTest { dependsOn internalClusterTest diff --git a/x-pack/plugin/ml/log-structure-finder/licenses/icu4j-62.1.jar.sha1 b/x-pack/plugin/ml/licenses/icu4j-62.1.jar.sha1 similarity index 100% rename from x-pack/plugin/ml/log-structure-finder/licenses/icu4j-62.1.jar.sha1 rename to x-pack/plugin/ml/licenses/icu4j-62.1.jar.sha1 diff --git a/x-pack/plugin/ml/log-structure-finder/licenses/icu4j-LICENSE.txt b/x-pack/plugin/ml/licenses/icu4j-LICENSE.txt similarity index 100% rename from x-pack/plugin/ml/log-structure-finder/licenses/icu4j-LICENSE.txt rename to x-pack/plugin/ml/licenses/icu4j-LICENSE.txt diff --git a/x-pack/plugin/ml/log-structure-finder/licenses/icu4j-NOTICE.txt b/x-pack/plugin/ml/licenses/icu4j-NOTICE.txt similarity index 100% rename from x-pack/plugin/ml/log-structure-finder/licenses/icu4j-NOTICE.txt rename to x-pack/plugin/ml/licenses/icu4j-NOTICE.txt diff --git a/x-pack/plugin/ml/log-structure-finder/build.gradle b/x-pack/plugin/ml/log-structure-finder/build.gradle deleted file mode 100644 index 9048a1c4686..00000000000 --- a/x-pack/plugin/ml/log-structure-finder/build.gradle +++ /dev/null @@ -1,36 +0,0 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - -apply plugin: 'elasticsearch.build' - -archivesBaseName = 'x-pack-log-structure-finder' - -description = 'Common code for reverse engineering log structure' - -dependencies { - compile "org.elasticsearch:elasticsearch-core:${version}" - compile "org.elasticsearch:elasticsearch-x-content:${version}" - compile project(':libs:grok') - compile "com.ibm.icu:icu4j:${versions.icu4j}" - compile "net.sf.supercsv:super-csv:${versions.supercsv}" - - testCompile "org.elasticsearch.test:framework:${version}" -} - -configurations { - testArtifacts.extendsFrom testRuntime -} -task testJar(type: Jar) { - appendix 'test' - from sourceSets.test.output -} -artifacts { - // normal es plugins do not publish the jar but we need to since users need it for Transport Clients and extensions - archives jar - testArtifacts testJar -} - -forbiddenApisMain { - // log-structure-finder does not depend on server, so cannot forbid server methods - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] -} - diff --git a/x-pack/plugin/ml/log-structure-finder/licenses/super-csv-2.4.0.jar.sha1 b/x-pack/plugin/ml/log-structure-finder/licenses/super-csv-2.4.0.jar.sha1 deleted file mode 100644 index a0b40213309..00000000000 --- a/x-pack/plugin/ml/log-structure-finder/licenses/super-csv-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -017f8708c929029dde48bc298deaf3c7ae2452d3 \ No newline at end of file diff --git a/x-pack/plugin/ml/log-structure-finder/licenses/super-csv-LICENSE.txt b/x-pack/plugin/ml/log-structure-finder/licenses/super-csv-LICENSE.txt deleted file mode 100644 index 9e0ad072b25..00000000000 --- a/x-pack/plugin/ml/log-structure-finder/licenses/super-csv-LICENSE.txt +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Apache License - * Version 2.0, January 2004 - * http://www.apache.org/licenses/ - * - * TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - * - * 1. 
Definitions. - * - * "License" shall mean the terms and conditions for use, reproduction, - * and distribution as defined by Sections 1 through 9 of this document. - * - * "Licensor" shall mean the copyright owner or entity authorized by - * the copyright owner that is granting the License. - * - * "Legal Entity" shall mean the union of the acting entity and all - * other entities that control, are controlled by, or are under common - * control with that entity. For the purposes of this definition, - * "control" means (i) the power, direct or indirect, to cause the - * direction or management of such entity, whether by contract or - * otherwise, or (ii) ownership of fifty percent (50%) or more of the - * outstanding shares, or (iii) beneficial ownership of such entity. - * - * "You" (or "Your") shall mean an individual or Legal Entity - * exercising permissions granted by this License. - * - * "Source" form shall mean the preferred form for making modifications, - * including but not limited to software source code, documentation - * source, and configuration files. - * - * "Object" form shall mean any form resulting from mechanical - * transformation or translation of a Source form, including but - * not limited to compiled object code, generated documentation, - * and conversions to other media types. - * - * "Work" shall mean the work of authorship, whether in Source or - * Object form, made available under the License, as indicated by a - * copyright notice that is included in or attached to the work - * (an example is provided in the Appendix below). - * - * "Derivative Works" shall mean any work, whether in Source or Object - * form, that is based on (or derived from) the Work and for which the - * editorial revisions, annotations, elaborations, or other modifications - * represent, as a whole, an original work of authorship. For the purposes - * of this License, Derivative Works shall not include works that remain - * separable from, or merely link (or bind by name) to the interfaces of, - * the Work and Derivative Works thereof. - * - * "Contribution" shall mean any work of authorship, including - * the original version of the Work and any modifications or additions - * to that Work or Derivative Works thereof, that is intentionally - * submitted to Licensor for inclusion in the Work by the copyright owner - * or by an individual or Legal Entity authorized to submit on behalf of - * the copyright owner. For the purposes of this definition, "submitted" - * means any form of electronic, verbal, or written communication sent - * to the Licensor or its representatives, including but not limited to - * communication on electronic mailing lists, source code control systems, - * and issue tracking systems that are managed by, or on behalf of, the - * Licensor for the purpose of discussing and improving the Work, but - * excluding communication that is conspicuously marked or otherwise - * designated in writing by the copyright owner as "Not a Contribution." - * - * "Contributor" shall mean Licensor and any individual or Legal Entity - * on behalf of whom a Contribution has been received by Licensor and - * subsequently incorporated within the Work. - * - * 2. Grant of Copyright License. 
Subject to the terms and conditions of - * this License, each Contributor hereby grants to You a perpetual, - * worldwide, non-exclusive, no-charge, royalty-free, irrevocable - * copyright license to reproduce, prepare Derivative Works of, - * publicly display, publicly perform, sublicense, and distribute the - * Work and such Derivative Works in Source or Object form. - * - * 3. Grant of Patent License. Subject to the terms and conditions of - * this License, each Contributor hereby grants to You a perpetual, - * worldwide, non-exclusive, no-charge, royalty-free, irrevocable - * (except as stated in this section) patent license to make, have made, - * use, offer to sell, sell, import, and otherwise transfer the Work, - * where such license applies only to those patent claims licensable - * by such Contributor that are necessarily infringed by their - * Contribution(s) alone or by combination of their Contribution(s) - * with the Work to which such Contribution(s) was submitted. If You - * institute patent litigation against any entity (including a - * cross-claim or counterclaim in a lawsuit) alleging that the Work - * or a Contribution incorporated within the Work constitutes direct - * or contributory patent infringement, then any patent licenses - * granted to You under this License for that Work shall terminate - * as of the date such litigation is filed. - * - * 4. Redistribution. You may reproduce and distribute copies of the - * Work or Derivative Works thereof in any medium, with or without - * modifications, and in Source or Object form, provided that You - * meet the following conditions: - * - * (a) You must give any other recipients of the Work or - * Derivative Works a copy of this License; and - * - * (b) You must cause any modified files to carry prominent notices - * stating that You changed the files; and - * - * (c) You must retain, in the Source form of any Derivative Works - * that You distribute, all copyright, patent, trademark, and - * attribution notices from the Source form of the Work, - * excluding those notices that do not pertain to any part of - * the Derivative Works; and - * - * (d) If the Work includes a "NOTICE" text file as part of its - * distribution, then any Derivative Works that You distribute must - * include a readable copy of the attribution notices contained - * within such NOTICE file, excluding those notices that do not - * pertain to any part of the Derivative Works, in at least one - * of the following places: within a NOTICE text file distributed - * as part of the Derivative Works; within the Source form or - * documentation, if provided along with the Derivative Works; or, - * within a display generated by the Derivative Works, if and - * wherever such third-party notices normally appear. The contents - * of the NOTICE file are for informational purposes only and - * do not modify the License. You may add Your own attribution - * notices within Derivative Works that You distribute, alongside - * or as an addendum to the NOTICE text from the Work, provided - * that such additional attribution notices cannot be construed - * as modifying the License. - * - * You may add Your own copyright statement to Your modifications and - * may provide additional or different license terms and conditions - * for use, reproduction, or distribution of Your modifications, or - * for any such Derivative Works as a whole, provided Your use, - * reproduction, and distribution of the Work otherwise complies with - * the conditions stated in this License. 
- * - * 5. Submission of Contributions. Unless You explicitly state otherwise, - * any Contribution intentionally submitted for inclusion in the Work - * by You to the Licensor shall be under the terms and conditions of - * this License, without any additional terms or conditions. - * Notwithstanding the above, nothing herein shall supersede or modify - * the terms of any separate license agreement you may have executed - * with Licensor regarding such Contributions. - * - * 6. Trademarks. This License does not grant permission to use the trade - * names, trademarks, service marks, or product names of the Licensor, - * except as required for reasonable and customary use in describing the - * origin of the Work and reproducing the content of the NOTICE file. - * - * 7. Disclaimer of Warranty. Unless required by applicable law or - * agreed to in writing, Licensor provides the Work (and each - * Contributor provides its Contributions) on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied, including, without limitation, any warranties or conditions - * of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - * PARTICULAR PURPOSE. You are solely responsible for determining the - * appropriateness of using or redistributing the Work and assume any - * risks associated with Your exercise of permissions under this License. - * - * 8. Limitation of Liability. In no event and under no legal theory, - * whether in tort (including negligence), contract, or otherwise, - * unless required by applicable law (such as deliberate and grossly - * negligent acts) or agreed to in writing, shall any Contributor be - * liable to You for damages, including any direct, indirect, special, - * incidental, or consequential damages of any character arising as a - * result of this License or out of the use or inability to use the - * Work (including but not limited to damages for loss of goodwill, - * work stoppage, computer failure or malfunction, or any and all - * other commercial damages or losses), even if such Contributor - * has been advised of the possibility of such damages. - * - * 9. Accepting Warranty or Additional Liability. While redistributing - * the Work or Derivative Works thereof, You may choose to offer, - * and charge a fee for, acceptance of support, warranty, indemnity, - * or other liability obligations and/or rights consistent with this - * License. However, in accepting such obligations, You may act only - * on Your own behalf and on Your sole responsibility, not on behalf - * of any other Contributor, and only if You agree to indemnify, - * defend, and hold each Contributor harmless for any liability - * incurred by, or claims asserted against, such Contributor by reason - * of your accepting any such warranty or additional liability. - * - * END OF TERMS AND CONDITIONS - * - * APPENDIX: How to apply the Apache License to your work. - * - * To apply the Apache License to your work, attach the following - * boilerplate notice, with the fields enclosed by brackets "[]" - * replaced with your own identifying information. (Don't include - * the brackets!) The text should be enclosed in the appropriate - * comment syntax for the file format. We also recommend that a - * file or class name and description of purpose be included on the - * same "printed page" as the copyright notice for easier - * identification within third-party archives. - * - * Copyright 2007 Kasper B. 
Graversen - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactory.java deleted file mode 100644 index cb9e6537252..00000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactory.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -import org.supercsv.prefs.CsvPreference; - -import java.io.IOException; -import java.util.List; - -public class CsvLogStructureFinderFactory implements LogStructureFinderFactory { - - /** - * Rules are: - * - The file must be valid CSV - * - It must contain at least two complete records - * - There must be at least two fields per record (otherwise files with no commas could be treated as CSV!) - * - Every CSV record except the last must have the same number of fields - * The reason the last record is allowed to have fewer fields than the others is that - * it could have been truncated when the file was sampled. - */ - @Override - public boolean canCreateFromSample(List explanation, String sample) { - return SeparatedValuesLogStructureFinder.canCreateFromSample(explanation, sample, 2, CsvPreference.EXCEL_PREFERENCE, "CSV"); - } - - @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) - throws IOException { - return SeparatedValuesLogStructureFinder.makeSeparatedValuesLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, - CsvPreference.EXCEL_PREFERENCE, false); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinder.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinder.java deleted file mode 100644 index ea2e9efc5fb..00000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinder.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -import java.util.List; - -public interface LogStructureFinder { - - /** - * The (possibly multi-line) messages that the log sample was split into. - * @return A list of messages. 
- */ - List getSampleMessages(); - - /** - * Retrieve the structure of the log file used to instantiate the finder. - * @return The log file structure. - */ - LogStructure getStructure(); -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactory.java deleted file mode 100644 index 085599de847..00000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactory.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -import org.supercsv.prefs.CsvPreference; - -import java.io.IOException; -import java.util.List; - -public class PipeSeparatedValuesLogStructureFinderFactory implements LogStructureFinderFactory { - - private static final CsvPreference PIPE_PREFERENCE = new CsvPreference.Builder('"', '|', "\n").build(); - - /** - * Rules are: - * - The file must be valid pipe (|) separated values - * - It must contain at least two complete records - * - There must be at least five fields per record (otherwise files with coincidental - * or no pipe characters could be treated as pipe separated) - * - Every pipe separated value record except the last must have the same number of fields - * The reason the last record is allowed to have fewer fields than the others is that - * it could have been truncated when the file was sampled. - */ - @Override - public boolean canCreateFromSample(List explanation, String sample) { - return SeparatedValuesLogStructureFinder.canCreateFromSample(explanation, sample, 5, PIPE_PREFERENCE, "pipe separated values"); - } - - @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) - throws IOException { - return SeparatedValuesLogStructureFinder.makeSeparatedValuesLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, - PIPE_PREFERENCE, true); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactory.java deleted file mode 100644 index e0e80fa7465..00000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactory.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -import org.supercsv.prefs.CsvPreference; - -import java.io.IOException; -import java.util.List; - -public class SemiColonSeparatedValuesLogStructureFinderFactory implements LogStructureFinderFactory { - - /** - * Rules are: - * - The file must be valid semi-colon separated values - * - It must contain at least two complete records - * - There must be at least four fields per record (otherwise files with coincidental - * or no semi-colons could be treated as semi-colon separated) - * - Every semi-colon separated value record except the last must have the same number of fields - * The reason the last record is allowed to have fewer fields than the others is that - * it could have been truncated when the file was sampled. - */ - @Override - public boolean canCreateFromSample(List explanation, String sample) { - return SeparatedValuesLogStructureFinder.canCreateFromSample(explanation, sample, 4, - CsvPreference.EXCEL_NORTH_EUROPE_PREFERENCE, "semi-colon separated values"); - } - - @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) - throws IOException { - return SeparatedValuesLogStructureFinder.makeSeparatedValuesLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, - CsvPreference.EXCEL_NORTH_EUROPE_PREFERENCE, false); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactory.java deleted file mode 100644 index 733b32346fb..00000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactory.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -import org.supercsv.prefs.CsvPreference; - -import java.io.IOException; -import java.util.List; - -public class TsvLogStructureFinderFactory implements LogStructureFinderFactory { - - /** - * Rules are: - * - The file must be valid TSV - * - It must contain at least two complete records - * - There must be at least two fields per record (otherwise files with no tabs could be treated as TSV!) - * - Every TSV record except the last must have the same number of fields - * The reason the last record is allowed to have fewer fields than the others is that - * it could have been truncated when the file was sampled. 
- */ - @Override - public boolean canCreateFromSample(List explanation, String sample) { - return SeparatedValuesLogStructureFinder.canCreateFromSample(explanation, sample, 2, CsvPreference.TAB_PREFERENCE, "TSV"); - } - - @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) - throws IOException { - return SeparatedValuesLogStructureFinder.makeSeparatedValuesLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, - CsvPreference.TAB_PREFERENCE, false); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactoryTests.java deleted file mode 100644 index f53ee008d69..00000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactoryTests.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -public class CsvLogStructureFinderFactoryTests extends LogStructureTestCase { - - private LogStructureFinderFactory factory = new CsvLogStructureFinderFactory(); - - // No need to check JSON or XML because they come earlier in the order we check formats - - public void testCanCreateFromSampleGivenCsv() { - - assertTrue(factory.canCreateFromSample(explanation, CSV_SAMPLE)); - } - - public void testCanCreateFromSampleGivenTsv() { - - assertFalse(factory.canCreateFromSample(explanation, TSV_SAMPLE)); - } - - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenPipeSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenText() { - - assertFalse(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactoryTests.java deleted file mode 100644 index 3fd2fb7840a..00000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactoryTests.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -public class PipeSeparatedValuesLogStructureFinderFactoryTests extends LogStructureTestCase { - - private LogStructureFinderFactory factory = new PipeSeparatedValuesLogStructureFinderFactory(); - - // No need to check JSON, XML, CSV, TSV or semi-colon separated values because they come earlier in the order we check formats - - public void testCanCreateFromSampleGivenPipeSeparatedValues() { - - assertTrue(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenText() { - - assertFalse(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactoryTests.java deleted file mode 100644 index 64dad7e078c..00000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactoryTests.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -public class SemiColonSeparatedValuesLogStructureFinderFactoryTests extends LogStructureTestCase { - - private LogStructureFinderFactory factory = new SemiColonSeparatedValuesLogStructureFinderFactory(); - - // No need to check JSON, XML, CSV or TSV because they come earlier in the order we check formats - - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { - - assertTrue(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenPipeSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenText() { - - assertFalse(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactoryTests.java deleted file mode 100644 index 1c8acc14d32..00000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactoryTests.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -public class TsvLogStructureFinderFactoryTests extends LogStructureTestCase { - - private LogStructureFinderFactory factory = new TsvLogStructureFinderFactory(); - - // No need to check JSON, XML or CSV because they come earlier in the order we check formats - - public void testCanCreateFromSampleGivenTsv() { - - assertTrue(factory.canCreateFromSample(explanation, TSV_SAMPLE)); - } - - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenPipeSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenText() { - - assertFalse(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); - } -} diff --git a/x-pack/qa/ml-basic-multi-node/build.gradle b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle similarity index 85% rename from x-pack/qa/ml-basic-multi-node/build.gradle rename to x-pack/plugin/ml/qa/basic-multi-node/build.gradle index 3df77aadccb..cc5a2cd68dd 100644 --- a/x-pack/qa/ml-basic-multi-node/build.gradle +++ b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('ml'), configuration: 'runtime') } diff --git a/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java b/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java new file mode 100644 index 00000000000..6e22e5b3f18 --- /dev/null +++ b/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java @@ -0,0 +1,322 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.ml.integration;
+
+import org.apache.http.entity.ContentType;
+import org.apache.http.nio.entity.NStringEntity;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.ResponseException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.xpack.ml.MachineLearning;
+
+import java.io.IOException;
+import java.net.URLEncoder;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class MlBasicMultiNodeIT extends ESRestTestCase {
+
+    public void testMachineLearningInstalled() throws Exception {
+        Response response = client().performRequest(new Request("GET", "/_xpack"));
+        Map features = (Map) entityAsMap(response).get("features");
+        Map ml = (Map) features.get("ml");
+        assertNotNull(ml);
+        assertTrue((Boolean) ml.get("available"));
+        assertTrue((Boolean) ml.get("enabled"));
+    }
+
+    public void testInvalidJob() throws Exception {
+        // The job name is invalid because it contains a space
+        String jobId = "invalid job";
+        ResponseException e = expectThrows(ResponseException.class, () -> createFarequoteJob(jobId));
+        assertTrue(e.getMessage(), e.getMessage().contains("can contain lowercase alphanumeric (a-z and 0-9), hyphens or underscores"));
+        // If validation of the invalid job is not done until after transportation to the master node, then the
+        // root cause gets reported as a remote_transport_exception. The code in PutJobAction is supposed to
+        // validate before transportation to avoid this. This test must be done in a multi-node cluster to have
+        // a chance of catching a problem, hence it is here rather than in the single node integration tests.
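+        // note: passing e.getMessage() as the first argument makes JUnit print the full error message if this assertion fails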
+ assertFalse(e.getMessage(), e.getMessage().contains("remote_transport_exception")); + } + + public void testMiniFarequote() throws Exception { + String jobId = "mini-farequote-job"; + createFarequoteJob(jobId); + + Response openResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open")); + assertEquals(Collections.singletonMap("opened", true), entityAsMap(openResponse)); + + Request addData = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data"); + addData.setEntity(new NStringEntity( + "{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + + "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}", + randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); + Response addDataResponse = client().performRequest(addData); + assertEquals(202, addDataResponse.getStatusLine().getStatusCode()); + Map responseBody = entityAsMap(addDataResponse); + assertEquals(2, responseBody.get("processed_record_count")); + assertEquals(4, responseBody.get("processed_field_count")); + assertEquals(177, responseBody.get("input_bytes")); + assertEquals(6, responseBody.get("input_field_count")); + assertEquals(0, responseBody.get("invalid_date_count")); + assertEquals(0, responseBody.get("missing_field_count")); + assertEquals(0, responseBody.get("out_of_order_timestamp_count")); + assertEquals(0, responseBody.get("bucket_count")); + assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); + assertEquals(1403481700000L, responseBody.get("latest_record_timestamp")); + + Response flushResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush")); + assertFlushResponse(flushResponse, true, 1403481600000L); + + Request closeRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close"); + closeRequest.addParameter("timeout", "20s"); + Response closeResponse = client().performRequest(closeRequest); + assertEquals(Collections.singletonMap("closed", true), entityAsMap(closeResponse)); + + Response statsResponse = client().performRequest( + new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + Map dataCountsDoc = (Map) + ((Map)((List) entityAsMap(statsResponse).get("jobs")).get(0)).get("data_counts"); + assertEquals(2, dataCountsDoc.get("processed_record_count")); + assertEquals(4, dataCountsDoc.get("processed_field_count")); + assertEquals(177, dataCountsDoc.get("input_bytes")); + assertEquals(6, dataCountsDoc.get("input_field_count")); + assertEquals(0, dataCountsDoc.get("invalid_date_count")); + assertEquals(0, dataCountsDoc.get("missing_field_count")); + assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); + assertEquals(0, dataCountsDoc.get("bucket_count")); + assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); + assertEquals(1403481700000L, dataCountsDoc.get("latest_record_timestamp")); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + } + + public void testMiniFarequoteWithDatafeeder() throws Exception { + Request createAirlineDataRequest = new Request("PUT", "/airline-data"); + createAirlineDataRequest.setJsonEntity("{" + + " \"mappings\": {" + + " \"response\": {" + + " \"properties\": {" + + " \"time\": { 
\"type\":\"date\"}," + + " \"airline\": { \"type\":\"keyword\"}," + + " \"responsetime\": { \"type\":\"float\"}" + + " }" + + " }" + + " }" + + "}"); + client().performRequest(createAirlineDataRequest); + Request airlineData1 = new Request("PUT", "/airline-data/response/1"); + airlineData1.setJsonEntity("{\"time\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}"); + client().performRequest(airlineData1); + Request airlineData2 = new Request("PUT", "/airline-data/response/2"); + airlineData2.setJsonEntity("{\"time\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}"); + client().performRequest(airlineData2); + + // Ensure all data is searchable + client().performRequest(new Request("POST", "/_refresh")); + + String jobId = "mini-farequote-with-data-feeder-job"; + createFarequoteJob(jobId); + String datafeedId = "bar"; + createDatafeed(datafeedId, jobId); + + Response openResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open")); + assertEquals(Collections.singletonMap("opened", true), entityAsMap(openResponse)); + + Request startRequest = new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start"); + startRequest.addParameter("start", "0"); + Response startResponse = client().performRequest(startRequest); + assertEquals(Collections.singletonMap("started", true), entityAsMap(startResponse)); + + assertBusy(() -> { + try { + Response statsResponse = client().performRequest( + new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + Map dataCountsDoc = (Map) + ((Map)((List) entityAsMap(statsResponse).get("jobs")).get(0)).get("data_counts"); + assertEquals(2, dataCountsDoc.get("input_record_count")); + assertEquals(2, dataCountsDoc.get("processed_record_count")); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + Response stopResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop")); + assertEquals(Collections.singletonMap("stopped", true), entityAsMap(stopResponse)); + + Request closeRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close"); + closeRequest.addParameter("timeout", "20s"); + assertEquals(Collections.singletonMap("closed", true), + entityAsMap(client().performRequest(closeRequest))); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId)); + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + } + + public void testMiniFarequoteReopen() throws Exception { + String jobId = "mini-farequote-reopen"; + createFarequoteJob(jobId); + + Response openResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open")); + assertEquals(Collections.singletonMap("opened", true), entityAsMap(openResponse)); + + Request addDataRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data"); + addDataRequest.setEntity(new NStringEntity( + "{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + + "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}\n" + + "{\"airline\":\"JBU\",\"responsetime\":\"877.5927\",\"sourcetype\":\"farequote\",\"time\":\"1403481800\"}\n" + + 
"{\"airline\":\"KLM\",\"responsetime\":\"1355.4812\",\"sourcetype\":\"farequote\",\"time\":\"1403481900\"}\n" + + "{\"airline\":\"NKS\",\"responsetime\":\"9991.3981\",\"sourcetype\":\"farequote\",\"time\":\"1403482000\"}", + randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); + Response addDataResponse = client().performRequest(addDataRequest); + assertEquals(202, addDataResponse.getStatusLine().getStatusCode()); + Map responseBody = entityAsMap(addDataResponse); + assertEquals(5, responseBody.get("processed_record_count")); + assertEquals(10, responseBody.get("processed_field_count")); + assertEquals(446, responseBody.get("input_bytes")); + assertEquals(15, responseBody.get("input_field_count")); + assertEquals(0, responseBody.get("invalid_date_count")); + assertEquals(0, responseBody.get("missing_field_count")); + assertEquals(0, responseBody.get("out_of_order_timestamp_count")); + assertEquals(0, responseBody.get("bucket_count")); + assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); + assertEquals(1403482000000L, responseBody.get("latest_record_timestamp")); + + Response flushResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush")); + assertFlushResponse(flushResponse, true, 1403481600000L); + + Request closeRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close"); + closeRequest.addParameter("timeout", "20s"); + assertEquals(Collections.singletonMap("closed", true), + entityAsMap(client().performRequest(closeRequest))); + + Request statsRequest = new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + client().performRequest(statsRequest); + + Request openRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); + openRequest.addParameter("timeout", "20s"); + Response openResponse2 = client().performRequest(openRequest); + assertEquals(Collections.singletonMap("opened", true), entityAsMap(openResponse2)); + + // feed some more data points + Request addDataRequest2 = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data"); + addDataRequest2.setEntity(new NStringEntity( + "{\"airline\":\"AAL\",\"responsetime\":\"136.2361\",\"sourcetype\":\"farequote\",\"time\":\"1407081600\"}\n" + + "{\"airline\":\"VRD\",\"responsetime\":\"282.9847\",\"sourcetype\":\"farequote\",\"time\":\"1407081700\"}\n" + + "{\"airline\":\"JAL\",\"responsetime\":\"493.0338\",\"sourcetype\":\"farequote\",\"time\":\"1407081800\"}\n" + + "{\"airline\":\"UAL\",\"responsetime\":\"8.4275\",\"sourcetype\":\"farequote\",\"time\":\"1407081900\"}\n" + + "{\"airline\":\"FFT\",\"responsetime\":\"221.8693\",\"sourcetype\":\"farequote\",\"time\":\"1407082000\"}", + randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); + Response addDataResponse2 = client().performRequest(addDataRequest2); + assertEquals(202, addDataResponse2.getStatusLine().getStatusCode()); + Map responseBody2 = entityAsMap(addDataResponse2); + assertEquals(5, responseBody2.get("processed_record_count")); + assertEquals(10, responseBody2.get("processed_field_count")); + assertEquals(442, responseBody2.get("input_bytes")); + assertEquals(15, responseBody2.get("input_field_count")); + assertEquals(0, responseBody2.get("invalid_date_count")); + assertEquals(0, responseBody2.get("missing_field_count")); + assertEquals(0, 
responseBody2.get("out_of_order_timestamp_count")); + assertEquals(1000, responseBody2.get("bucket_count")); + + // unintuitive: should return the earliest record timestamp of this feed??? + assertEquals(null, responseBody2.get("earliest_record_timestamp")); + assertEquals(1407082000000L, responseBody2.get("latest_record_timestamp")); + + assertEquals(Collections.singletonMap("closed", true), + entityAsMap(client().performRequest(closeRequest))); + + // counts should be summed up + Response statsResponse = client().performRequest(statsRequest); + + Map dataCountsDoc = (Map) + ((Map)((List) entityAsMap(statsResponse).get("jobs")).get(0)).get("data_counts"); + assertEquals(10, dataCountsDoc.get("processed_record_count")); + assertEquals(20, dataCountsDoc.get("processed_field_count")); + assertEquals(888, dataCountsDoc.get("input_bytes")); + assertEquals(30, dataCountsDoc.get("input_field_count")); + assertEquals(0, dataCountsDoc.get("invalid_date_count")); + assertEquals(0, dataCountsDoc.get("missing_field_count")); + assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); + assertEquals(1000, dataCountsDoc.get("bucket_count")); + assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); + assertEquals(1407082000000L, dataCountsDoc.get("latest_record_timestamp")); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + } + + private Response createDatafeed(String datafeedId, String jobId) throws Exception { + XContentBuilder xContentBuilder = jsonBuilder(); + xContentBuilder.startObject(); + xContentBuilder.field("job_id", jobId); + xContentBuilder.array("indexes", "airline-data"); + xContentBuilder.array("types", "response"); + xContentBuilder.field("_source", true); + xContentBuilder.endObject(); + Request request = new Request("PUT", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); + request.setJsonEntity(Strings.toString(xContentBuilder)); + return client().performRequest(request); + } + + private Response createFarequoteJob(String jobId) throws Exception { + XContentBuilder xContentBuilder = jsonBuilder(); + xContentBuilder.startObject(); + { + xContentBuilder.field("job_id", jobId); + xContentBuilder.field("description", "Analysis of response time by airline"); + + xContentBuilder.startObject("analysis_config"); + { + xContentBuilder.field("bucket_span", "3600s"); + xContentBuilder.startArray("detectors"); + { + xContentBuilder.startObject(); + { + xContentBuilder.field("function", "metric"); + xContentBuilder.field("field_name", "responsetime"); + xContentBuilder.field("by_field_name", "airline"); + } + xContentBuilder.endObject(); + } + xContentBuilder.endArray(); + } + xContentBuilder.endObject(); + + xContentBuilder.startObject("data_description"); + { + xContentBuilder.field("format", "xcontent"); + xContentBuilder.field("time_field", "time"); + xContentBuilder.field("time_format", "epoch"); + } + xContentBuilder.endObject(); + } + xContentBuilder.endObject(); + + Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + URLEncoder.encode(jobId, "UTF-8")); + request.setJsonEntity(Strings.toString(xContentBuilder)); + return client().performRequest(request); + } + + private static void assertFlushResponse(Response response, boolean expectedFlushed, long expectedLastFinalizedBucketEnd) + throws IOException { + Map asMap = entityAsMap(response); + assertThat(asMap.size(), equalTo(2)); + assertThat(asMap.get("flushed"), is(true)); + 
assertThat(asMap.get("last_finalized_bucket_end"), equalTo(expectedLastFinalizedBucketEnd)); + } +} diff --git a/x-pack/plugin/ml/qa/build.gradle b/x-pack/plugin/ml/qa/build.gradle new file mode 100644 index 00000000000..5b3dcd7c850 --- /dev/null +++ b/x-pack/plugin/ml/qa/build.gradle @@ -0,0 +1,30 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +subprojects { + // HACK: please fix this + // we want to add the rest api specs for xpack to qa tests, but we + // need to wait until after the project is evaluated to only apply + // to those that rest tests. this used to be done automatically + // when xpack was a plugin, but now there is no place with xpack as a module. + // instead, we should package these and make them easy to use for rest tests, + // but currently, they must be copied into the resources of the test runner. + project.tasks.withType(RestIntegTestTask) { + File xpackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources') + project.copyRestSpec.from(xpackResources) { + include 'rest-api-spec/api/**' + } + } +} + +gradle.projectsEvaluated { + subprojects { + Task assemble = project.tasks.findByName('assemble') + if (assemble) { + assemble.enabled = false + } + Task dependenciesInfo = project.tasks.findByName('dependenciesInfo') + if (dependenciesInfo) { + dependenciesInfo.enabled = false + } + } +} diff --git a/x-pack/qa/ml-disabled/build.gradle b/x-pack/plugin/ml/qa/disabled/build.gradle similarity index 80% rename from x-pack/qa/ml-disabled/build.gradle rename to x-pack/plugin/ml/qa/disabled/build.gradle index e914def3507..a24036651d5 100644 --- a/x-pack/qa/ml-disabled/build.gradle +++ b/x-pack/plugin/ml/qa/disabled/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('ml'), configuration: 'runtime') } diff --git a/x-pack/plugin/ml/qa/disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java b/x-pack/plugin/ml/qa/disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java new file mode 100644 index 00000000000..170b4f14486 --- /dev/null +++ b/x-pack/plugin/ml/qa/disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.ml.integration;
+
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.ResponseException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.xpack.ml.MachineLearning;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.containsString;
+
+public class MlPluginDisabledIT extends ESRestTestCase {
+
+    /**
+     * Check that when the ml plugin is disabled, you cannot create a job as the
+     * rest handler is not registered
+     */
+    public void testActionsFail() throws Exception {
+        XContentBuilder xContentBuilder = jsonBuilder();
+        xContentBuilder.startObject();
+        {
+            xContentBuilder.field("actions-fail-job", "foo");
+            xContentBuilder.field("description", "Analysis of response time by airline");
+
+            xContentBuilder.startObject("analysis_config");
+            {
+                xContentBuilder.field("bucket_span", "3600s");
+                xContentBuilder.startArray("detectors");
+                {
+                    xContentBuilder.startObject();
+                    {
+                        xContentBuilder.field("function", "metric");
+                        xContentBuilder.field("field_name", "responsetime");
+                        xContentBuilder.field("by_field_name", "airline");
+                    }
+                    xContentBuilder.endObject();
+                }
+                xContentBuilder.endArray();
+            }
+            xContentBuilder.endObject();
+
+            xContentBuilder.startObject("data_description");
+            {
+                xContentBuilder.field("format", "xcontent");
+                xContentBuilder.field("time_field", "time");
+                xContentBuilder.field("time_format", "epoch");
+            }
+            xContentBuilder.endObject();
+        }
+        xContentBuilder.endObject();
+
+        Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/foo");
+        request.setJsonEntity(Strings.toString(xContentBuilder));
+        ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest(request));
+        assertThat(exception.getMessage(),
+                containsString("no handler found for uri [/_xpack/ml/anomaly_detectors/foo] and method [PUT]"));
+    }
+}
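Both MlPluginDisabledIT above and the farequote helper earlier build their request bodies with the same XContentBuilder idiom: bare `{ ... }` blocks that mirror the JSON nesting. A minimal self-contained sketch of the pattern (field values here are illustrative):

    import org.elasticsearch.common.Strings;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;

    XContentBuilder builder = jsonBuilder();
    builder.startObject();
    {
        builder.field("description", "example");
        builder.startObject("analysis_config");
        {
            builder.field("bucket_span", "3600s");
        }
        builder.endObject();
    }
    builder.endObject();
    // yields {"description":"example","analysis_config":{"bucket_span":"3600s"}}
    String json = Strings.toString(builder);

The bare braces compile to nothing; they exist purely so the indentation of the Java tracks the structure of the JSON being produced.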
a/x-pack/qa/smoke-test-ml-with-security/roles.yml b/x-pack/plugin/ml/qa/ml-with-security/roles.yml similarity index 100% rename from x-pack/qa/smoke-test-ml-with-security/roles.yml rename to x-pack/plugin/ml/qa/ml-with-security/roles.yml diff --git a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java similarity index 100% rename from x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java rename to x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java diff --git a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java similarity index 100% rename from x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java rename to x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java diff --git a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java similarity index 87% rename from x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java rename to x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java index b103d30f282..9e31ddb131c 100644 --- a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java +++ b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java @@ -31,10 +31,13 @@ public class MlWithSecurityUserRoleIT extends MlWithSecurityIT { super.test(); // We should have got here if and only if the only ML endpoints in the test were GETs + // or the find_file_structure API, which is also available to the machine_learning_user + // role for (ExecutableSection section : testCandidate.getTestSection().getExecutableSections()) { if (section instanceof DoSection) { if (((DoSection) section).getApiCallSection().getApi().startsWith("xpack.ml.") && - ((DoSection) section).getApiCallSection().getApi().startsWith("xpack.ml.get_") == false) { + ((DoSection) section).getApiCallSection().getApi().startsWith("xpack.ml.get_") == false && + ((DoSection) section).getApiCallSection().getApi().equals("xpack.ml.find_file_structure") == false) { fail("should have failed because of missing role"); } } diff --git a/x-pack/qa/ml-native-multi-node-tests/build.gradle b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle similarity index 95% rename from x-pack/qa/ml-native-multi-node-tests/build.gradle rename to x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle index b1893b20c46..0c4304b123e 100644 --- a/x-pack/qa/ml-native-multi-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle @@ -4,7 +4,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the 
diff --git a/x-pack/qa/ml-native-multi-node-tests/build.gradle b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle
similarity index 95%
rename from x-pack/qa/ml-native-multi-node-tests/build.gradle
rename to x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle
index b1893b20c46..0c4304b123e 100644
--- a/x-pack/qa/ml-native-multi-node-tests/build.gradle
+++ b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle
@@ -4,7 +4,8 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+  testCompile project(path: xpackModule('core'), configuration: 'default')
   testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
   testCompile project(path: xpackModule('ml'), configuration: 'runtime')
   testCompile project(path: xpackModule('ml'), configuration: 'testArtifacts')
diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java
similarity index 100%
rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java
rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java
diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java
similarity index 100%
rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java
rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java
diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java
similarity index 100%
rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java
rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java
diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java
new file mode 100644
index 00000000000..95ec9728842
--- /dev/null
+++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ml.integration;
+
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.ResponseException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.test.SecuritySettingsSourceField;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.xpack.ml.MachineLearning;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.charset.StandardCharsets;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.hamcrest.Matchers.equalTo;
+
+public class CloseJobsIT extends ESRestTestCase {
+
+    private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("x_pack_rest_user",
+            SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING);
+
+    @Override
+    protected Settings restClientSettings() {
+        return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build();
+    }
+
+    public void testCloseJobsAcceptsOptionsFromPayload() throws Exception {
+
+        Request request = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + "job-that-doesnot-exist*" + "/_close");
+        request.setJsonEntity("{\"allow_no_jobs\":false}");
+        request.setOptions(RequestOptions.DEFAULT);
+        ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest(request));
+        assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(404));
+
+        request.setJsonEntity("{\"allow_no_jobs\":true}");
+        Response response = client().performRequest(request);
+        assertThat(response.getStatusLine().getStatusCode(), equalTo(200));
+        String responseAsString = responseEntityToString(response);
+        assertEquals("{\"closed\":true}", responseAsString);
+    }
+
+    private static String responseEntityToString(Response response) throws IOException {
+        try (BufferedReader reader = new BufferedReader(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) {
+            return reader.lines().collect(Collectors.joining("\n"));
+        }
+    }
+}
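CloseJobsIT's responseEntityToString re-implements what org.apache.http.util.EntityUtils already provides, and later hunks in this same change switch DatafeedJobsRestIT over to exactly that. The equivalent one-liner, for comparison:

    // decodes using the charset declared in the response's Content-Type header
    String responseAsString = EntityUtils.toString(response.getEntity());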
diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java
similarity index 100%
rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java
rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java
diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java
similarity index 56%
rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java
rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java
index 54d8090a7a4..7a93ecdd9e1 100644
--- a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java
+++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java
@@ -5,9 +5,9 @@
  */
 package org.elasticsearch.xpack.ml.integration;
 
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.message.BasicHeader;
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.client.RestClient;
@@ -22,10 +22,7 @@ import org.elasticsearch.xpack.test.rest.XPackRestTestHelper;
 import org.junit.After;
 import org.junit.Before;
 
-import java.io.BufferedReader;
 import java.io.IOException;
-import java.io.InputStreamReader;
-import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Date;
@@ -36,6 +33,7 @@ import java.util.stream.Collectors;
 import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
 
 public class DatafeedJobsRestIT extends ESRestTestCase {
 
@@ -57,26 +55,24 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
     }
 
     private void setupDataAccessRole(String index) throws IOException {
-        String json = "{"
+        Request request = new Request("PUT", "/_xpack/security/role/test_data_access");
+        request.setJsonEntity("{"
                 + "  \"indices\" : ["
                 + "    { \"names\": [\"" + index + "\"], \"privileges\": [\"read\"] }"
                 + "  ]"
-                + "}";
-
-        client().performRequest("put", "_xpack/security/role/test_data_access", Collections.emptyMap(),
-                new StringEntity(json, ContentType.APPLICATION_JSON));
+                + "}");
+        client().performRequest(request);
     }
 
     private void setupUser(String user, List<String> roles) throws IOException {
         String password = new String(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING.getChars());
-        String json = "{"
+        Request request = new Request("PUT", "/_xpack/security/user/" + user);
+        request.setJsonEntity("{"
                 + "  \"password\" : \"" + password + "\","
                 + "  \"roles\" : [ " + roles.stream().map(unquoted -> "\"" + unquoted + "\"").collect(Collectors.joining(", ")) + " ]"
-                + "}";
-
-        client().performRequest("put", "_xpack/security/user/" + user, Collections.emptyMap(),
-                new StringEntity(json, ContentType.APPLICATION_JSON));
+                + "}");
+        client().performRequest(request);
     }
 
     @Before
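These conversions lean on Request.setJsonEntity, which sets both the body and the application/json content type in one call, so the Collections.emptyMap() parameter map and explicit StringEntity construction of the old performRequest signature disappear. In isolation:

    Request request = new Request("PUT", "/_xpack/security/role/test_data_access");
    // setJsonEntity also sets the Content-Type: application/json header
    request.setJsonEntity("{ \"indices\": [ { \"names\": [\"airline-data\"], \"privileges\": [\"read\"] } ] }");
    client().performRequest(request);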
"/airline-data"); + createAirlineDataRequest.setJsonEntity("{" + " \"mappings\": {" + " \"response\": {" + " \"properties\": {" @@ -123,18 +122,17 @@ public class DatafeedJobsRestIT extends ESRestTestCase { + " }" + " }" + " }" - + "}"; - client().performRequest("put", "airline-data", Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + "}"); + client().performRequest(createAirlineDataRequest); - client().performRequest("put", "airline-data/response/1", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data/response/2", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", - ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": {\"_index\": \"airline-data\", \"_type\": \"response\", \"_id\": 1}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data\", \"_type\": \"response\", \"_id\": 2}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}\n"); // Create index with source = enabled, doc_values = disabled (except time), stored = false - mappings = "{" + Request createAirlineDataDisabledDocValues = new Request("PUT", "/airline-data-disabled-doc-values"); + createAirlineDataDisabledDocValues.setJsonEntity("{" + " \"mappings\": {" + " \"response\": {" + " \"properties\": {" @@ -144,19 +142,17 @@ public class DatafeedJobsRestIT extends ESRestTestCase { + " }" + " }" + " }" - + "}"; - client().performRequest("put", "airline-data-disabled-doc-values", Collections.emptyMap(), - new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + "}"); + client().performRequest(createAirlineDataDisabledDocValues); - client().performRequest("put", "airline-data-disabled-doc-values/response/1", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-disabled-doc-values/response/2", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", - ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": {\"_index\": \"airline-data-disabled-doc-values\", \"_type\": \"response\", \"_id\": 1}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-disabled-doc-values\", \"_type\": \"response\", \"_id\": 2}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}\n"); // Create index with source = disabled, doc_values = enabled (except time), stored = true - mappings = "{" + Request createAirlineDataDisabledSource = new Request("PUT", "/airline-data-disabled-source"); + createAirlineDataDisabledSource.setJsonEntity("{" + " \"mappings\": {" + " \"response\": {" + " \"_source\":{\"enabled\":false}," @@ -167,19 +163,16 @@ public class DatafeedJobsRestIT extends ESRestTestCase { + " }" + " }" + " }" - + "}"; - client().performRequest("put", "airline-data-disabled-source", Collections.emptyMap(), - new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + "}"); - client().performRequest("put", 
"airline-data-disabled-source/response/1", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-disabled-source/response/2", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", - ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": {\"_index\": \"airline-data-disabled-source\", \"_type\": \"response\", \"_id\": 1}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-disabled-source\", \"_type\": \"response\", \"_id\": 2}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}\n"); // Create index with nested documents - mappings = "{" + Request createAirlineDataNested = new Request("PUT", "/nested-data"); + createAirlineDataNested.setJsonEntity("{" + " \"mappings\": {" + " \"response\": {" + " \"properties\": {" @@ -187,18 +180,17 @@ public class DatafeedJobsRestIT extends ESRestTestCase { + " }" + " }" + " }" - + "}"; - client().performRequest("put", "nested-data", Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + "}"); + client().performRequest(createAirlineDataNested); - client().performRequest("put", "nested-data/response/1", Collections.emptyMap(), - new StringEntity("{\"time\":\"2016-06-01T00:00:00Z\", \"responsetime\":{\"millis\":135.22}}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "nested-data/response/2", Collections.emptyMap(), - new StringEntity("{\"time\":\"2016-06-01T01:59:00Z\",\"responsetime\":{\"millis\":222.0}}", - ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": {\"_index\": \"nested-data\", \"_type\": \"response\", \"_id\": 1}}\n"); + bulk.append("{\"time\":\"2016-06-01T00:00:00Z\", \"responsetime\":{\"millis\":135.22}}\n"); + bulk.append("{\"index\": {\"_index\": \"nested-data\", \"_type\": \"response\", \"_id\": 2}}\n"); + bulk.append("{\"time\":\"2016-06-01T01:59:00Z\",\"responsetime\":{\"millis\":222.0}}\n"); // Create index with multiple docs per time interval for aggregation testing - mappings = "{" + Request createAirlineDataAggs = new Request("PUT", "/airline-data-aggs"); + createAirlineDataAggs.setJsonEntity("{" + " \"mappings\": {" + " \"response\": {" + " \"properties\": {" @@ -208,43 +200,33 @@ public class DatafeedJobsRestIT extends ESRestTestCase { + " }" + " }" + " }" - + "}"; - client().performRequest("put", "airline-data-aggs", Collections.emptyMap(), - new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + "}"); + client().performRequest(createAirlineDataAggs); - client().performRequest("put", "airline-data-aggs/response/1", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":100.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/2", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:01:00Z\",\"airline\":\"AAA\",\"responsetime\":200.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/3", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"BBB\",\"responsetime\":1000.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", 
"airline-data-aggs/response/4", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:01:00Z\",\"airline\":\"BBB\",\"responsetime\":2000.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/5", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:00:00Z\",\"airline\":\"AAA\",\"responsetime\":300.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/6", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:01:00Z\",\"airline\":\"AAA\",\"responsetime\":400.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/7", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:00:00Z\",\"airline\":\"BBB\",\"responsetime\":3000.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/8", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:01:00Z\",\"airline\":\"BBB\",\"responsetime\":4000.0}", - ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 1}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":100.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 2}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:01:00Z\",\"airline\":\"AAA\",\"responsetime\":200.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 3}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"BBB\",\"responsetime\":1000.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 4}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:01:00Z\",\"airline\":\"BBB\",\"responsetime\":2000.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 5}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:00:00Z\",\"airline\":\"AAA\",\"responsetime\":300.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 6}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:01:00Z\",\"airline\":\"AAA\",\"responsetime\":400.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 7}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:00:00Z\",\"airline\":\"BBB\",\"responsetime\":3000.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 8}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:01:00Z\",\"airline\":\"BBB\",\"responsetime\":4000.0}\n"); - // Ensure all data is searchable - client().performRequest("post", "_refresh"); + bulkIndex(bulk.toString()); } private void addNetworkData(String index) throws IOException { - // Create index with source = enabled, doc_values = enabled, stored = false + multi-field - String mappings = "{" + Request createIndexRequest = new Request("PUT", index); + createIndexRequest.setJsonEntity("{" + " \"mappings\": {" + " \"doc\": {" + " \"properties\": {" @@ -260,27 +242,25 @@ public class DatafeedJobsRestIT extends ESRestTestCase { + " }" + " }" + " }" - + "}"; - client().performRequest("put", index, Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + "}");; + client().performRequest(createIndexRequest); + 
StringBuilder bulk = new StringBuilder(); String docTemplate = "{\"timestamp\":%d,\"host\":\"%s\",\"network_bytes_out\":%d}"; Date date = new Date(1464739200735L); - for (int i=0; i<120; i++) { + for (int i = 0; i < 120; i++) { long byteCount = randomNonNegativeLong(); - String jsonDoc = String.format(Locale.ROOT, docTemplate, date.getTime(), "hostA", byteCount); - client().performRequest("post", index + "/doc", Collections.emptyMap(), - new StringEntity(jsonDoc, ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"doc\"}}\n"); + bulk.append(String.format(Locale.ROOT, docTemplate, date.getTime(), "hostA", byteCount)).append('\n'); byteCount = randomNonNegativeLong(); - jsonDoc = String.format(Locale.ROOT, docTemplate, date.getTime(), "hostB", byteCount); - client().performRequest("post", index + "/doc", Collections.emptyMap(), - new StringEntity(jsonDoc, ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"doc\"}}\n"); + bulk.append(String.format(Locale.ROOT, docTemplate, date.getTime(), "hostB", byteCount)).append('\n'); date = new Date(date.getTime() + 10_000); } - // Ensure all data is searchable - client().performRequest("post", "_refresh"); + bulkIndex(bulk.toString()); } public void testLookbackOnlyWithMixedTypes() throws Exception { @@ -314,11 +294,21 @@ public class DatafeedJobsRestIT extends ESRestTestCase { public void testLookbackOnlyWithNestedFields() throws Exception { String jobId = "test-lookback-only-with-nested-fields"; - String job = "{\"description\":\"Nested job\", \"analysis_config\" : {\"bucket_span\":\"1h\",\"detectors\" :" - + "[{\"function\":\"mean\",\"field_name\":\"responsetime.millis\"}]}, \"data_description\" : {\"time_field\":\"time\"}" - + "}"; - client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), - new StringEntity(job, ContentType.APPLICATION_JSON)); + Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity("{\n" + + " \"description\": \"Nested job\",\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"1h\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"responsetime.millis\"\n" + + " }\n" + + " ]\n" + + " }," + + " \"data_description\": {\"time_field\": \"time\"}\n" + + "}"); + client().performRequest(createJobRequest); String datafeedId = jobId + "-datafeed"; new DatafeedBuilder(datafeedId, jobId, "nested-data", "response").build(); @@ -326,8 +316,9 @@ public class DatafeedJobsRestIT extends ESRestTestCase { startDatafeedAndWaitUntilStopped(datafeedId); waitUntilJobIsClosed(jobId); - Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + Response jobStatsResponse = client().performRequest( + new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":2")); assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":2")); assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0")); @@ -340,14 +331,23 @@ public class DatafeedJobsRestIT extends 
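Both addAirlineData and addNetworkData now collect index actions and document sources into one newline-delimited body and hand it to bulkIndex(String), whose definition falls outside the hunks shown here. A plausible sketch, assuming the `_bulk` endpoint with `refresh` enabled so the documents are immediately searchable (standing in for the removed standalone `_refresh` calls):

    private void bulkIndex(String bulk) throws IOException {
        Request bulkRequest = new Request("POST", "/_bulk");
        // NDJSON body: an {"index": ...} action line followed by the document source
        bulkRequest.setJsonEntity(bulk);
        bulkRequest.addParameter("refresh", "true");
        client().performRequest(bulkRequest);
    }

Batching the test fixtures this way replaces hundreds of individual document PUTs with a single round trip per setup method.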
@@ -314,11 +294,21 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
 
     public void testLookbackOnlyWithNestedFields() throws Exception {
         String jobId = "test-lookback-only-with-nested-fields";
-        String job = "{\"description\":\"Nested job\", \"analysis_config\" : {\"bucket_span\":\"1h\",\"detectors\" :"
-                + "[{\"function\":\"mean\",\"field_name\":\"responsetime.millis\"}]}, \"data_description\" : {\"time_field\":\"time\"}"
-                + "}";
-        client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(),
-                new StringEntity(job, ContentType.APPLICATION_JSON));
+        Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);
+        createJobRequest.setJsonEntity("{\n"
+                + "  \"description\": \"Nested job\",\n"
+                + "  \"analysis_config\": {\n"
+                + "    \"bucket_span\": \"1h\",\n"
+                + "    \"detectors\": [\n"
+                + "      {\n"
+                + "        \"function\": \"mean\",\n"
+                + "        \"field_name\": \"responsetime.millis\"\n"
+                + "      }\n"
+                + "    ]\n"
+                + "  },"
+                + "  \"data_description\": {\"time_field\": \"time\"}\n"
+                + "}");
+        client().performRequest(createJobRequest);
 
         String datafeedId = jobId + "-datafeed";
         new DatafeedBuilder(datafeedId, jobId, "nested-data", "response").build();
@@ -326,8 +316,9 @@
         startDatafeedAndWaitUntilStopped(datafeedId);
         waitUntilJobIsClosed(jobId);
-        Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats");
-        String jobStatsResponseAsString = responseEntityToString(jobStatsResponse);
+        Response jobStatsResponse = client().performRequest(
+                new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"));
+        String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity());
         assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":2"));
         assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":2"));
         assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0"));
@@ -340,14 +331,23 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
 
     public void testInsufficientSearchPrivilegesOnPut() throws Exception {
         String jobId = "privs-put-job";
-        String job = "{\"description\":\"Aggs job\",\"analysis_config\" :{\"bucket_span\":\"1h\","
-                + "\"summary_count_field_name\":\"doc_count\","
-                + "\"detectors\":[{\"function\":\"mean\","
-                + "\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]},"
-                + "\"data_description\" : {\"time_field\":\"time stamp\"}"
-                + "}";
-        client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId,
-                Collections.emptyMap(), new StringEntity(job, ContentType.APPLICATION_JSON));
+        Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);
+        createJobRequest.setJsonEntity("{\n"
+                + "  \"description\": \"Aggs job\",\n"
+                + "  \"analysis_config\": {\n"
+                + "    \"bucket_span\": \"1h\",\n"
+                + "    \"summary_count_field_name\": \"doc_count\",\n"
+                + "    \"detectors\": [\n"
+                + "      {\n"
+                + "        \"function\": \"mean\",\n"
+                + "        \"field_name\": \"responsetime\",\n"
+                + "        \"by_field_name\":\"airline\"\n"
+                + "      }\n"
+                + "    ]\n"
+                + "  },\n"
+                + "  \"data_description\" : {\"time_field\": \"time stamp\"}\n"
+                + "}");
+        client().performRequest(createJobRequest);
 
         String datafeedId = "datafeed-" + jobId;
         // This should be disallowed, because even though the ml_admin user has permission to
@@ -365,14 +365,23 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
 
     public void testInsufficientSearchPrivilegesOnPreview() throws Exception {
         String jobId = "privs-preview-job";
-        String job = "{\"description\":\"Aggs job\",\"analysis_config\" :{\"bucket_span\":\"1h\","
-                + "\"summary_count_field_name\":\"doc_count\","
-                + "\"detectors\":[{\"function\":\"mean\","
-                + "\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]},"
-                + "\"data_description\" : {\"time_field\":\"time stamp\"}"
-                + "}";
-        client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId,
-                Collections.emptyMap(), new StringEntity(job, ContentType.APPLICATION_JSON));
+        Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);
+        createJobRequest.setJsonEntity("{\n"
+                + "  \"description\": \"Aggs job\",\n"
+                + "  \"analysis_config\": {\n"
+                + "    \"bucket_span\": \"1h\",\n"
+                + "    \"summary_count_field_name\": \"doc_count\",\n"
+                + "    \"detectors\": [\n"
+                + "      {\n"
+                + "        \"function\": \"mean\",\n"
+                + "        \"field_name\": \"responsetime\",\n"
+                + "        \"by_field_name\": \"airline\"\n"
+                + "      }\n"
+                + "    ]\n"
+                + "  },\n"
+                + "  \"data_description\" : {\"time_field\": \"time stamp\"}\n"
+                + "}");
+        client().performRequest(createJobRequest);
 
         String datafeedId = "datafeed-" + jobId;
         new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs", "response").build();
 
@@ -380,10 +389,11 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
         // This should be disallowed, because ml_admin is trying to preview a datafeed created
         // by another user (x_pack_rest_user in this case) that will reveal the content of an index they
         // don't have permission to search directly
-        ResponseException e = expectThrows(ResponseException.class, () ->
-                client().performRequest("get",
-                        MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_preview",
-                        new BasicHeader("Authorization", BASIC_AUTH_VALUE_ML_ADMIN)));
+        Request getFeed = new Request("GET", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_preview");
+        RequestOptions.Builder options = getFeed.getOptions().toBuilder();
+        options.addHeader("Authorization", BASIC_AUTH_VALUE_ML_ADMIN);
+        getFeed.setOptions(options);
+        ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(getFeed));
 
         assertThat(e.getMessage(), containsString("[indices:data/read/field_caps] is unauthorized for user [ml_admin]"));
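Per-request headers now travel through RequestOptions instead of the old varargs Header parameters, which is the pattern used just above (the datafeed id here is illustrative):

    Request getFeed = new Request("GET", MachineLearning.BASE_PATH + "datafeeds/some-datafeed/_preview");
    RequestOptions.Builder options = getFeed.getOptions().toBuilder();
    options.addHeader("Authorization", BASIC_AUTH_VALUE_ML_ADMIN); // any precomputed basic-auth header value
    getFeed.setOptions(options);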
\"summary_count_field_name\": \"doc_count\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"responsetime\",\n" + + " \"by_field_name\": \"airline\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\"time_field\": \"time stamp\"}\n" + + "}"); + client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; String aggregations = "{\"time stamp\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":\"1h\"}," @@ -438,8 +469,9 @@ public class DatafeedJobsRestIT extends ESRestTestCase { startDatafeedAndWaitUntilStopped(datafeedId); waitUntilJobIsClosed(jobId); - Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":4")); assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":4")); assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0")); @@ -447,13 +479,22 @@ public class DatafeedJobsRestIT extends ESRestTestCase { public void testLookbackUsingDerivativeAggWithLargerHistogramBucketThanDataRate() throws Exception { String jobId = "derivative-agg-network-job"; - String job = "{\"analysis_config\" :{\"bucket_span\":\"300s\"," - + "\"summary_count_field_name\":\"doc_count\"," - + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"bytes-delta\",\"by_field_name\":\"hostname\"}]}," - + "\"data_description\" : {\"time_field\":\"timestamp\"}" - + "}"; - client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), - new StringEntity(job, ContentType.APPLICATION_JSON)); + Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity("{\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"300s\",\n" + + " \"summary_count_field_name\": \"doc_count\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"bytes-delta\",\n" + + " \"by_field_name\": \"hostname\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\"time_field\": \"timestamp\"}\n" + + "}"); + client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; String aggregations = @@ -471,8 +512,9 @@ public class DatafeedJobsRestIT extends ESRestTestCase { startDatafeedAndWaitUntilStopped(datafeedId); waitUntilJobIsClosed(jobId); - Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":40")); assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":40")); assertThat(jobStatsResponseAsString, containsString("\"out_of_order_timestamp_count\":0")); @@ -483,13 
@@ -483,13 +525,22 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
 
     public void testLookbackUsingDerivativeAggWithSmallerHistogramBucketThanDataRate() throws Exception {
         String jobId = "derivative-agg-network-job";
-        String job = "{\"analysis_config\" :{\"bucket_span\":\"300s\","
-                + "\"summary_count_field_name\":\"doc_count\","
-                + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"bytes-delta\",\"by_field_name\":\"hostname\"}]},"
-                + "\"data_description\" : {\"time_field\":\"timestamp\"}"
-                + "}";
-        client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(),
-                new StringEntity(job, ContentType.APPLICATION_JSON));
+        Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);
+        createJobRequest.setJsonEntity("{\n"
+                + "  \"analysis_config\": {\n"
+                + "    \"bucket_span\": \"300s\",\n"
+                + "    \"summary_count_field_name\": \"doc_count\",\n"
+                + "    \"detectors\": [\n"
+                + "      {\n"
+                + "        \"function\": \"mean\",\n"
+                + "        \"field_name\": \"bytes-delta\",\n"
+                + "        \"by_field_name\": \"hostname\"\n"
+                + "      }\n"
+                + "    ]\n"
+                + "  },\n"
+                + "  \"data_description\": {\"time_field\": \"timestamp\"}\n"
+                + "}");
+        client().performRequest(createJobRequest);
 
         String datafeedId = "datafeed-" + jobId;
         String aggregations =
@@ -507,21 +558,31 @@
         startDatafeedAndWaitUntilStopped(datafeedId);
         waitUntilJobIsClosed(jobId);
-        Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats");
-        String jobStatsResponseAsString = responseEntityToString(jobStatsResponse);
+        Response jobStatsResponse = client().performRequest(new Request("GET",
+                MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"));
+        String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity());
         assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":240"));
         assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":240"));
     }
 
     public void testLookbackWithoutPermissions() throws Exception {
         String jobId = "permission-test-network-job";
-        String job = "{\"analysis_config\" :{\"bucket_span\":\"300s\","
-                + "\"summary_count_field_name\":\"doc_count\","
-                + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"bytes-delta\",\"by_field_name\":\"hostname\"}]},"
-                + "\"data_description\" : {\"time_field\":\"timestamp\"}"
-                + "}";
-        client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(),
-                new StringEntity(job, ContentType.APPLICATION_JSON));
+        Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);
+        createJobRequest.setJsonEntity("{\n"
+                + "  \"analysis_config\": {\n"
+                + "    \"bucket_span\": \"300s\",\n"
+                + "    \"summary_count_field_name\": \"doc_count\",\n"
+                + "    \"detectors\": [\n"
+                + "      {\n"
+                + "        \"function\": \"mean\",\n"
+                + "        \"field_name\": \"bytes-delta\",\n"
+                + "        \"by_field_name\": \"hostname\"\n"
+                + "      }\n"
+                + "    ]\n"
+                + "  },\n"
+                + "  \"data_description\": {\"time_field\": \"timestamp\"}\n"
+                + "}");
+        client().performRequest(createJobRequest);
 
         String datafeedId = "datafeed-" + jobId;
         String aggregations =
@@ -545,29 +606,39 @@
         startDatafeedAndWaitUntilStopped(datafeedId, BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS);
         waitUntilJobIsClosed(jobId);
 
-        Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats");
-        String jobStatsResponseAsString = responseEntityToString(jobStatsResponse);
+        Response jobStatsResponse = client().performRequest(new Request("GET",
+                MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"));
+        String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity());
         // We expect that no data made it through to the job
         assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":0"));
         assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":0"));
 
         // There should be a notification saying that there was a problem extracting data
-        client().performRequest("post", "_refresh");
-        Response notificationsResponse = client().performRequest("get", AuditorField.NOTIFICATIONS_INDEX + "/_search?q=job_id:" + jobId);
-        String notificationsResponseAsString = responseEntityToString(notificationsResponse);
+        client().performRequest(new Request("POST", "/_refresh"));
+        Response notificationsResponse = client().performRequest(
+                new Request("GET", AuditorField.NOTIFICATIONS_INDEX + "/_search?q=job_id:" + jobId));
+        String notificationsResponseAsString = EntityUtils.toString(notificationsResponse.getEntity());
         assertThat(notificationsResponseAsString, containsString("\"message\":\"Datafeed is encountering errors extracting data: "
                 + "action [indices:data/read/search] is unauthorized for user [ml_admin_plus_data]\""));
     }
 
     public void testLookbackWithPipelineBucketAgg() throws Exception {
         String jobId = "pipeline-bucket-agg-job";
-        String job = "{\"analysis_config\" :{\"bucket_span\":\"1h\","
-                + "\"summary_count_field_name\":\"doc_count\","
-                + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"percentile95_airlines_count\"}]},"
-                + "\"data_description\" : {\"time_field\":\"time stamp\"}"
-                + "}";
-        client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(),
-                new StringEntity(job, ContentType.APPLICATION_JSON));
+        Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);
+        createJobRequest.setJsonEntity("{\n"
+                + "  \"analysis_config\": {\n"
+                + "    \"bucket_span\": \"1h\",\n"
+                + "    \"summary_count_field_name\": \"doc_count\",\n"
+                + "    \"detectors\": [\n"
+                + "      {\n"
+                + "        \"function\": \"mean\",\n"
+                + "        \"field_name\": \"percentile95_airlines_count\"\n"
+                + "      }\n"
+                + "    ]\n"
+                + "  },\n"
+                + "  \"data_description\": {\"time_field\": \"time stamp\"}\n"
+                + "}");
+        client().performRequest(createJobRequest);
 
         String datafeedId = "datafeed-" + jobId;
         String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":\"15m\"},"
@@ -582,8 +653,9 @@
         startDatafeedAndWaitUntilStopped(datafeedId);
         waitUntilJobIsClosed(jobId);
-        Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats");
-        String jobStatsResponseAsString = responseEntityToString(jobStatsResponse);
+        Response jobStatsResponse = client().performRequest(new Request("GET",
+                MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"));
+        String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity());
         assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":2"));
         assertThat(jobStatsResponseAsString, containsString("\"input_field_count\":4"));
containsString("\"processed_record_count\":2")); @@ -599,15 +671,15 @@ public class DatafeedJobsRestIT extends ESRestTestCase { new DatafeedBuilder(datafeedId, jobId, "airline-data", "response").build(); openJob(client(), jobId); - Response response = client().performRequest("post", - MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start?start=2016-06-01T00:00:00Z"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(response), equalTo("{\"started\":true}")); + Request startRequest = new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start"); + startRequest.addParameter("start", "2016-06-01T00:00:00Z"); + Response response = client().performRequest(startRequest); + assertThat(EntityUtils.toString(response.getEntity()), equalTo("{\"started\":true}")); assertBusy(() -> { try { - Response getJobResponse = client().performRequest("get", - MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String responseAsString = responseEntityToString(getJobResponse); + Response getJobResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String responseAsString = EntityUtils.toString(getJobResponse.getEntity()); assertThat(responseAsString, containsString("\"processed_record_count\":2")); assertThat(responseAsString, containsString("\"state\":\"opened\"")); } catch (Exception e1) { @@ -619,9 +691,9 @@ public class DatafeedJobsRestIT extends ESRestTestCase { // test a model snapshot is present assertBusy(() -> { try { - Response getJobResponse = client().performRequest("get", - MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/model_snapshots"); - String responseAsString = responseEntityToString(getJobResponse); + Response getJobResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/model_snapshots")); + String responseAsString = EntityUtils.toString(getJobResponse.getEntity()); assertThat(responseAsString, containsString("\"count\":1")); } catch (Exception e1) { throw new RuntimeException(e1); @@ -629,25 +701,25 @@ public class DatafeedJobsRestIT extends ESRestTestCase { }); ResponseException e = expectThrows(ResponseException.class, - () -> client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + () -> client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId))); response = e.getResponse(); assertThat(response.getStatusLine().getStatusCode(), equalTo(409)); - assertThat(responseEntityToString(response), containsString("Cannot delete job [" + jobId + "] because datafeed [" + datafeedId - + "] refers to it")); + assertThat(EntityUtils.toString(response.getEntity()), + containsString("Cannot delete job [" + jobId + "] because datafeed [" + datafeedId + "] refers to it")); - response = client().performRequest("post", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop"); + response = client().performRequest(new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop")); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(response), equalTo("{\"stopped\":true}")); + assertThat(EntityUtils.toString(response.getEntity()), equalTo("{\"stopped\":true}")); - client().performRequest("POST", "/_xpack/ml/anomaly_detectors/" + jobId + "/_close"); + client().performRequest(new Request("POST", 
"/_xpack/ml/anomaly_detectors/" + jobId + "/_close")); - response = client().performRequest("delete", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); + response = client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId)); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(response), equalTo("{\"acknowledged\":true}")); + assertThat(EntityUtils.toString(response.getEntity()), equalTo("{\"acknowledged\":true}")); - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + response = client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(response), equalTo("{\"acknowledged\":true}")); + assertThat(EntityUtils.toString(response.getEntity()), equalTo("{\"acknowledged\":true}")); } public void testForceDeleteWhileDatafeedIsRunning() throws Exception { @@ -657,25 +729,26 @@ public class DatafeedJobsRestIT extends ESRestTestCase { new DatafeedBuilder(datafeedId, jobId, "airline-data", "response").build(); openJob(client(), jobId); - Response response = client().performRequest("post", - MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start?start=2016-06-01T00:00:00Z"); + Request startRequest = new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start"); + startRequest.addParameter("start", "2016-06-01T00:00:00Z"); + Response response = client().performRequest(startRequest); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(response), equalTo("{\"started\":true}")); + assertThat(EntityUtils.toString(response.getEntity()), equalTo("{\"started\":true}")); ResponseException e = expectThrows(ResponseException.class, - () -> client().performRequest("delete", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId)); + () -> client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId))); response = e.getResponse(); assertThat(response.getStatusLine().getStatusCode(), equalTo(409)); - assertThat(responseEntityToString(response), containsString("Cannot delete datafeed [" + datafeedId - + "] while its status is started")); + assertThat(EntityUtils.toString(response.getEntity()), + containsString("Cannot delete datafeed [" + datafeedId + "] while its status is started")); - response = client().performRequest("delete", - MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "?force=true"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(response), equalTo("{\"acknowledged\":true}")); + Request forceDeleteRequest = new Request("DELETE", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); + forceDeleteRequest.addParameter("force", "true"); + response = client().performRequest(forceDeleteRequest); + assertThat(EntityUtils.toString(response.getEntity()), equalTo("{\"acknowledged\":true}")); expectThrows(ResponseException.class, - () -> client().performRequest("get", "/_xpack/ml/datafeeds/" + datafeedId)); + () -> client().performRequest(new Request("GET", "/_xpack/ml/datafeeds/" + datafeedId))); } private class LookbackOnlyTestHelper { @@ -727,9 +800,9 @@ public class DatafeedJobsRestIT extends ESRestTestCase { startDatafeedAndWaitUntilStopped(datafeedId); waitUntilJobIsClosed(jobId); - Response jobStatsResponse = 
client().performRequest("get", - MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); if (shouldSucceedInput) { assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":2")); } else { @@ -748,16 +821,20 @@ public class DatafeedJobsRestIT extends ESRestTestCase { } private void startDatafeedAndWaitUntilStopped(String datafeedId, String authHeader) throws Exception { - Response startDatafeedRequest = client().performRequest("post", - MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start?start=2016-06-01T00:00:00Z&end=2016-06-02T00:00:00Z", - new BasicHeader("Authorization", authHeader)); - assertThat(startDatafeedRequest.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(startDatafeedRequest), equalTo("{\"started\":true}")); + Request request = new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start"); + request.addParameter("start", "2016-06-01T00:00:00Z"); + request.addParameter("end", "2016-06-02T00:00:00Z"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Authorization", authHeader); + request.setOptions(options); + Response startDatafeedResponse = client().performRequest(request); + assertThat(EntityUtils.toString(startDatafeedResponse.getEntity()), equalTo("{\"started\":true}")); assertBusy(() -> { try { - Response datafeedStatsResponse = client().performRequest("get", - MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stats"); - assertThat(responseEntityToString(datafeedStatsResponse), containsString("\"state\":\"stopped\"")); + Response datafeedStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stats")); + assertThat(EntityUtils.toString(datafeedStatsResponse.getEntity()), + containsString("\"state\":\"stopped\"")); } catch (Exception e) { throw new RuntimeException(e); } @@ -767,9 +844,9 @@ public class DatafeedJobsRestIT extends ESRestTestCase { private void waitUntilJobIsClosed(String jobId) throws Exception { assertBusy(() -> { try { - Response jobStatsResponse = client().performRequest("get", - MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertThat(responseEntityToString(jobStatsResponse), containsString("\"state\":\"closed\"")); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + assertThat(EntityUtils.toString(jobStatsResponse.getEntity()), containsString("\"state\":\"closed\"")); } catch (Exception e) { throw new RuntimeException(e); } @@ -777,27 +854,30 @@ public class DatafeedJobsRestIT extends ESRestTestCase { } private Response createJob(String id, String airlineVariant) throws Exception { - String job = "{\n" + " \"description\":\"Analysis of response time by airline\",\n" - + " \"analysis_config\" : {\n" + " \"bucket_span\":\"1h\",\n" - + " \"detectors\" :[\n" - + " {\"function\":\"mean\",\"field_name\":\"responsetime\",\"by_field_name\":\"" + airlineVariant + "\"}]\n" - + " },\n" + " \"data_description\" : {\n" - + " \"format\":\"xcontent\",\n" - + " \"time_field\":\"time stamp\",\n" + " 
\"time_format\":\"yyyy-MM-dd'T'HH:mm:ssX\"\n" + " }\n" - + "}"; - return client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + id, - Collections.emptyMap(), new StringEntity(job, ContentType.APPLICATION_JSON)); - } - - private static String responseEntityToString(Response response) throws Exception { - try (BufferedReader reader = new BufferedReader(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) { - return reader.lines().collect(Collectors.joining("\n")); - } + Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + id); + request.setJsonEntity("{\n" + + " \"description\": \"Analysis of response time by airline\",\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"1h\",\n" + + " \"detectors\" :[\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"responsetime\",\n" + + " \"by_field_name\": \"" + airlineVariant + "\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\n" + + " \"format\": \"xcontent\",\n" + + " \"time_field\": \"time stamp\",\n" + + " \"time_format\": \"yyyy-MM-dd'T'HH:mm:ssX\"\n" + + " }\n" + + "}"); + return client().performRequest(request); } public static void openJob(RestClient client, String jobId) throws IOException { - Response response = client.performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + client.performRequest(new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open")); } @After @@ -850,17 +930,28 @@ public class DatafeedJobsRestIT extends ESRestTestCase { } Response build() throws IOException { - String datafeedConfig = "{" + Request request = new Request("PUT", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); + request.setJsonEntity("{" + "\"job_id\": \"" + jobId + "\",\"indexes\":[\"" + index + "\"],\"types\":[\"" + type + "\"]" + (source ? ",\"_source\":true" : "") + (scriptedFields == null ? "" : ",\"script_fields\":" + scriptedFields) + (aggregations == null ? "" : ",\"aggs\":" + aggregations) + (chunkingTimespan == null ? 
"" : ",\"chunking_config\":{\"mode\":\"MANUAL\",\"time_span\":\"" + chunkingTimespan + "\"}") - + "}"; - return client().performRequest("put", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId, Collections.emptyMap(), - new StringEntity(datafeedConfig, ContentType.APPLICATION_JSON), - new BasicHeader("Authorization", authHeader)); + + "}"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Authorization", authHeader); + request.setOptions(options); + return client().performRequest(request); } } + + private void bulkIndex(String bulk) throws IOException { + Request bulkRequest = new Request("POST", "/_bulk"); + bulkRequest.setJsonEntity(bulk); + bulkRequest.addParameter("refresh", "true"); + bulkRequest.addParameter("pretty", null); + String bulkResponse = EntityUtils.toString(client().performRequest(bulkRequest).getEntity()); + assertThat(bulkResponse, not(containsString("\"errors\": false"))); + } } diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java similarity index 74% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java index 2f3ea6c83a5..2d8c6a4128b 100644 --- a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java @@ -7,7 +7,10 @@ package org.elasticsearch.xpack.ml.integration; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.action.DeleteForecastAction; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; @@ -276,6 +279,104 @@ public class ForecastIT extends MlNativeAutodetectIntegTestCase { } + public void testDelete() throws Exception { + Detector.Builder detector = new 
Detector.Builder("mean", "value"); + + TimeValue bucketSpan = TimeValue.timeValueHours(1); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + + Job.Builder job = new Job.Builder("forecast-it-test-delete"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + long now = Instant.now().getEpochSecond(); + long timestamp = now - 50 * bucketSpan.seconds(); + List data = new ArrayList<>(); + while (timestamp < now) { + data.add(createJsonRecord(createRecord(timestamp, 10.0))); + data.add(createJsonRecord(createRecord(timestamp, 30.0))); + timestamp += bucketSpan.seconds(); + } + + postData(job.getId(), data.stream().collect(Collectors.joining())); + flushJob(job.getId(), false); + String forecastIdDefaultDurationDefaultExpiry = forecast(job.getId(), null, null); + String forecastIdDuration1HourNoExpiry = forecast(job.getId(), TimeValue.timeValueHours(1), TimeValue.ZERO); + waitForecastToFinish(job.getId(), forecastIdDefaultDurationDefaultExpiry); + waitForecastToFinish(job.getId(), forecastIdDuration1HourNoExpiry); + closeJob(job.getId()); + + { + ForecastRequestStats forecastStats = getForecastStats(job.getId(), forecastIdDefaultDurationDefaultExpiry); + assertNotNull(forecastStats); + ForecastRequestStats otherStats = getForecastStats(job.getId(), forecastIdDuration1HourNoExpiry); + assertNotNull(otherStats); + } + + { + DeleteForecastAction.Request request = new DeleteForecastAction.Request(job.getId(), + forecastIdDefaultDurationDefaultExpiry + "," + forecastIdDuration1HourNoExpiry); + AcknowledgedResponse response = client().execute(DeleteForecastAction.INSTANCE, request).actionGet(); + assertTrue(response.isAcknowledged()); + } + + { + ForecastRequestStats forecastStats = getForecastStats(job.getId(), forecastIdDefaultDurationDefaultExpiry); + assertNull(forecastStats); + ForecastRequestStats otherStats = getForecastStats(job.getId(), forecastIdDuration1HourNoExpiry); + assertNull(otherStats); + } + + { + DeleteForecastAction.Request request = new DeleteForecastAction.Request(job.getId(), "forecast-does-not-exist"); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> client().execute(DeleteForecastAction.INSTANCE, request).actionGet()); + assertThat(e.getMessage(), + equalTo("No forecast(s) [forecast-does-not-exist] exists for job [forecast-it-test-delete]")); + } + + { + DeleteForecastAction.Request request = new DeleteForecastAction.Request(job.getId(), MetaData.ALL); + AcknowledgedResponse response = client().execute(DeleteForecastAction.INSTANCE, request).actionGet(); + assertTrue(response.isAcknowledged()); + } + + { + Job.Builder otherJob = new Job.Builder("forecasts-delete-with-all-and-allow-no-forecasts"); + otherJob.setAnalysisConfig(analysisConfig); + otherJob.setDataDescription(dataDescription); + + registerJob(otherJob); + putJob(otherJob); + DeleteForecastAction.Request request = new DeleteForecastAction.Request(otherJob.getId(), MetaData.ALL); + AcknowledgedResponse response = client().execute(DeleteForecastAction.INSTANCE, request).actionGet(); + assertTrue(response.isAcknowledged()); + } + + { + Job.Builder otherJob = new Job.Builder("forecasts-delete-with-all-and-not-allow-no-forecasts"); + 
otherJob.setAnalysisConfig(analysisConfig); + otherJob.setDataDescription(dataDescription); + + registerJob(otherJob); + putJob(otherJob); + + DeleteForecastAction.Request request = new DeleteForecastAction.Request(otherJob.getId(), MetaData.ALL); + request.setAllowNoForecasts(false); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> client().execute(DeleteForecastAction.INSTANCE, request).actionGet()); + assertThat(e.getMessage(), + equalTo("No forecast(s) [_all] exists for job [forecasts-delete-with-all-and-not-allow-no-forecasts]")); + } + } + private void createDataWithLotsOfClientIps(TimeValue bucketSpan, Job.Builder job) throws IOException { long now = Instant.now().getEpochSecond(); long timestamp = now - 15 * bucketSpan.seconds(); diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java new file mode 100644 index 00000000000..5fc204cbf1f --- /dev/null +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java @@ -0,0 +1,607 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.ml.integration.MlRestTestStateCleaner; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; +import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; +import org.junit.After; + +import java.io.IOException; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class MlJobIT extends ESRestTestCase { + + private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("x_pack_rest_user", + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + + @Override + protected Settings restClientSettings() { + return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build(); + } + + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + + public void testPutJob_GivenFarequoteConfig() throws Exception { + Response response = createFarequoteJob("given-farequote-config-job"); + String responseAsString = EntityUtils.toString(response.getEntity()); + assertThat(responseAsString, containsString("\"job_id\":\"given-farequote-config-job\"")); + } + + public void testGetJob_GivenNoSuchJob() throws Exception { + ResponseException e = expectThrows(ResponseException.class, () -> + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/non-existing-job/_stats"))); + + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + assertThat(e.getMessage(), containsString("No known job with id 'non-existing-job'")); + } + + public void testGetJob_GivenJobExists() throws Exception { + createFarequoteJob("get-job_given-job-exists-job"); + + Response response = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/get-job_given-job-exists-job/_stats")); + String responseAsString = EntityUtils.toString(response.getEntity()); + assertThat(responseAsString, containsString("\"count\":1")); + assertThat(responseAsString, containsString("\"job_id\":\"get-job_given-job-exists-job\"")); + } + + public void testGetJobs_GivenSingleJob() throws Exception { + String jobId = "get-jobs_given-single-job-job"; + createFarequoteJob(jobId); + + // Explicit _all + String explicitAll = EntityUtils.toString( + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/_all")).getEntity()); + assertThat(explicitAll, containsString("\"count\":1")); + assertThat(explicitAll, 
containsString("\"job_id\":\"" + jobId + "\"")); + + // Implicit _all + String implicitAll = EntityUtils.toString( + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors")).getEntity()); + assertThat(implicitAll, containsString("\"count\":1")); + assertThat(implicitAll, containsString("\"job_id\":\"" + jobId + "\"")); + } + + public void testGetJobs_GivenMultipleJobs() throws Exception { + createFarequoteJob("given-multiple-jobs-job-1"); + createFarequoteJob("given-multiple-jobs-job-2"); + createFarequoteJob("given-multiple-jobs-job-3"); + + // Explicit _all + String explicitAll = EntityUtils.toString( + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/_all")).getEntity()); + assertThat(explicitAll, containsString("\"count\":3")); + assertThat(explicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-1\"")); + assertThat(explicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-2\"")); + assertThat(explicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-3\"")); + + // Implicit _all + String implicitAll = EntityUtils.toString( + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors")).getEntity()); + assertThat(implicitAll, containsString("\"count\":3")); + assertThat(implicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-1\"")); + assertThat(implicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-2\"")); + assertThat(implicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-3\"")); + } + + private Response createFarequoteJob(String jobId) throws IOException { + Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + request.setJsonEntity( + "{\n" + + " \"description\":\"Analysis of response time by airline\",\n" + + " \"analysis_config\" : {\n" + + " \"bucket_span\": \"3600s\",\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]\n" + + " },\n" + " \"data_description\" : {\n" + + " \"field_delimiter\":\",\",\n" + + " \"time_field\":\"time\",\n" + + " \"time_format\":\"yyyy-MM-dd HH:mm:ssX\"\n" + + " }\n" + + "}"); + return client().performRequest(request); + } + + public void testCantCreateJobWithSameID() throws Exception { + String jobTemplate = "{\n" + + " \"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\"}]\n" + + " },\n" + + " \"data_description\": {},\n" + + " \"results_index_name\" : \"%s\"}"; + + String jobId = "cant-create-job-with-same-id-job"; + Request createJob1 = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJob1.setJsonEntity(String.format(Locale.ROOT, jobTemplate, "index-1")); + client().performRequest(createJob1); + + Request createJob2 = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJob2.setJsonEntity(String.format(Locale.ROOT, jobTemplate, "index-2")); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(createJob2)); + + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(e.getMessage(), containsString("The job cannot be created with the Id '" + jobId + "'. 
The Id is already used.")); + } + + public void testCreateJobsWithIndexNameOption() throws Exception { + String jobTemplate = "{\n" + + " \"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\"}]\n" + + " },\n" + + " \"data_description\": {},\n" + + " \"results_index_name\" : \"%s\"}"; + + String jobId1 = "create-jobs-with-index-name-option-job-1"; + String indexName = "non-default-index"; + Request createJob1 = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1); + createJob1.setJsonEntity(String.format(Locale.ROOT, jobTemplate, indexName)); + client().performRequest(createJob1); + + String jobId2 = "create-jobs-with-index-name-option-job-2"; + Request createJob2 = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2); + createJob2.setEntity(createJob1.getEntity()); + client().performRequest(createJob2); + + // With security enabled GET _aliases throws an index_not_found_exception + // if no aliases have been created. In multi-node tests the alias may not + // appear immediately so wait here. + assertBusy(() -> { + try { + String aliasesResponse = EntityUtils.toString(client().performRequest(new Request("GET", "/_aliases")).getEntity()); + assertThat(aliasesResponse, + containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName("custom-" + indexName) + "\":{\"aliases\":{")); + assertThat(aliasesResponse, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId1 + "\",\"boost\":1.0}}}}")); + assertThat(aliasesResponse, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId1) + "\":{}")); + assertThat(aliasesResponse, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId2) + + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId2 + "\",\"boost\":1.0}}}}")); + assertThat(aliasesResponse, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId2) + "\":{}")); + } catch (ResponseException e) { + throw new AssertionError(e); + } + }); + + String responseAsString = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(responseAsString, + containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName)); + assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1)))); + assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2)))); + + String id = String.format(Locale.ROOT, "%s_bucket_%s_%s", jobId1, "1234", 300); + Request createResultRequest = new Request("PUT", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/doc/" + id); + createResultRequest.setJsonEntity(String.format(Locale.ROOT, + "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"result_type\":\"bucket\", \"bucket_span\": \"%s\"}", + jobId1, "1234", 1)); + client().performRequest(createResultRequest); + + id = String.format(Locale.ROOT, "%s_bucket_%s_%s", jobId1, "1236", 300); + createResultRequest = new Request("PUT", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/doc/" + id); + createResultRequest.setJsonEntity(String.format(Locale.ROOT, + "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"result_type\":\"bucket\", \"bucket_span\": \"%s\"}", + jobId1, "1236", 1)); + client().performRequest(createResultRequest); + + client().performRequest(new Request("POST", "/_refresh")); + + responseAsString = 
EntityUtils.toString(client().performRequest( + new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1 + "/results/buckets")).getEntity()); + assertThat(responseAsString, containsString("\"count\":2")); + + responseAsString = EntityUtils.toString(client().performRequest( + new Request("GET", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/_search")).getEntity()); + assertThat(responseAsString, containsString("\"total\":2")); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1)); + + // check that indices still exist, but are empty and aliases are gone + responseAsString = EntityUtils.toString(client().performRequest(new Request("GET", "/_aliases")).getEntity()); + assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1)))); + assertThat(responseAsString, containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2))); //job2 still exists + + responseAsString = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(responseAsString, containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName)); + + client().performRequest(new Request("POST", "/_refresh")); + + responseAsString = EntityUtils.toString(client().performRequest( + new Request("GET", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName + "/_count")).getEntity()); + assertThat(responseAsString, containsString("\"count\":0")); + } + + public void testCreateJobInSharedIndexUpdatesMapping() throws Exception { + String jobTemplate = "{\n" + + " \"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + + " },\n" + + " \"data_description\": {}\n" + + "}"; + + String jobId1 = "create-job-in-shared-index-updates-mapping-job-1"; + String byFieldName1 = "responsetime"; + String jobId2 = "create-job-in-shared-index-updates-mapping-job-2"; + String byFieldName2 = "cpu-usage"; + + Request createJob1Request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1); + createJob1Request.setJsonEntity(String.format(Locale.ROOT, jobTemplate, byFieldName1)); + client().performRequest(createJob1Request); + + // Check the index mapping contains the first by_field_name + Request getResultsMappingRequest = new Request("GET", + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT + "/_mapping"); + getResultsMappingRequest.addParameter("pretty", null); + String resultsMappingAfterJob1 = EntityUtils.toString(client().performRequest(getResultsMappingRequest).getEntity()); + assertThat(resultsMappingAfterJob1, containsString(byFieldName1)); + assertThat(resultsMappingAfterJob1, not(containsString(byFieldName2))); + + Request createJob2Request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2); + createJob2Request.setJsonEntity(String.format(Locale.ROOT, jobTemplate, byFieldName2)); + client().performRequest(createJob2Request); + + // Check the index mapping now contains both fields + String resultsMappingAfterJob2 = EntityUtils.toString(client().performRequest(getResultsMappingRequest).getEntity()); + assertThat(resultsMappingAfterJob2, containsString(byFieldName1)); + assertThat(resultsMappingAfterJob2, containsString(byFieldName2)); + } + + public void testCreateJobInCustomSharedIndexUpdatesMapping() throws Exception { + String jobTemplate = "{\n" + + " 
\"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + + " },\n" + + " \"data_description\": {},\n" + + " \"results_index_name\" : \"shared-index\"}"; + + String jobId1 = "create-job-in-custom-shared-index-updates-mapping-job-1"; + String byFieldName1 = "responsetime"; + String jobId2 = "create-job-in-custom-shared-index-updates-mapping-job-2"; + String byFieldName2 = "cpu-usage"; + + Request createJob1Request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1); + createJob1Request.setJsonEntity(String.format(Locale.ROOT, jobTemplate, byFieldName1)); + client().performRequest(createJob1Request); + + // Check the index mapping contains the first by_field_name + Request getResultsMappingRequest = new Request("GET", + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-shared-index/_mapping"); + getResultsMappingRequest.addParameter("pretty", null); + String resultsMappingAfterJob1 = EntityUtils.toString(client().performRequest(getResultsMappingRequest).getEntity()); + assertThat(resultsMappingAfterJob1, containsString(byFieldName1)); + assertThat(resultsMappingAfterJob1, not(containsString(byFieldName2))); + + Request createJob2Request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2); + createJob2Request.setJsonEntity(String.format(Locale.ROOT, jobTemplate, byFieldName2)); + client().performRequest(createJob2Request); + + // Check the index mapping now contains both fields + String resultsMappingAfterJob2 = EntityUtils.toString(client().performRequest(getResultsMappingRequest).getEntity()); + assertThat(resultsMappingAfterJob2, containsString(byFieldName1)); + assertThat(resultsMappingAfterJob2, containsString(byFieldName2)); + } + + public void testCreateJob_WithClashingFieldMappingsFails() throws Exception { + String jobTemplate = "{\n" + + " \"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + + " },\n" + + " \"data_description\": {}\n" + + "}"; + + String jobId1 = "job-with-response-field"; + String byFieldName1; + String jobId2 = "job-will-fail-with-mapping-error-on-response-field"; + String byFieldName2; + // we should get the friendly advice nomatter which way around the clashing fields are seen + if (randomBoolean()) { + byFieldName1 = "response"; + byFieldName2 = "response.time"; + } else { + byFieldName1 = "response.time"; + byFieldName2 = "response"; + } + + Request createJob1Request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1); + createJob1Request.setJsonEntity(String.format(Locale.ROOT, jobTemplate, byFieldName1)); + client().performRequest(createJob1Request); + + Request createJob2Request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2); + createJob2Request.setJsonEntity(String.format(Locale.ROOT, jobTemplate, byFieldName2)); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(createJob2Request)); + assertThat(e.getMessage(), + containsString("This job would cause a mapping clash with existing field [response] - " + + "avoid the clash by assigning a dedicated results index")); + } + + public void testDeleteJob() throws Exception { + String jobId = "delete-job-job"; + String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + createFarequoteJob(jobId); + + String indicesBeforeDelete = 
EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(indicesBeforeDelete, containsString(indexName)); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + + // check that the index still exists (it's shared by default) + String indicesAfterDelete = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(indicesAfterDelete, containsString(indexName)); + + assertBusy(() -> { + try { + String count = EntityUtils.toString(client().performRequest(new Request("GET", indexName + "/_count")).getEntity()); + assertThat(count, containsString("\"count\":0")); + } catch (Exception e) { + fail(e.getMessage()); + } + }); + + // check that the job itself is gone + expectThrows(ResponseException.class, () -> + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))); + } + + public void testDeleteJobAfterMissingIndex() throws Exception { + String jobId = "delete-job-after-missing-index-job"; + String aliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); + String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + createFarequoteJob(jobId); + + String indicesBeforeDelete = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(indicesBeforeDelete, containsString(indexName)); + + // Manually delete the index so that we can test that deletion proceeds + // normally anyway + client().performRequest(new Request("DELETE", indexName)); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + + // check index was deleted + String indicesAfterDelete = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(indicesAfterDelete, not(containsString(aliasName))); + assertThat(indicesAfterDelete, not(containsString(indexName))); + + expectThrows(ResponseException.class, () -> + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))); + } + + public void testDeleteJobAfterMissingAliases() throws Exception { + String jobId = "delete-job-after-missing-alias-job"; + String readAliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); + String writeAliasName = AnomalyDetectorsIndex.resultsWriteAlias(jobId); + String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + createFarequoteJob(jobId); + + // With security enabled cat aliases throws an index_not_found_exception + // if no aliases have been created. In multi-node tests the alias may not + // appear immediately so wait here. 
+ assertBusy(() -> { + try { + String aliases = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/aliases")).getEntity()); + assertThat(aliases, containsString(readAliasName)); + assertThat(aliases, containsString(writeAliasName)); + } catch (ResponseException e) { + throw new AssertionError(e); + } + }); + + // Manually delete the aliases so that we can test that deletion proceeds + // normally anyway + client().performRequest(new Request("DELETE", indexName + "/_alias/" + readAliasName)); + client().performRequest(new Request("DELETE", indexName + "/_alias/" + writeAliasName)); + + // check aliases were deleted + expectThrows(ResponseException.class, () -> client().performRequest(new Request("GET", indexName + "/_alias/" + readAliasName))); + expectThrows(ResponseException.class, () -> client().performRequest(new Request("GET", indexName + "/_alias/" + writeAliasName))); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + } + + public void testMultiIndexDelete() throws Exception { + String jobId = "multi-index-delete-job"; + String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + createFarequoteJob(jobId); + + client().performRequest(new Request("PUT", indexName + "-001")); + client().performRequest(new Request("PUT", indexName + "-002")); + + String indicesBeforeDelete = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(indicesBeforeDelete, containsString(indexName)); + assertThat(indicesBeforeDelete, containsString(indexName + "-001")); + assertThat(indicesBeforeDelete, containsString(indexName + "-002")); + + // Add some documents to each index to make sure the DBQ clears them out + Request createDoc0 = new Request("PUT", indexName + "/doc/" + 123); + createDoc0.setJsonEntity(String.format(Locale.ROOT, + "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"bucket_span\":%d, \"result_type\":\"record\"}", + jobId, 123, 1)); + client().performRequest(createDoc0); + Request createDoc1 = new Request("PUT", indexName + "-001/doc/" + 123); + createDoc1.setEntity(createDoc0.getEntity()); + client().performRequest(createDoc1); + Request createDoc2 = new Request("PUT", indexName + "-002/doc/" + 123); + createDoc2.setEntity(createDoc0.getEntity()); + client().performRequest(createDoc2); + + // Also index a few through the alias for the first job + Request createDoc3 = new Request("PUT", indexName + "/doc/" + 456); + createDoc3.setEntity(createDoc0.getEntity()); + client().performRequest(createDoc3); + + client().performRequest(new Request("POST", "/_refresh")); + + // check for the documents + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", indexName+ "/_count")).getEntity()), + containsString("\"count\":2")); + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", indexName+ "-001/_count")).getEntity()), + containsString("\"count\":1")); + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", indexName+ "-002/_count")).getEntity()), + containsString("\"count\":1")); + + // Delete + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + + client().performRequest(new Request("POST", "/_refresh")); + + // check that the indices still exist but are empty + String indicesAfterDelete = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + 
assertThat(indicesAfterDelete, containsString(indexName)); + assertThat(indicesAfterDelete, containsString(indexName + "-001")); + assertThat(indicesAfterDelete, containsString(indexName + "-002")); + + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", indexName+ "/_count")).getEntity()), + containsString("\"count\":0")); + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", indexName+ "-001/_count")).getEntity()), + containsString("\"count\":0")); + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", indexName+ "-002/_count")).getEntity()), + containsString("\"count\":0")); + + + expectThrows(ResponseException.class, () -> + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))); + } + + public void testDelete_multipleRequest() throws Exception { + String jobId = "delete-job-multiple-times"; + createFarequoteJob(jobId); + + ConcurrentMapLong<Response> responses = ConcurrentCollections.newConcurrentMapLong(); + ConcurrentMapLong<ResponseException> responseExceptions = ConcurrentCollections.newConcurrentMapLong(); + AtomicReference<IOException> ioe = new AtomicReference<>(); + AtomicInteger recreationGuard = new AtomicInteger(0); + AtomicReference<Response> recreationResponse = new AtomicReference<>(); + AtomicReference<ResponseException> recreationException = new AtomicReference<>(); + + Runnable deleteJob = () -> { + try { + boolean forceDelete = randomBoolean(); + String url = MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId; + if (forceDelete) { + url += "?force=true"; + } + Response response = client().performRequest(new Request("DELETE", url)); + responses.put(Thread.currentThread().getId(), response); + } catch (ResponseException re) { + responseExceptions.put(Thread.currentThread().getId(), re); + } catch (IOException e) { + ioe.set(e); + } + + // Immediately after the first deletion finishes, recreate the job. This should pick up + // race conditions where another delete request deletes part of the newly created job. + if (recreationGuard.getAndIncrement() == 0) { + try { + recreationResponse.set(createFarequoteJob(jobId)); + } catch (ResponseException re) { + recreationException.set(re); + } catch (IOException e) { + ioe.set(e); + } + } + }; + + // The idea is to hit the situation where one request waits for + // the other to complete. 
This is difficult to schedule but + // hopefully it will happen in CI + int numThreads = 5; + Thread [] threads = new Thread[numThreads]; + for (int i=0; i<numThreads; i++) { + threads[i] = new Thread(deleteJob); + threads[i].start(); + } + for (int i=0; i<numThreads; i++) { + threads[i].join(); + } assertBusy(() -> { try { - client().performRequest("POST", "/_refresh"); + client().performRequest(new Request("POST", "/_refresh")); - Response response = client().performRequest("GET", - MachineLearning.BASE_PATH + "anomaly_detectors/hrd-split-job/results/records"); + Response response = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/hrd-split-job/results/records")); String responseBody = EntityUtils.toString(response.getEntity()); if (responseBody.contains("\"count\":2")) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index af7cb4242f1..cd13b2c8bb6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -62,9 +62,11 @@ import org.elasticsearch.xpack.core.ml.action.DeleteCalendarEventAction; import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction; +import org.elasticsearch.xpack.core.ml.action.DeleteForecastAction; import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; import org.elasticsearch.xpack.core.ml.action.DeleteModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; +import org.elasticsearch.xpack.core.ml.action.FindFileStructureAction; import org.elasticsearch.xpack.core.ml.action.FlushJobAction; import org.elasticsearch.xpack.core.ml.action.ForecastJobAction; import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; @@ -114,9 +116,11 @@ import org.elasticsearch.xpack.ml.action.TransportDeleteCalendarEventAction; import org.elasticsearch.xpack.ml.action.TransportDeleteDatafeedAction; import org.elasticsearch.xpack.ml.action.TransportDeleteExpiredDataAction; import org.elasticsearch.xpack.ml.action.TransportDeleteFilterAction; +import org.elasticsearch.xpack.ml.action.TransportDeleteForecastAction; import org.elasticsearch.xpack.ml.action.TransportDeleteJobAction; import org.elasticsearch.xpack.ml.action.TransportDeleteModelSnapshotAction; import org.elasticsearch.xpack.ml.action.TransportFinalizeJobExecutionAction; +import org.elasticsearch.xpack.ml.action.TransportFindFileStructureAction; import org.elasticsearch.xpack.ml.action.TransportFlushJobAction; import org.elasticsearch.xpack.ml.action.TransportForecastJobAction; import org.elasticsearch.xpack.ml.action.TransportGetBucketsAction; @@ -178,6 +182,7 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerFactory; import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerProcessFactory; import org.elasticsearch.xpack.ml.notifications.Auditor; import org.elasticsearch.xpack.ml.rest.RestDeleteExpiredDataAction; +import org.elasticsearch.xpack.ml.rest.RestFindFileStructureAction; import org.elasticsearch.xpack.ml.rest.RestMlInfoAction; import org.elasticsearch.xpack.ml.rest.calendar.RestDeleteCalendarAction; import org.elasticsearch.xpack.ml.rest.calendar.RestDeleteCalendarEventAction; @@ -200,6 +205,7 @@ import org.elasticsearch.xpack.ml.rest.filter.RestGetFiltersAction; import org.elasticsearch.xpack.ml.rest.filter.RestPutFilterAction; import 
org.elasticsearch.xpack.ml.rest.filter.RestUpdateFilterAction; import org.elasticsearch.xpack.ml.rest.job.RestCloseJobAction; +import org.elasticsearch.xpack.ml.rest.job.RestDeleteForecastAction; import org.elasticsearch.xpack.ml.rest.job.RestDeleteJobAction; import org.elasticsearch.xpack.ml.rest.job.RestFlushJobAction; import org.elasticsearch.xpack.ml.rest.job.RestForecastJobAction; @@ -489,6 +495,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu new RestDeleteModelSnapshotAction(settings, restController), new RestDeleteExpiredDataAction(settings, restController), new RestForecastJobAction(settings, restController), + new RestDeleteForecastAction(settings, restController), new RestGetCalendarsAction(settings, restController), new RestPutCalendarAction(settings, restController), new RestDeleteCalendarAction(settings, restController), @@ -496,7 +503,8 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu new RestDeleteCalendarJobAction(settings, restController), new RestPutCalendarJobAction(settings, restController), new RestGetCalendarEventsAction(settings, restController), - new RestPostCalendarEventAction(settings, restController) + new RestPostCalendarEventAction(settings, restController), + new RestFindFileStructureAction(settings, restController) ); } @@ -545,6 +553,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu new ActionHandler<>(UpdateProcessAction.INSTANCE, TransportUpdateProcessAction.class), new ActionHandler<>(DeleteExpiredDataAction.INSTANCE, TransportDeleteExpiredDataAction.class), new ActionHandler<>(ForecastJobAction.INSTANCE, TransportForecastJobAction.class), + new ActionHandler<>(DeleteForecastAction.INSTANCE, TransportDeleteForecastAction.class), new ActionHandler<>(GetCalendarsAction.INSTANCE, TransportGetCalendarsAction.class), new ActionHandler<>(PutCalendarAction.INSTANCE, TransportPutCalendarAction.class), new ActionHandler<>(DeleteCalendarAction.INSTANCE, TransportDeleteCalendarAction.class), @@ -552,7 +561,8 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu new ActionHandler<>(UpdateCalendarJobAction.INSTANCE, TransportUpdateCalendarJobAction.class), new ActionHandler<>(GetCalendarEventsAction.INSTANCE, TransportGetCalendarEventsAction.class), new ActionHandler<>(PostCalendarEventsAction.INSTANCE, TransportPostCalendarEventsAction.class), - new ActionHandler<>(PersistJobAction.INSTANCE, TransportPersistJobAction.class) + new ActionHandler<>(PersistJobAction.INSTANCE, TransportPersistJobAction.class), + new ActionHandler<>(FindFileStructureAction.INSTANCE, TransportFindFileStructureAction.class) ); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java index 2facfd4678e..4c923f2f77c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.ml.action; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import 
org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -18,7 +17,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.DeleteByQueryRequest; -import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.MlMetaIndex; @@ -76,15 +74,12 @@ public class TransportDeleteCalendarAction extends HandledTransportAction<DeleteCalendarAction.Request, AcknowledgedResponse> diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java new file mode 100644 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java +public class TransportDeleteForecastAction extends HandledTransportAction<DeleteForecastAction.Request, AcknowledgedResponse> { + + private final Client client; + private static final int MAX_FORECAST_TO_SEARCH = 10_000; + + private static final Set<ForecastRequestStatus> DELETABLE_STATUSES = + EnumSet.of(ForecastRequestStatus.FINISHED, ForecastRequestStatus.FAILED); + + @Inject + public TransportDeleteForecastAction(Settings settings, TransportService transportService, ActionFilters actionFilters, Client client) { + super(settings, DeleteForecastAction.NAME, transportService, actionFilters, DeleteForecastAction.Request::new); + this.client = client; + } + + @Override + protected void doExecute(Task task, DeleteForecastAction.Request request, ActionListener<AcknowledgedResponse> listener) { + final String jobId = request.getJobId(); + final String forecastsExpression = request.getForecastId(); + ActionListener<SearchResponse> forecastStatsHandler = ActionListener.wrap( + searchResponse -> deleteForecasts(searchResponse, request, listener), + e -> listener.onFailure(new ElasticsearchException("An error occurred while searching forecasts to delete", e))); + + SearchSourceBuilder source = new SearchSourceBuilder(); + + BoolQueryBuilder builder = QueryBuilders.boolQuery(); + BoolQueryBuilder innerBool = QueryBuilders.boolQuery().must( + QueryBuilders.termQuery(Result.RESULT_TYPE.getPreferredName(), ForecastRequestStats.RESULT_TYPE_VALUE)); + + if (MetaData.ALL.equals(request.getForecastId()) == false) { + Set<String> forecastIds = new HashSet<>(Arrays.asList(Strings.tokenizeToStringArray(forecastsExpression, ","))); + innerBool.must(QueryBuilders.termsQuery(Forecast.FORECAST_ID.getPreferredName(), forecastIds)); + } + + source.query(builder.filter(innerBool)); + + SearchRequest searchRequest = new SearchRequest(AnomalyDetectorsIndex.jobResultsAliasedName(jobId)); + searchRequest.source(source); + + executeAsyncWithOrigin(client, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, forecastStatsHandler); + } + + private void deleteForecasts(SearchResponse searchResponse, + DeleteForecastAction.Request request, + ActionListener<AcknowledgedResponse> listener) { + final String jobId = request.getJobId(); + Set<ForecastRequestStats> forecastsToDelete; + try { + forecastsToDelete = parseForecastsFromSearch(searchResponse); + } catch (IOException e) { + listener.onFailure(e); + return; + } + + if (forecastsToDelete.isEmpty()) { + if (MetaData.ALL.equals(request.getForecastId()) && + request.isAllowNoForecasts()) { + listener.onResponse(new AcknowledgedResponse(true)); + } else { + listener.onFailure( + new ResourceNotFoundException(Messages.getMessage(Messages.REST_NO_SUCH_FORECAST, request.getForecastId(), jobId))); + } + return; + } + List<String> badStatusForecasts = forecastsToDelete.stream() + .filter((f) -> !DELETABLE_STATUSES.contains(f.getStatus())) + .map(ForecastRequestStats::getForecastId).collect(Collectors.toList()); + if (badStatusForecasts.size() > 0) { + listener.onFailure( + ExceptionsHelper.conflictStatusException( + Messages.getMessage(Messages.REST_CANNOT_DELETE_FORECAST_IN_CURRENT_STATE, badStatusForecasts, jobId))); + return; + } + + 
final List<String> forecastIds = forecastsToDelete.stream().map(ForecastRequestStats::getForecastId).collect(Collectors.toList()); + DeleteByQueryRequest deleteByQueryRequest = buildDeleteByQuery(jobId, forecastIds); + + executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, deleteByQueryRequest, ActionListener.wrap( + response -> { + if (response.isTimedOut()) { + listener.onFailure( + new TimeoutException("Delete request timed out. Successfully deleted " + + response.getDeleted() + " forecast documents from job [" + jobId + "]")); + return; + } + if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { + Tuple<RestStatus, Throwable> statusAndReason = getStatusAndReason(response); + listener.onFailure( + new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2())); + return; + } + logger.info("Deleted forecast(s) [{}] from job [{}]", forecastIds, jobId); + listener.onResponse(new AcknowledgedResponse(true)); + }, + listener::onFailure)); + } + + private static Tuple<RestStatus, Throwable> getStatusAndReason(final BulkByScrollResponse response) { + RestStatus status = RestStatus.OK; + Throwable reason = new Exception("Unknown error"); + //Getting the max RestStatus is sort of arbitrary, would the user care about 5xx over 4xx? + //Unsure of a better way to return an appropriate and possibly actionable cause to the user. + for (BulkItemResponse.Failure failure : response.getBulkFailures()) { + if (failure.getStatus().getStatus() > status.getStatus()) { + status = failure.getStatus(); + reason = failure.getCause(); + } + } + + for (ScrollableHitSource.SearchFailure failure : response.getSearchFailures()) { + RestStatus failureStatus = org.elasticsearch.ExceptionsHelper.status(failure.getReason()); + if (failureStatus.getStatus() > status.getStatus()) { + status = failureStatus; + reason = failure.getReason(); + } + } + return new Tuple<>(status, reason); + } + + private static Set<ForecastRequestStats> parseForecastsFromSearch(SearchResponse searchResponse) throws IOException { + SearchHits hits = searchResponse.getHits(); + List<ForecastRequestStats> allStats = new ArrayList<>(hits.getHits().length); + for (SearchHit hit : hits) { + try (InputStream stream = hit.getSourceRef().streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser( + NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream)) { + allStats.add(ForecastRequestStats.STRICT_PARSER.apply(parser, null)); + } + } + return new HashSet<>(allStats); + } + + private DeleteByQueryRequest buildDeleteByQuery(String jobId, List<String> forecastsToDelete) { + DeleteByQueryRequest request = new DeleteByQueryRequest() + .setAbortOnVersionConflict(false) //since these documents are not updated, a conflict just means it was deleted previously + .setSize(MAX_FORECAST_TO_SEARCH) + .setSlices(5); + + request.indices(AnomalyDetectorsIndex.jobResultsAliasedName(jobId)); + BoolQueryBuilder innerBoolQuery = QueryBuilders.boolQuery(); + innerBoolQuery + .must(QueryBuilders.termsQuery(Result.RESULT_TYPE.getPreferredName(), + ForecastRequestStats.RESULT_TYPE_VALUE, Forecast.RESULT_TYPE_VALUE)) + .must(QueryBuilders.termsQuery(Forecast.FORECAST_ID.getPreferredName(), + forecastsToDelete)); + + QueryBuilder query = QueryBuilders.boolQuery().filter(innerBoolQuery); + request.setQuery(query); + return request; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFindFileStructureAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFindFileStructureAction.java new file mode 100644 index 00000000000..66d07f5111c --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFindFileStructureAction.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ml.action.FindFileStructureAction; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.filestructurefinder.FileStructureFinder; +import org.elasticsearch.xpack.ml.filestructurefinder.FileStructureFinderManager; + +public class TransportFindFileStructureAction + extends HandledTransportAction<FindFileStructureAction.Request, FindFileStructureAction.Response> { + + private final ThreadPool threadPool; + + @Inject + public TransportFindFileStructureAction(Settings settings, TransportService transportService, ActionFilters actionFilters, + ThreadPool threadPool) { + super(settings, FindFileStructureAction.NAME, transportService, actionFilters, FindFileStructureAction.Request::new); + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, FindFileStructureAction.Request request, + ActionListener<FindFileStructureAction.Response> listener) { + + // As determining the file structure might take a while, we run + // in a different thread to avoid blocking the network thread. 
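// Illustrative aside, not part of this patch: this transport action backs
// RestFindFileStructureAction (registered in MachineLearning.java earlier in this
// diff). Assuming the endpoint follows the usual _xpack/ml layout and accepts a
// lines_to_sample parameter, a low-level REST client call might look roughly like:
//
//     Request find = new Request("POST", "/_xpack/ml/find_file_structure");
//     find.addParameter("lines_to_sample", "100");   // assumed parameter name
//     find.setJsonEntity(sample);                    // raw NDJSON sample of the file
//     Response response = client().performRequest(find);
//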
+ threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { + try { + listener.onResponse(buildFileStructureResponse(request)); + } catch (Exception e) { + listener.onFailure(e); + } + }); + } + + private FindFileStructureAction.Response buildFileStructureResponse(FindFileStructureAction.Request request) throws Exception { + + FileStructureFinderManager structureFinderManager = new FileStructureFinderManager(); + + FileStructureFinder fileStructureFinder = + structureFinderManager.findFileStructure(request.getLinesToSample(), request.getSample().streamInput()); + + return new FindFileStructureAction.Response(fileStructureFinder.getStructure()); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java index c0792a45b29..4a17a2654c6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java @@ -19,8 +19,8 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.metrics.max.Max; -import org.elasticsearch.search.aggregations.metrics.min.Min; +import org.elasticsearch.search.aggregations.metrics.Max; +import org.elasticsearch.search.aggregations.metrics.Min; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java index 3ca3c315450..252cf97d0c5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.ml.action; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; @@ -53,11 +51,6 @@ public class TransportIsolateDatafeedAction extends TransportTasksAction { - if (response.isViolated()) { + if (response.isSuccess() == false) { listener.onFailure(createUnlicensedError(datafeed.getId(), response)); } else { createDataExtractor(job, datafeed, params, waitForTaskListener); } }, - e -> listener.onFailure(createUnknownLicenseError(datafeed.getId(), - MlRemoteLicenseChecker.remoteIndices(datafeed.getIndices()), e)) + e -> listener.onFailure( + createUnknownLicenseError( + datafeed.getId(), RemoteClusterLicenseChecker.remoteIndices(datafeed.getIndices()), e)) )); } else { createDataExtractor(job, datafeed, params, waitForTaskListener); @@ -232,23 +236,35 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction remoteIndices, - Exception cause) { - String message = "Cannot start datafeed [" + datafeedId + "] as it is configured to use" - + " indices on a remote cluster " + remoteIndices - + 
" but the license type could not be verified"; + private ElasticsearchStatusException createUnknownLicenseError( + final String datafeedId, final List remoteIndices, final Exception cause) { + final int numberOfRemoteClusters = RemoteClusterLicenseChecker.remoteClusterAliases(remoteIndices).size(); + assert numberOfRemoteClusters > 0; + final String remoteClusterQualifier = numberOfRemoteClusters == 1 ? "a remote cluster" : "remote clusters"; + final String licenseTypeQualifier = numberOfRemoteClusters == 1 ? "" : "s"; + final String message = String.format( + Locale.ROOT, + "cannot start datafeed [%s] as it uses indices on %s %s but the license type%s could not be verified", + datafeedId, + remoteClusterQualifier, + remoteIndices, + licenseTypeQualifier); - return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, new Exception(cause.getMessage())); + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, cause); } public static class StartDatafeedPersistentTasksExecutor extends PersistentTasksExecutor { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index a6be0476486..ce3f611b222 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -92,7 +93,7 @@ public class DatafeedNodeSelector { List indices = datafeed.getIndices(); for (String index : indices) { - if (MlRemoteLicenseChecker.isRemoteIndex(index)) { + if (RemoteClusterLicenseChecker.isRemoteIndex(index)) { // We cannot verify remote indices continue; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java deleted file mode 100644 index b0eeed2c800..00000000000 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.ml.datafeed; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.license.License; -import org.elasticsearch.protocol.xpack.XPackInfoRequest; -import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.protocol.xpack.license.LicenseStatus; -import org.elasticsearch.transport.ActionNotFoundTransportException; -import org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.xpack.core.action.XPackInfoAction; - -import java.util.EnumSet; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; - -/** - * ML datafeeds can use cross cluster search to access data in a remote cluster. - * The remote cluster should be licenced for ML this class performs that check - * using the _xpack (info) endpoint. - */ -public class MlRemoteLicenseChecker { - - private final Client client; - - public static class RemoteClusterLicenseInfo { - private final String clusterName; - private final XPackInfoResponse.LicenseInfo licenseInfo; - - RemoteClusterLicenseInfo(String clusterName, XPackInfoResponse.LicenseInfo licenseInfo) { - this.clusterName = clusterName; - this.licenseInfo = licenseInfo; - } - - public String getClusterName() { - return clusterName; - } - - public XPackInfoResponse.LicenseInfo getLicenseInfo() { - return licenseInfo; - } - } - - public class LicenseViolation { - private final RemoteClusterLicenseInfo licenseInfo; - - private LicenseViolation(@Nullable RemoteClusterLicenseInfo licenseInfo) { - this.licenseInfo = licenseInfo; - } - - public boolean isViolated() { - return licenseInfo != null; - } - - public RemoteClusterLicenseInfo get() { - return licenseInfo; - } - } - - public MlRemoteLicenseChecker(Client client) { - this.client = client; - } - - /** - * Check each cluster is licensed for ML. - * This function evaluates lazily and will terminate when the first cluster - * that is not licensed is found or an error occurs. - * - * @param clusterNames List of remote cluster names - * @param listener Response listener - */ - public void checkRemoteClusterLicenses(List clusterNames, ActionListener listener) { - final Iterator itr = clusterNames.iterator(); - if (itr.hasNext() == false) { - listener.onResponse(new LicenseViolation(null)); - return; - } - - final AtomicReference clusterName = new AtomicReference<>(itr.next()); - - ActionListener infoListener = new ActionListener() { - @Override - public void onResponse(XPackInfoResponse xPackInfoResponse) { - if (licenseSupportsML(xPackInfoResponse.getLicenseInfo()) == false) { - listener.onResponse(new LicenseViolation( - new RemoteClusterLicenseInfo(clusterName.get(), xPackInfoResponse.getLicenseInfo()))); - return; - } - - if (itr.hasNext()) { - clusterName.set(itr.next()); - remoteClusterLicense(clusterName.get(), this); - } else { - listener.onResponse(new LicenseViolation(null)); - } - } - - @Override - public void onFailure(Exception e) { - String message = "Could not determine the X-Pack licence type for cluster [" + clusterName.get() + "]"; - if (e instanceof ActionNotFoundTransportException) { - // This is likely to be because x-pack is not installed in the target cluster - message += ". 
Is X-Pack installed on the target cluster?"; - } - listener.onFailure(new ElasticsearchException(message, e)); - } - }; - - remoteClusterLicense(clusterName.get(), infoListener); - } - - private void remoteClusterLicense(String clusterName, ActionListener listener) { - Client remoteClusterClient = client.getRemoteClusterClient(clusterName); - ThreadContext threadContext = remoteClusterClient.threadPool().getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - // we stash any context here since this is an internal execution and should not leak any - // existing context information. - threadContext.markAsSystemContext(); - - XPackInfoRequest request = new XPackInfoRequest(); - request.setCategories(EnumSet.of(XPackInfoRequest.Category.LICENSE)); - remoteClusterClient.execute(XPackInfoAction.INSTANCE, request, listener); - } - } - - static boolean licenseSupportsML(XPackInfoResponse.LicenseInfo licenseInfo) { - License.OperationMode mode = License.OperationMode.resolve(licenseInfo.getMode()); - return licenseInfo.getStatus() == LicenseStatus.ACTIVE && - (mode == License.OperationMode.PLATINUM || mode == License.OperationMode.TRIAL); - } - - public static boolean isRemoteIndex(String index) { - return index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) != -1; - } - - public static boolean containsRemoteIndex(List indices) { - return indices.stream().anyMatch(MlRemoteLicenseChecker::isRemoteIndex); - } - - /** - * Get any remote indices used in cross cluster search. - * Remote indices are of the form {@code cluster_name:index_name} - * @return List of remote cluster indices - */ - public static List remoteIndices(List indices) { - return indices.stream().filter(MlRemoteLicenseChecker::isRemoteIndex).collect(Collectors.toList()); - } - - /** - * Extract the list of remote cluster names from the list of indices. - * @param indices List of indices. Remote cluster indices are prefixed - * with {@code cluster-name:} - * @return Every cluster name found in {@code indices} - */ - public static List remoteClusterNames(List indices) { - return indices.stream() - .filter(MlRemoteLicenseChecker::isRemoteIndex) - .map(index -> index.substring(0, index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR))) - .distinct() - .collect(Collectors.toList()); - } - - public static String buildErrorMessage(RemoteClusterLicenseInfo clusterLicenseInfo) { - StringBuilder error = new StringBuilder(); - if (clusterLicenseInfo.licenseInfo.getStatus() != LicenseStatus.ACTIVE) { - error.append("The license on cluster [").append(clusterLicenseInfo.clusterName) - .append("] is not active. "); - } else { - License.OperationMode mode = License.OperationMode.resolve(clusterLicenseInfo.licenseInfo.getMode()); - if (mode != License.OperationMode.PLATINUM && mode != License.OperationMode.TRIAL) { - error.append("The license mode [").append(mode) - .append("] on cluster [") - .append(clusterLicenseInfo.clusterName) - .append("] does not enable Machine Learning. 
"); - } - } - - error.append(Strings.toString(clusterLicenseInfo.licenseInfo)); - return error.toString(); - } -} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java index e481986504d..864a83afae7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java @@ -15,9 +15,9 @@ import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; -import org.elasticsearch.search.aggregations.metrics.max.Max; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; +import org.elasticsearch.search.aggregations.metrics.Max; +import org.elasticsearch.search.aggregations.metrics.Percentile; +import org.elasticsearch.search.aggregations.metrics.Percentiles; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.joda.time.DateTime; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java index 3b772544821..b1daff2b7e7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java @@ -13,8 +13,8 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.Aggregations; -import org.elasticsearch.search.aggregations.metrics.max.Max; -import org.elasticsearch.search.aggregations.metrics.min.Min; +import org.elasticsearch.search.aggregations.metrics.Max; +import org.elasticsearch.search.aggregations.metrics.Min; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java similarity index 92% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinder.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java index fd9d34096b2..625858c867a 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinder.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java @@ -3,10 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.xpack.ml.logstructurefinder.TimestampFormatFinder.TimestampMatch; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; +import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import org.supercsv.exception.SuperCsvException; import org.supercsv.io.CsvListReader; import org.supercsv.prefs.CsvPreference; @@ -29,17 +31,16 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.IntStream; -public class SeparatedValuesLogStructureFinder implements LogStructureFinder { +public class DelimitedFileStructureFinder implements FileStructureFinder { private static final int MAX_LEVENSHTEIN_COMPARISONS = 100; private final List sampleMessages; - private final LogStructure structure; + private final FileStructure structure; - static SeparatedValuesLogStructureFinder makeSeparatedValuesLogStructureFinder(List explanation, String sample, - String charsetName, Boolean hasByteOrderMarker, - CsvPreference csvPreference, boolean trimFields) - throws IOException { + static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(List explanation, String sample, String charsetName, + Boolean hasByteOrderMarker, CsvPreference csvPreference, + boolean trimFields) throws IOException { Tuple>, List> parsed = readRows(sample, csvPreference); List> rows = parsed.v1(); @@ -73,20 +74,21 @@ public class SeparatedValuesLogStructureFinder implements LogStructureFinder { String preamble = Pattern.compile("\n").splitAsStream(sample).limit(lineNumbers.get(1)).collect(Collectors.joining("\n", "", "\n")); char delimiter = (char) csvPreference.getDelimiterChar(); - LogStructure.Builder structureBuilder = new LogStructure.Builder(LogStructure.Format.fromSeparator(delimiter)) + FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.DELIMITED) .setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) .setSampleStart(preamble) .setNumLinesAnalyzed(lineNumbers.get(lineNumbers.size() - 1)) .setNumMessagesAnalyzed(sampleRecords.size()) .setHasHeaderRow(isHeaderInFile) + .setDelimiter(delimiter) .setInputFields(Arrays.stream(headerWithNamedBlanks).collect(Collectors.toList())); if (trimFields) { structureBuilder.setShouldTrimFields(true); } - Tuple timeField = LogStructureUtils.guessTimestampField(explanation, sampleRecords); + Tuple timeField = FileStructureUtils.guessTimestampField(explanation, sampleRecords); if (timeField != null) { String timeLineRegex = null; StringBuilder builder = new StringBuilder("^"); @@ -123,18 +125,25 @@ public class SeparatedValuesLogStructureFinder implements LogStructureFinder { .setMultilineStartPattern(timeLineRegex); } - SortedMap mappings = LogStructureUtils.guessMappings(explanation, sampleRecords); - mappings.put(LogStructureUtils.DEFAULT_TIMESTAMP_FIELD, Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "date")); + Tuple, SortedMap> mappingsAndFieldStats = + 
FileStructureUtils.guessMappingsAndCalculateFieldStats(explanation, sampleRecords); - LogStructure structure = structureBuilder + SortedMap mappings = mappingsAndFieldStats.v1(); + mappings.put(FileStructureUtils.DEFAULT_TIMESTAMP_FIELD, Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date")); + + if (mappingsAndFieldStats.v2() != null) { + structureBuilder.setFieldStats(mappingsAndFieldStats.v2()); + } + + FileStructure structure = structureBuilder .setMappings(mappings) .setExplanation(explanation) .build(); - return new SeparatedValuesLogStructureFinder(sampleMessages, structure); + return new DelimitedFileStructureFinder(sampleMessages, structure); } - private SeparatedValuesLogStructureFinder(List sampleMessages, LogStructure structure) { + private DelimitedFileStructureFinder(List sampleMessages, FileStructure structure) { this.sampleMessages = Collections.unmodifiableList(sampleMessages); this.structure = structure; } @@ -145,7 +154,7 @@ public class SeparatedValuesLogStructureFinder implements LogStructureFinder { } @Override - public LogStructure getStructure() { + public FileStructure getStructure() { return structure; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactory.java new file mode 100644 index 00000000000..0bbe13e3b05 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactory.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.filestructurefinder; + +import org.supercsv.prefs.CsvPreference; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; + +public class DelimitedFileStructureFinderFactory implements FileStructureFinderFactory { + + private final CsvPreference csvPreference; + private final int minFieldsPerRow; + private final boolean trimFields; + + DelimitedFileStructureFinderFactory(char delimiter, int minFieldsPerRow, boolean trimFields) { + csvPreference = new CsvPreference.Builder('"', delimiter, "\n").build(); + this.minFieldsPerRow = minFieldsPerRow; + this.trimFields = trimFields; + } + + /** + * Rules are: + * - It must contain at least two complete records + * - There must be a minimum number of fields per record (otherwise files with no commas could be treated as CSV!) + * - Every record except the last must have the same number of fields + * The reason the last record is allowed to have fewer fields than the others is that + * it could have been truncated when the file was sampled. 
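+ * For example, given a comma delimiter and a minimum of 2 fields per record, a sample such as {@code a,b,c\n1,2,3\n4,5}
+ * qualifies: the first two records are complete with three fields each, and only the final record is short.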
+ */ + @Override + public boolean canCreateFromSample(List explanation, String sample) { + String formatName; + switch ((char) csvPreference.getDelimiterChar()) { + case ',': + formatName = "CSV"; + break; + case '\t': + formatName = "TSV"; + break; + default: + formatName = Character.getName(csvPreference.getDelimiterChar()).toLowerCase(Locale.ROOT) + " delimited values"; + break; + } + return DelimitedFileStructureFinder.canCreateFromSample(explanation, sample, minFieldsPerRow, csvPreference, formatName); + } + + @Override + public FileStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) + throws IOException { + return DelimitedFileStructureFinder.makeDelimitedFileStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, + csvPreference, trimFields); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FieldStatsCalculator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FieldStatsCalculator.java new file mode 100644 index 00000000000..130a37dbc19 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FieldStatsCalculator.java @@ -0,0 +1,184 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.filestructurefinder; + +import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.stream.Collectors; + +/** + * Calculate statistics for a set of scalar field values. + * Count, cardinality (distinct count) and top hits (most common values) are always calculated. + * Extra statistics are calculated if the field is numeric: min, max, mean and median. + */ +public class FieldStatsCalculator { + + private long count; + private SortedMap countsByStringValue = new TreeMap<>(); + private SortedMap countsByNumericValue = new TreeMap<>(); + + /** + * Add a collection of values to the calculator. + * The values to be added can be combined by the caller and added in a + * single call to this method or added in multiple calls to this method. + * @param fieldValues Zero or more values to add. May not be null. + */ + public void accept(Collection fieldValues) { + + count += fieldValues.size(); + + for (String fieldValue : fieldValues) { + + countsByStringValue.compute(fieldValue, (k, v) -> (v == null) ? 1 : (1 + v)); + + if (countsByNumericValue != null) { + + try { + countsByNumericValue.compute(Double.valueOf(fieldValue), (k, v) -> (v == null) ? 1 : (1 + v)); + } catch (NumberFormatException e) { + countsByNumericValue = null; + } + } + } + } + + /** + * Calculate field statistics based on the previously accepted values. + * @param numTopHits The maximum number of entries to include in the top hits. + * @return The calculated field statistics. 
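+ * Numeric statistics (min, max, mean, median) are only included when at least one value was accepted
+ * and every accepted value parsed as a number. For example, after {@code accept(Arrays.asList("1", "2", "2", "7"))},
+ * {@code calculate(10)} reports count 4, cardinality 3, min 1.0, max 7.0, mean 3.0 and median 2.0.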
+ */ + public FieldStats calculate(int numTopHits) { + + if (countsByNumericValue != null && countsByNumericValue.isEmpty() == false) { + return new FieldStats(count, countsByNumericValue.size(), countsByNumericValue.firstKey(), countsByNumericValue.lastKey(), + calculateMean(), calculateMedian(), findNumericTopHits(numTopHits)); + } else { + return new FieldStats(count, countsByStringValue.size(), findStringTopHits(numTopHits)); + } + } + + Double calculateMean() { + + assert countsByNumericValue != null; + + if (countsByNumericValue.isEmpty()) { + return null; + } + + double runningCount = 0.0; + double runningMean = Double.NaN; + + for (Map.Entry entry : countsByNumericValue.entrySet()) { + + double entryCount = (double) entry.getValue(); + double newRunningCount = runningCount + entryCount; + + // Updating a running mean like this is more numerically stable than using (sum / count) + if (runningCount > 0.0) { + runningMean = runningMean * (runningCount / newRunningCount) + entry.getKey() * (entryCount / newRunningCount); + } else if (entryCount > 0.0) { + runningMean = entry.getKey(); + } + + runningCount = newRunningCount; + } + + return runningMean; + } + + Double calculateMedian() { + + assert countsByNumericValue != null; + + if (count % 2 == 1) { + + // Simple case - median is middle value + long targetCount = count / 2 + 1; + long currentUpperBound = 0; + + for (Map.Entry entry : countsByNumericValue.entrySet()) { + + currentUpperBound += entry.getValue(); + + if (currentUpperBound >= targetCount) { + return entry.getKey(); + } + } + + } else { + + // More complicated case - median is average of two middle values + long target1Count = count / 2; + long target2Count = target1Count + 1; + double target1Value = Double.NaN; + long prevUpperBound = -1; + long currentUpperBound = 0; + + for (Map.Entry entry : countsByNumericValue.entrySet()) { + + currentUpperBound += entry.getValue(); + + if (currentUpperBound >= target2Count) { + + if (prevUpperBound < target1Count) { + // Both target values are the same + return entry.getKey(); + } else { + return (target1Value + entry.getKey()) / 2.0; + } + } + + if (currentUpperBound >= target1Count) { + target1Value = entry.getKey(); + } + + prevUpperBound = currentUpperBound; + } + } + + return null; + } + + List> findNumericTopHits(int numTopHits) { + assert countsByNumericValue != null; + return findTopHits(numTopHits, countsByNumericValue, Comparator.comparing(Map.Entry::getKey)); + } + + List> findStringTopHits(int numTopHits) { + return findTopHits(numTopHits, countsByStringValue, Comparator.comparing(Map.Entry::getKey)); + } + + /** + * Order by descending count, with a secondary sort to ensure reproducibility of results. 
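+ * Ties on count are broken by comparing the values themselves, so repeated runs over the same data produce identical top hits.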
+ */ + private static List> findTopHits(int numTopHits, Map countsByValue, + Comparator> secondarySort) { + + List> sortedByCount = countsByValue.entrySet().stream() + .sorted(Comparator.comparing(Map.Entry::getValue, Comparator.reverseOrder()).thenComparing(secondarySort)) + .limit(numTopHits).collect(Collectors.toList()); + + List> topHits = new ArrayList<>(sortedByCount.size()); + + for (Map.Entry entry : sortedByCount) { + + Map topHit = new LinkedHashMap<>(3); + topHit.put("value", entry.getKey()); + topHit.put("count", entry.getValue()); + topHits.add(topHit); + } + + return topHits; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinder.java new file mode 100644 index 00000000000..c09978b6bcb --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinder.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.filestructurefinder; + +import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; + +import java.util.List; + +public interface FileStructureFinder { + + /** + * The (possibly multi-line) messages that the sampled lines were combined into. + * @return A list of messages. + */ + List getSampleMessages(); + + /** + * Retrieve the structure of the file used to instantiate the finder. + * @return The file structure. + */ + FileStructure getStructure(); +} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderFactory.java similarity index 67% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderFactory.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderFactory.java index af322ee4bf0..4b6fce322ee 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderFactory.java @@ -3,33 +3,33 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import java.util.List; -public interface LogStructureFinderFactory { +public interface FileStructureFinderFactory { /** - * Given a sample of a log file, decide whether this factory will be able + * Given a sample of a file, decide whether this factory will be able * to create an appropriate object to represent its ingestion configs. * @param explanation List of reasons for making decisions. May contain items when passed and new reasons * can be appended by this method. - * @param sample A sample from the log file to be ingested. - * @return true if this factory can create an appropriate log + * @param sample A sample from the file to be ingested. 
+ * @return true if this factory can create an appropriate * file structure given the sample; otherwise false. */ boolean canCreateFromSample(List explanation, String sample); /** - * Create an object representing the structure of a log file. + * Create an object representing the structure of a file. * @param explanation List of reasons for making decisions. May contain items when passed and new reasons * can be appended by this method. - * @param sample A sample from the log file to be ingested. + * @param sample A sample from the file to be ingested. * @param charsetName The name of the character set in which the sample was provided. * @param hasByteOrderMarker Did the sample have a byte order marker? null means "not relevant". - * @return A log file structure object suitable for ingesting the supplied sample. + * @return A file structure object suitable for ingesting the supplied sample. * @throws Exception if something goes wrong during creation. */ - LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) + FileStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) throws Exception; } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManager.java similarity index 82% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManager.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManager.java index 7f18445e505..d0ce68aff25 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManager.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import com.ibm.icu.text.CharsetDetector; import com.ibm.icu.text.CharsetMatch; @@ -26,15 +26,16 @@ import java.util.Optional; import java.util.Set; /** - * Runs the high-level steps needed to create ingest configs for the specified log file. In order: + * Runs the high-level steps needed to create ingest configs for the specified file. In order: * 1. Determine the most likely character set (UTF-8, UTF-16LE, ISO-8859-2, etc.) * 2. Load a sample of the file, consisting of the first 1000 lines of the file * 3. Determine the most likely file structure - one of ND-JSON, XML, CSV, TSV or semi-structured text * 4. 
Create an appropriate structure object and delegate writing configs to it */ -public final class LogStructureFinderManager { +public final class FileStructureFinderManager { public static final int MIN_SAMPLE_LINE_COUNT = 2; + public static final int DEFAULT_IDEAL_SAMPLE_LINE_COUNT = 1000; static final Set FILEBEAT_SUPPORTED_ENCODINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "866", "ansi_x3.4-1968", "arabic", "ascii", "asmo-708", "big5", "big5-hkscs", "chinese", "cn-big5", "cp1250", "cp1251", "cp1252", @@ -65,33 +66,35 @@ public final class LogStructureFinderManager { /** * These need to be ordered so that the more generic formats come after the more specific ones */ - private static final List ORDERED_STRUCTURE_FACTORIES = Collections.unmodifiableList(Arrays.asList( - new JsonLogStructureFinderFactory(), - new XmlLogStructureFinderFactory(), + private static final List ORDERED_STRUCTURE_FACTORIES = Collections.unmodifiableList(Arrays.asList( + new JsonFileStructureFinderFactory(), + new XmlFileStructureFinderFactory(), // ND-JSON will often also be valid (although utterly weird) CSV, so JSON must come before CSV - new CsvLogStructureFinderFactory(), - new TsvLogStructureFinderFactory(), - new SemiColonSeparatedValuesLogStructureFinderFactory(), - new PipeSeparatedValuesLogStructureFinderFactory(), - new TextLogStructureFinderFactory() + new DelimitedFileStructureFinderFactory(',', 2, false), + new DelimitedFileStructureFinderFactory('\t', 2, false), + new DelimitedFileStructureFinderFactory(';', 4, false), + new DelimitedFileStructureFinderFactory('|', 5, true), + new TextLogFileStructureFinderFactory() )); private static final int BUFFER_SIZE = 8192; /** - * Given a stream of data from some log file, determine its structure. + * Given a stream of data from some file, determine its structure. * @param idealSampleLineCount Ideally, how many lines from the stream will be read to determine the structure? * If the stream has fewer lines then an attempt will still be made, providing at - * least {@link #MIN_SAMPLE_LINE_COUNT} lines can be read. + * least {@link #MIN_SAMPLE_LINE_COUNT} lines can be read. If null + * the value of {@link #DEFAULT_IDEAL_SAMPLE_LINE_COUNT} will be used. * @param fromFile A stream from which the sample will be read. - * @return A {@link LogStructureFinder} object from which the structure and messages can be queried. + * @return A {@link FileStructureFinder} object from which the structure and messages can be queried. * @throws Exception A variety of problems could occur at various stages of the structure finding process. */ - public LogStructureFinder findLogStructure(int idealSampleLineCount, InputStream fromFile) throws Exception { - return findLogStructure(new ArrayList<>(), idealSampleLineCount, fromFile); + public FileStructureFinder findFileStructure(Integer idealSampleLineCount, InputStream fromFile) throws Exception { + return findFileStructure(new ArrayList<>(), (idealSampleLineCount == null) ? 
DEFAULT_IDEAL_SAMPLE_LINE_COUNT : idealSampleLineCount, + fromFile); } - public LogStructureFinder findLogStructure(List explanation, int idealSampleLineCount, InputStream fromFile) + public FileStructureFinder findFileStructure(List explanation, int idealSampleLineCount, InputStream fromFile) throws Exception { CharsetMatch charsetMatch = findCharset(explanation, fromFile); @@ -159,13 +162,19 @@ public final class LogStructureFinderManager { String name = charsetMatch.getName(); if (Charset.isSupported(name) && FILEBEAT_SUPPORTED_ENCODINGS.contains(name.toLowerCase(Locale.ROOT))) { - // This extra test is to avoid trying to read binary files as text. Running the log config - // deduction algorithms on binary files is very slow as the binary files generally appear to + // This extra test is to avoid trying to read binary files as text. Running the structure + // finding algorithms on binary files is very slow as the binary files generally appear to // have very long lines. boolean spaceEncodingContainsZeroByte = false; - byte[] spaceBytes = " ".getBytes(name); - for (int i = 0; i < spaceBytes.length && spaceEncodingContainsZeroByte == false; ++i) { - spaceEncodingContainsZeroByte = (spaceBytes[i] == 0); + Charset charset = Charset.forName(name); + // Some character sets cannot be encoded. These are extremely rare so it's likely that + // they've been chosen based on incorrectly provided binary data. Therefore, err on + // the side of rejecting binary data. + if (charset.canEncode()) { + byte[] spaceBytes = " ".getBytes(charset); + for (int i = 0; i < spaceBytes.length && spaceEncodingContainsZeroByte == false; ++i) { + spaceEncodingContainsZeroByte = (spaceBytes[i] == 0); + } } if (containsZeroBytes && spaceEncodingContainsZeroByte == false) { explanation.add("Character encoding [" + name + "] matched the input with [" + charsetMatch.getConfidence() + @@ -186,10 +195,10 @@ public final class LogStructureFinderManager { (containsZeroBytes ? " - could it be binary data?" : "")); } - LogStructureFinder makeBestStructureFinder(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) + FileStructureFinder makeBestStructureFinder(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) throws Exception { - for (LogStructureFinderFactory factory : ORDERED_STRUCTURE_FACTORIES) { + for (FileStructureFinderFactory factory : ORDERED_STRUCTURE_FACTORIES) { if (factory.canCreateFromSample(explanation, sample)) { return factory.createFromSample(explanation, sample, charsetName, hasByteOrderMarker); } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtils.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java similarity index 73% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtils.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java index b1dfee22ee6..0341e03a20b 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtils.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java @@ -3,11 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.grok.Grok; -import org.elasticsearch.xpack.ml.logstructurefinder.TimestampFormatFinder.TimestampMatch; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; +import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.ArrayList; import java.util.Arrays; @@ -16,25 +17,27 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; import java.util.stream.Collectors; import java.util.stream.Stream; -final class LogStructureUtils { +public final class FileStructureUtils { - static final String DEFAULT_TIMESTAMP_FIELD = "@timestamp"; - static final String MAPPING_TYPE_SETTING = "type"; - static final String MAPPING_FORMAT_SETTING = "format"; - static final String MAPPING_PROPERTIES_SETTING = "properties"; + public static final String DEFAULT_TIMESTAMP_FIELD = "@timestamp"; + public static final String MAPPING_TYPE_SETTING = "type"; + public static final String MAPPING_FORMAT_SETTING = "format"; + public static final String MAPPING_PROPERTIES_SETTING = "properties"; + private static final int NUM_TOP_HITS = 10; // NUMBER Grok pattern doesn't support scientific notation, so we extend it private static final Grok NUMBER_GROK = new Grok(Grok.getBuiltinPatterns(), "^%{NUMBER}(?:[eE][+-]?[0-3]?[0-9]{1,2})?$"); private static final Grok IP_GROK = new Grok(Grok.getBuiltinPatterns(), "^%{IP}$"); private static final int KEYWORD_MAX_LEN = 256; private static final int KEYWORD_MAX_SPACES = 5; - private LogStructureUtils() { + private FileStructureUtils() { } /** @@ -44,10 +47,10 @@ final class LogStructureUtils { * - Must have the same timestamp format in every record * If multiple fields meet these criteria then the one that occurred first in the first sample record * is chosen. - * @param explanation List of reasons for choosing the overall log structure. This list + * @param explanation List of reasons for choosing the overall file structure. This list * may be non-empty when the method is called, and this method may * append to it. - * @param sampleRecords List of records derived from the provided log sample. + * @param sampleRecords List of records derived from the provided sample. * @return A tuple of (field name, timestamp format) if one can be found, or null if * there is no consistent timestamp. */ @@ -112,26 +115,39 @@ final class LogStructureUtils { * @param sampleRecords The sampled records. * @return A map of field name to mapping settings. */ - static SortedMap guessMappings(List explanation, List> sampleRecords) { + static Tuple, SortedMap> + guessMappingsAndCalculateFieldStats(List explanation, List> sampleRecords) { SortedMap mappings = new TreeMap<>(); + SortedMap fieldStats = new TreeMap<>(); - for (Map sampleRecord : sampleRecords) { - for (String fieldName : sampleRecord.keySet()) { - mappings.computeIfAbsent(fieldName, key -> guessMapping(explanation, fieldName, - sampleRecords.stream().flatMap(record -> { - Object fieldValue = record.get(fieldName); - return (fieldValue == null) ? 
Stream.empty() : Stream.of(fieldValue); - } - ).collect(Collectors.toList()))); + Set uniqueFieldNames = sampleRecords.stream().flatMap(record -> record.keySet().stream()).collect(Collectors.toSet()); + + for (String fieldName : uniqueFieldNames) { + + List fieldValues = sampleRecords.stream().flatMap(record -> { + Object fieldValue = record.get(fieldName); + return (fieldValue == null) ? Stream.empty() : Stream.of(fieldValue); + } + ).collect(Collectors.toList()); + + Tuple, FieldStats> mappingAndFieldStats = + guessMappingAndCalculateFieldStats(explanation, fieldName, fieldValues); + if (mappingAndFieldStats != null) { + if (mappingAndFieldStats.v1() != null) { + mappings.put(fieldName, mappingAndFieldStats.v1()); + } + if (mappingAndFieldStats.v2() != null) { + fieldStats.put(fieldName, mappingAndFieldStats.v2()); + } } } - return mappings; + return new Tuple<>(mappings, fieldStats); } - static Map guessMapping(List explanation, String fieldName, List fieldValues) { - + static Tuple, FieldStats> guessMappingAndCalculateFieldStats(List explanation, + String fieldName, List fieldValues) { if (fieldValues == null || fieldValues.isEmpty()) { // We can get here if all the records that contained a given field had a null value for it. // In this case it's best not to make any statement about what the mapping type should be. @@ -140,7 +156,7 @@ final class LogStructureUtils { if (fieldValues.stream().anyMatch(value -> value instanceof Map)) { if (fieldValues.stream().allMatch(value -> value instanceof Map)) { - return Collections.singletonMap(MAPPING_TYPE_SETTING, "object"); + return new Tuple<>(Collections.singletonMap(MAPPING_TYPE_SETTING, "object"), null); } throw new IllegalArgumentException("Field [" + fieldName + "] has both object and non-object values - this is not supported by Elasticsearch"); @@ -148,11 +164,12 @@ final class LogStructureUtils { if (fieldValues.stream().anyMatch(value -> value instanceof List || value instanceof Object[])) { // Elasticsearch fields can be either arrays or single values, but array values must all have the same type - return guessMapping(explanation, fieldName, - fieldValues.stream().flatMap(LogStructureUtils::flatten).collect(Collectors.toList())); + return guessMappingAndCalculateFieldStats(explanation, fieldName, + fieldValues.stream().flatMap(FileStructureUtils::flatten).collect(Collectors.toList())); } - return guessScalarMapping(explanation, fieldName, fieldValues.stream().map(Object::toString).collect(Collectors.toList())); + Collection fieldValuesAsStrings = fieldValues.stream().map(Object::toString).collect(Collectors.toList()); + return new Tuple<>(guessScalarMapping(explanation, fieldName, fieldValuesAsStrings), calculateFieldStats(fieldValuesAsStrings)); } private static Stream flatten(Object value) { @@ -170,7 +187,7 @@ final class LogStructureUtils { /** * Given some sample values for a field, guess the most appropriate index mapping for the * field. - * @param explanation List of reasons for choosing the overall log structure. This list + * @param explanation List of reasons for choosing the overall file structure. This list * may be non-empty when the method is called, and this method may * append to it. * @param fieldName Name of the field for which mappings are to be guessed. 
@@ -220,13 +237,25 @@ final class LogStructureUtils { return Collections.singletonMap(MAPPING_TYPE_SETTING, "ip"); } - if (fieldValues.stream().anyMatch(LogStructureUtils::isMoreLikelyTextThanKeyword)) { + if (fieldValues.stream().anyMatch(FileStructureUtils::isMoreLikelyTextThanKeyword)) { return Collections.singletonMap(MAPPING_TYPE_SETTING, "text"); } return Collections.singletonMap(MAPPING_TYPE_SETTING, "keyword"); } + /** + * Calculate stats for a set of field values. + * @param fieldValues Values of the field for which field stats are to be calculated. + * @return The stats calculated from the field values. + */ + static FieldStats calculateFieldStats(Collection fieldValues) { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + calculator.accept(fieldValues); + return calculator.calculate(NUM_TOP_HITS); + } + /** * The thinking is that the longer the field value and the more spaces it contains, * the more likely it is that it should be indexed as text rather than keyword. diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/GrokPatternCreator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreator.java similarity index 92% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/GrokPatternCreator.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreator.java index 186477507ac..292d0b8e8b3 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/GrokPatternCreator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreator.java @@ -3,11 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.grok.Grok; -import org.elasticsearch.xpack.ml.logstructurefinder.TimestampFormatFinder.TimestampMatch; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; +import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.ArrayList; import java.util.Arrays; @@ -119,6 +120,7 @@ public final class GrokPatternCreator { * Both this class and other classes will update it. */ private final Map mappings; + private final Map fieldStats; private final Map fieldNameCountStore = new HashMap<>(); private final StringBuilder overallGrokPatternBuilder = new StringBuilder(); @@ -128,22 +130,26 @@ public final class GrokPatternCreator { * can be appended by the methods of this class. * @param sampleMessages Sample messages that any Grok pattern found must match. * @param mappings Will be updated with mappings appropriate for the returned pattern, if non-null. + * @param fieldStats Will be updated with field stats for the fields in the returned pattern, if non-null. 
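+ * Either of {@code mappings} and {@code fieldStats} may be null if the caller does not need that output.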
*/ - public GrokPatternCreator(List explanation, Collection sampleMessages, Map mappings) { + public GrokPatternCreator(List explanation, Collection sampleMessages, Map mappings, + Map fieldStats) { this.explanation = explanation; this.sampleMessages = Collections.unmodifiableCollection(sampleMessages); this.mappings = mappings; + this.fieldStats = fieldStats; } /** * This method attempts to find a Grok pattern that will match all of the sample messages in their entirety. + * It will also update mappings and field stats if they are non-null. * @return A tuple of (time field name, Grok string), or null if no suitable Grok pattern was found. */ public Tuple findFullLineGrokPattern() { for (FullMatchGrokPatternCandidate candidate : FULL_MATCH_GROK_PATTERNS) { if (candidate.matchesAll(sampleMessages)) { - return candidate.processMatch(explanation, sampleMessages, mappings); + return candidate.processMatch(explanation, sampleMessages, mappings, fieldStats); } } @@ -186,7 +192,8 @@ public final class GrokPatternCreator { Collection prefaces = new ArrayList<>(); Collection epilogues = new ArrayList<>(); - String patternBuilderContent = chosenPattern.processCaptures(fieldNameCountStore, snippets, prefaces, epilogues, mappings); + String patternBuilderContent = + chosenPattern.processCaptures(fieldNameCountStore, snippets, prefaces, epilogues, mappings, fieldStats); appendBestGrokMatchForStrings(false, prefaces, ignoreKeyValueCandidateLeft, ignoreValueOnlyCandidatesLeft); overallGrokPatternBuilder.append(patternBuilderContent); appendBestGrokMatchForStrings(isLast, epilogues, ignoreKeyValueCandidateRight, ignoreValueOnlyCandidatesRight); @@ -375,11 +382,12 @@ public final class GrokPatternCreator { /** * After it has been determined that this Grok pattern candidate matches a collection of strings, * return collections of the bits that come before (prefaces) and after (epilogues) the bit - * that matches. Also update mappings with the most appropriate field name and type. + * that matches. Also update mappings with the most appropriate field name and type, and + * calculate field stats. * @return The string that needs to be incorporated into the overall Grok pattern for the line. 
*/ String processCaptures(Map fieldNameCountStore, Collection snippets, Collection prefaces, - Collection epilogues, Map mappings); + Collection epilogues, Map mappings, Map fieldStats); } /** @@ -436,8 +444,8 @@ public final class GrokPatternCreator { */ @Override public String processCaptures(Map fieldNameCountStore, Collection snippets, Collection prefaces, - Collection epilogues, Map mappings) { - String sampleValue = null; + Collection epilogues, Map mappings, Map fieldStats) { + Collection values = new ArrayList<>(); for (String snippet : snippets) { Map captures = grok.captures(snippet); // If the pattern doesn't match then captures will be null @@ -445,22 +453,24 @@ public final class GrokPatternCreator { throw new IllegalStateException("[%{" + grokPatternName + "}] does not match snippet [" + snippet + "]"); } prefaces.add(captures.getOrDefault(PREFACE, "").toString()); - if (sampleValue == null) { - sampleValue = captures.get(VALUE).toString(); - } + values.add(captures.getOrDefault(VALUE, "").toString()); epilogues.add(captures.getOrDefault(EPILOGUE, "").toString()); } String adjustedFieldName = buildFieldName(fieldNameCountStore, fieldName); if (mappings != null) { - Map fullMappingType = Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, mappingType); + Map fullMappingType = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, mappingType); if ("date".equals(mappingType)) { - TimestampMatch timestampMatch = TimestampFormatFinder.findFirstFullMatch(sampleValue); + assert values.isEmpty() == false; + TimestampMatch timestampMatch = TimestampFormatFinder.findFirstFullMatch(values.iterator().next()); if (timestampMatch != null) { fullMappingType = timestampMatch.getEsDateMappingTypeWithFormat(); } } mappings.put(adjustedFieldName, fullMappingType); } + if (fieldStats != null) { + fieldStats.put(adjustedFieldName, FileStructureUtils.calculateFieldStats(values)); + } return "%{" + grokPatternName + ":" + adjustedFieldName + "}"; } } @@ -505,7 +515,7 @@ public final class GrokPatternCreator { @Override public String processCaptures(Map fieldNameCountStore, Collection snippets, Collection prefaces, - Collection epilogues, Map mappings) { + Collection epilogues, Map mappings, Map fieldStats) { if (fieldName == null) { throw new IllegalStateException("Cannot process KV matches until a field name has been determined"); } @@ -524,7 +534,10 @@ public final class GrokPatternCreator { } String adjustedFieldName = buildFieldName(fieldNameCountStore, fieldName); if (mappings != null) { - mappings.put(adjustedFieldName, LogStructureUtils.guessScalarMapping(explanation, adjustedFieldName, values)); + mappings.put(adjustedFieldName, FileStructureUtils.guessScalarMapping(explanation, adjustedFieldName, values)); + } + if (fieldStats != null) { + fieldStats.put(adjustedFieldName, FileStructureUtils.calculateFieldStats(values)); } return "\\b" + fieldName + "=%{USER:" + adjustedFieldName + "}"; } @@ -541,8 +554,8 @@ public final class GrokPatternCreator { @Override public String processCaptures(Map fieldNameCountStore, Collection snippets, Collection prefaces, - Collection epilogues, Map mappings) { - return super.processCaptures(fieldNameCountStore, snippets, prefaces, epilogues, null); + Collection epilogues, Map mappings, Map fieldStats) { + return super.processCaptures(fieldNameCountStore, snippets, prefaces, epilogues, null, fieldStats); } } @@ -570,11 +583,11 @@ public final class GrokPatternCreator { * @return A tuple of (time field name, Grok string). 
*/ public Tuple processMatch(List explanation, Collection sampleMessages, - Map mappings) { + Map mappings, Map fieldStats) { explanation.add("A full message Grok pattern [" + grokString.substring(2, grokString.length() - 1) + "] looks appropriate"); - if (mappings != null) { + if (mappings != null || fieldStats != null) { Map> valuesPerField = new HashMap<>(); for (String sampleMessage : sampleMessages) { @@ -604,8 +617,14 @@ public final class GrokPatternCreator { for (Map.Entry> valuesForField : valuesPerField.entrySet()) { String fieldName = valuesForField.getKey(); - mappings.put(fieldName, - LogStructureUtils.guessScalarMapping(explanation, fieldName, valuesForField.getValue())); + if (mappings != null) { + mappings.put(fieldName, + FileStructureUtils.guessScalarMapping(explanation, fieldName, valuesForField.getValue())); + } + if (fieldStats != null) { + fieldStats.put(fieldName, + FileStructureUtils.calculateFieldStats(valuesForField.getValue())); + } } } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinder.java similarity index 57% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinder.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinder.java index 98e8a0213fb..a488549bc52 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinder.java @@ -3,13 +3,15 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.ml.logstructurefinder.TimestampFormatFinder.TimestampMatch; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; +import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.io.IOException; import java.util.ArrayList; @@ -25,13 +27,13 @@ import static org.elasticsearch.common.xcontent.json.JsonXContent.jsonXContent; /** * Really ND-JSON. 
*/ -public class JsonLogStructureFinder implements LogStructureFinder { +public class JsonFileStructureFinder implements FileStructureFinder { private final List sampleMessages; - private final LogStructure structure; + private final FileStructure structure; - static JsonLogStructureFinder makeJsonLogStructureFinder(List explanation, String sample, String charsetName, - Boolean hasByteOrderMarker) throws IOException { + static JsonFileStructureFinder makeJsonFileStructureFinder(List explanation, String sample, String charsetName, + Boolean hasByteOrderMarker) throws IOException { List> sampleRecords = new ArrayList<>(); @@ -42,32 +44,39 @@ public class JsonLogStructureFinder implements LogStructureFinder { sampleRecords.add(parser.mapOrdered()); } - LogStructure.Builder structureBuilder = new LogStructure.Builder(LogStructure.Format.JSON) + FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.JSON) .setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) .setSampleStart(sampleMessages.stream().limit(2).collect(Collectors.joining("\n", "", "\n"))) .setNumLinesAnalyzed(sampleMessages.size()) .setNumMessagesAnalyzed(sampleRecords.size()); - Tuple timeField = LogStructureUtils.guessTimestampField(explanation, sampleRecords); + Tuple timeField = FileStructureUtils.guessTimestampField(explanation, sampleRecords); if (timeField != null) { structureBuilder.setTimestampField(timeField.v1()) .setTimestampFormats(timeField.v2().dateFormats) .setNeedClientTimezone(timeField.v2().hasTimezoneDependentParsing()); } - SortedMap mappings = LogStructureUtils.guessMappings(explanation, sampleRecords); - mappings.put(LogStructureUtils.DEFAULT_TIMESTAMP_FIELD, Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "date")); + Tuple, SortedMap> mappingsAndFieldStats = + FileStructureUtils.guessMappingsAndCalculateFieldStats(explanation, sampleRecords); - LogStructure structure = structureBuilder + SortedMap mappings = mappingsAndFieldStats.v1(); + mappings.put(FileStructureUtils.DEFAULT_TIMESTAMP_FIELD, Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date")); + + if (mappingsAndFieldStats.v2() != null) { + structureBuilder.setFieldStats(mappingsAndFieldStats.v2()); + } + + FileStructure structure = structureBuilder .setMappings(mappings) .setExplanation(explanation) .build(); - return new JsonLogStructureFinder(sampleMessages, structure); + return new JsonFileStructureFinder(sampleMessages, structure); } - private JsonLogStructureFinder(List sampleMessages, LogStructure structure) { + private JsonFileStructureFinder(List sampleMessages, FileStructure structure) { this.sampleMessages = Collections.unmodifiableList(sampleMessages); this.structure = structure; } @@ -78,7 +87,7 @@ public class JsonLogStructureFinder implements LogStructureFinder { } @Override - public LogStructure getStructure() { + public FileStructure getStructure() { return structure; } } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderFactory.java similarity index 88% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactory.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderFactory.java index c5da103eb05..02be3c1cf19 100644 --- 
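The per-field statistics threaded through the finders above are plain summaries of the sampled values. A minimal standalone sketch of the kind of computation a calculateFieldStats helper performs; the exact metrics carried by FieldStats (count, cardinality, numeric min/max/mean, top hits) are an assumption here:

```java
import java.util.*;
import java.util.stream.Collectors;

final class FieldStatsSketch {

    // Summary statistics for one field, derived purely from its sampled string values.
    static Map<String, Object> calculateFieldStats(Collection<String> values) {
        Map<String, Object> stats = new LinkedHashMap<>();
        stats.put("count", values.size());
        stats.put("cardinality", new HashSet<>(values).size());

        // Numeric min/max/mean only make sense if every sampled value parses as a number.
        List<Double> numbers = new ArrayList<>();
        for (String value : values) {
            try {
                numbers.add(Double.valueOf(value));
            } catch (NumberFormatException e) {
                numbers = null;
                break;
            }
        }
        if (numbers != null && numbers.isEmpty() == false) {
            DoubleSummaryStatistics summary =
                numbers.stream().mapToDouble(Double::doubleValue).summaryStatistics();
            stats.put("min_value", summary.getMin());
            stats.put("max_value", summary.getMax());
            stats.put("mean_value", summary.getAverage());
        }

        // Most frequent sampled values, most common first.
        Map<String, Long> frequencies = values.stream()
            .collect(Collectors.groupingBy(v -> v, LinkedHashMap::new, Collectors.counting()));
        stats.put("top_hits", frequencies.entrySet().stream()
            .sorted(Map.Entry.<String, Long>comparingByValue().reversed())
            .limit(10)
            .collect(Collectors.toList()));
        return stats;
    }
}
```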
a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderFactory.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -16,7 +16,7 @@ import java.util.Locale; import static org.elasticsearch.common.xcontent.json.JsonXContent.jsonXContent; -public class JsonLogStructureFinderFactory implements LogStructureFinderFactory { +public class JsonFileStructureFinderFactory implements FileStructureFinderFactory { /** * This format matches if the sample consists of one or more JSON documents. @@ -61,9 +61,9 @@ public class JsonLogStructureFinderFactory implements LogStructureFinderFactory } @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) + public FileStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) throws IOException { - return JsonLogStructureFinder.makeJsonLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker); + return JsonFileStructureFinder.makeJsonFileStructureFinder(explanation, sample, charsetName, hasByteOrderMarker); } private static class ContextPrintingStringReader extends StringReader { diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java similarity index 84% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinder.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java index 722751a4cf4..95e0a5dc69d 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java @@ -3,10 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
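JsonFileStructureFinderFactory only claims a sample when it consists of one or more complete JSON documents, one per line. A rough sketch of that ND-JSON check using Jackson; this is an illustration only, since the real factory uses Elasticsearch's XContent parsers, as the imports above show:

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;

final class NdJsonSniffer {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    // True if every non-blank line in the sample is a self-contained JSON object.
    static boolean looksLikeNdJson(String sample) {
        boolean sawDocument = false;
        for (String line : sample.split("\n")) {
            if (line.trim().isEmpty()) {
                continue;
            }
            try {
                if (MAPPER.readTree(line).isObject() == false) {
                    return false;
                }
            } catch (IOException e) {
                return false; // not parseable as JSON, so not ND-JSON
            }
            sawDocument = true;
        }
        return sawDocument;
    }
}
```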
*/ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.xpack.ml.logstructurefinder.TimestampFormatFinder.TimestampMatch; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; +import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.ArrayList; import java.util.Collection; @@ -20,20 +22,20 @@ import java.util.SortedMap; import java.util.TreeMap; import java.util.regex.Pattern; -public class TextLogStructureFinder implements LogStructureFinder { +public class TextLogFileStructureFinder implements FileStructureFinder { private final List sampleMessages; - private final LogStructure structure; + private final FileStructure structure; - static TextLogStructureFinder makeTextLogStructureFinder(List explanation, String sample, String charsetName, - Boolean hasByteOrderMarker) { + static TextLogFileStructureFinder makeTextLogFileStructureFinder(List explanation, String sample, String charsetName, + Boolean hasByteOrderMarker) { String[] sampleLines = sample.split("\n"); Tuple> bestTimestamp = mostLikelyTimestamp(sampleLines); if (bestTimestamp == null) { // Is it appropriate to treat a file that is neither structured nor has // a regular pattern of timestamps as a log file? Probably not... - throw new IllegalArgumentException("Could not find a timestamp in the log sample provided"); + throw new IllegalArgumentException("Could not find a timestamp in the sample provided"); } explanation.add("Most likely timestamp format is [" + bestTimestamp.v1() + "]"); @@ -70,7 +72,7 @@ public class TextLogStructureFinder implements LogStructureFinder { } // Don't add the last message, as it might be partial and mess up subsequent pattern finding - LogStructure.Builder structureBuilder = new LogStructure.Builder(LogStructure.Format.SEMI_STRUCTURED_TEXT) + FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.SEMI_STRUCTURED_TEXT) .setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) .setSampleStart(preamble.toString()) @@ -79,13 +81,15 @@ public class TextLogStructureFinder implements LogStructureFinder { .setMultilineStartPattern(multiLineRegex); SortedMap mappings = new TreeMap<>(); - mappings.put("message", Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "text")); - mappings.put(LogStructureUtils.DEFAULT_TIMESTAMP_FIELD, Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "date")); + mappings.put("message", Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "text")); + mappings.put(FileStructureUtils.DEFAULT_TIMESTAMP_FIELD, Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date")); + + SortedMap fieldStats = new TreeMap<>(); // We can't parse directly into @timestamp using Grok, so parse to some other time field, which the date filter will then remove String interimTimestampField; String grokPattern; - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, fieldStats); Tuple timestampFieldAndFullMatchGrokPattern = grokPatternCreator.findFullLineGrokPattern(); if (timestampFieldAndFullMatchGrokPattern != null) { interimTimestampField = timestampFieldAndFullMatchGrokPattern.v1(); 
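TextLogFileStructureFinder stitches raw lines into messages by treating every line that begins with the chosen timestamp as the start of a new message. A simplified sketch of that grouping; the hard-coded regex is a stand-in for the Grok-derived timestamp pattern the finder actually selects:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;

final class MessageAssembler {

    // Stand-in for the pattern derived from the most likely timestamp format.
    private static final Pattern STARTS_WITH_TIMESTAMP =
        Pattern.compile("^\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}:\\d{2}");

    static List<String> assembleMessages(String[] sampleLines) {
        List<String> messages = new ArrayList<>();
        StringBuilder current = null;
        for (String line : sampleLines) {
            if (STARTS_WITH_TIMESTAMP.matcher(line).find()) {
                if (current != null) {
                    messages.add(current.toString());
                }
                current = new StringBuilder(line);
            } else if (current != null) {
                // Continuation of a multi-line message, e.g. a stack trace.
                current.append('\n').append(line);
            }
        }
        // The final message is deliberately dropped: it might be truncated
        // and would mislead the subsequent Grok pattern finding.
        return messages;
    }
}
```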
@@ -95,19 +99,20 @@ public class TextLogStructureFinder implements LogStructureFinder { grokPattern = grokPatternCreator.createGrokPatternFromExamples(bestTimestamp.v1().grokPatternName, interimTimestampField); } - LogStructure structure = structureBuilder + FileStructure structure = structureBuilder .setTimestampField(interimTimestampField) .setTimestampFormats(bestTimestamp.v1().dateFormats) .setNeedClientTimezone(bestTimestamp.v1().hasTimezoneDependentParsing()) .setGrokPattern(grokPattern) .setMappings(mappings) + .setFieldStats(fieldStats) .setExplanation(explanation) .build(); - return new TextLogStructureFinder(sampleMessages, structure); + return new TextLogFileStructureFinder(sampleMessages, structure); } - private TextLogStructureFinder(List sampleMessages, LogStructure structure) { + private TextLogFileStructureFinder(List sampleMessages, FileStructure structure) { this.sampleMessages = Collections.unmodifiableList(sampleMessages); this.structure = structure; } @@ -118,7 +123,7 @@ public class TextLogStructureFinder implements LogStructureFinder { } @Override - public LogStructure getStructure() { + public FileStructure getStructure() { return structure; } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactory.java similarity index 73% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactory.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactory.java index d129ba95bd8..5f737eeb9b8 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactory.java @@ -3,12 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import java.util.List; import java.util.regex.Pattern; -public class TextLogStructureFinderFactory implements LogStructureFinderFactory { +public class TextLogFileStructureFinderFactory implements FileStructureFinderFactory { // This works because, by default, dot doesn't match newlines private static final Pattern TWO_NON_BLANK_LINES_PATTERN = Pattern.compile(".\n+."); @@ -33,7 +33,7 @@ public class TextLogStructureFinderFactory implements LogStructureFinderFactory } @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) { - return TextLogStructureFinder.makeTextLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker); + public FileStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) { + return TextLogFileStructureFinder.makeTextLogFileStructureFinder(explanation, sample, charsetName, hasByteOrderMarker); } } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TimestampFormatFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java similarity index 98% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TimestampFormatFinder.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java index 30c94378f9e..81e490878a0 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TimestampFormatFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
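The `.\n+.` idiom works because `.` does not match line terminators unless `Pattern.DOTALL` is set, so the pattern can only find a match when a character is followed, across one or more newlines, by another character. A quick demonstration:

```java
import java.util.regex.Pattern;

final class TwoNonBlankLinesDemo {

    private static final Pattern TWO_NON_BLANK_LINES_PATTERN = Pattern.compile(".\n+.");

    public static void main(String[] args) {
        // Matches: a character, one or more newlines, another character.
        System.out.println(TWO_NON_BLANK_LINES_PATTERN.matcher("line one\nline two").find());   // true
        System.out.println(TWO_NON_BLANK_LINES_PATTERN.matcher("line one\n\nline two").find()); // true
        System.out.println(TWO_NON_BLANK_LINES_PATTERN.matcher("only one line").find());        // false
        // Whitespace-only lines still contain characters that "." can match:
        System.out.println(TWO_NON_BLANK_LINES_PATTERN.matcher(" \n ").find());                 // true
    }
}
```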
*/ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.grok.Grok; @@ -334,10 +334,10 @@ public final class TimestampFormatFinder { public Map getEsDateMappingTypeWithFormat() { if (dateFormats.contains("TAI64N")) { // There's no format for TAI64N in the date formats used in mappings - return Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"); + return Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"); } Map mapping = new LinkedHashMap<>(); - mapping.put(LogStructureUtils.MAPPING_TYPE_SETTING, "date"); + mapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); String formats = dateFormats.stream().flatMap(format -> { switch (format) { case "ISO8601": @@ -351,7 +351,7 @@ public final class TimestampFormatFinder { } }).collect(Collectors.joining("||")); if (formats.isEmpty() == false) { - mapping.put(LogStructureUtils.MAPPING_FORMAT_SETTING, formats); + mapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, formats); } return mapping; } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java similarity index 75% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinder.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java index d664a9ccb82..570f36f59c0 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java @@ -3,10 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
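When several date formats are detected for one field, the mapping joins them with `||`, which is how Elasticsearch date mappings express alternative formats. A sketch of the shape of the result, skipping the per-format translation the switch above performs and assuming the mapping keys are the literal "type" and "format":

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

final class DateMappingSketch {

    static Map<String, String> dateMappingWithFormats(List<String> dateFormats) {
        Map<String, String> mapping = new LinkedHashMap<>();
        mapping.put("type", "date");
        if (dateFormats.isEmpty() == false) {
            // Multiple acceptable formats are separated by "||" in a date mapping.
            mapping.put("format", String.join("||", dateFormats));
        }
        return mapping;
    }

    public static void main(String[] args) {
        // Prints: {type=date, format=YYYY-MM-dd HH:mm:ss,SSS||ISO8601}
        System.out.println(dateMappingWithFormats(List.of("YYYY-MM-dd HH:mm:ss,SSS", "ISO8601")));
    }
}
```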
*/ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.xpack.ml.logstructurefinder.TimestampFormatFinder.TimestampMatch; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; +import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import org.w3c.dom.Document; import org.w3c.dom.NamedNodeMap; import org.w3c.dom.Node; @@ -30,13 +32,13 @@ import java.util.SortedMap; import java.util.TreeMap; import java.util.regex.Pattern; -public class XmlLogStructureFinder implements LogStructureFinder { +public class XmlFileStructureFinder implements FileStructureFinder { private final List sampleMessages; - private final LogStructure structure; + private final FileStructure structure; - static XmlLogStructureFinder makeXmlLogStructureFinder(List explanation, String sample, String charsetName, - Boolean hasByteOrderMarker) + static XmlFileStructureFinder makeXmlFileStructureFinder(List explanation, String sample, String charsetName, + Boolean hasByteOrderMarker) throws IOException, ParserConfigurationException, SAXException { String messagePrefix; @@ -80,7 +82,7 @@ public class XmlLogStructureFinder implements LogStructureFinder { assert messagePrefix.charAt(0) == '<'; String topLevelTag = messagePrefix.substring(1); - LogStructure.Builder structureBuilder = new LogStructure.Builder(LogStructure.Format.XML) + FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.XML) .setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) .setSampleStart(preamble.toString()) @@ -88,31 +90,38 @@ public class XmlLogStructureFinder implements LogStructureFinder { .setNumMessagesAnalyzed(sampleRecords.size()) .setMultilineStartPattern("^\\s*<" + topLevelTag); - Tuple timeField = LogStructureUtils.guessTimestampField(explanation, sampleRecords); + Tuple timeField = FileStructureUtils.guessTimestampField(explanation, sampleRecords); if (timeField != null) { structureBuilder.setTimestampField(timeField.v1()) .setTimestampFormats(timeField.v2().dateFormats) .setNeedClientTimezone(timeField.v2().hasTimezoneDependentParsing()); } - SortedMap innerMappings = LogStructureUtils.guessMappings(explanation, sampleRecords); + Tuple, SortedMap> mappingsAndFieldStats = + FileStructureUtils.guessMappingsAndCalculateFieldStats(explanation, sampleRecords); + + if (mappingsAndFieldStats.v2() != null) { + structureBuilder.setFieldStats(mappingsAndFieldStats.v2()); + } + + SortedMap innerMappings = mappingsAndFieldStats.v1(); Map secondLevelProperties = new LinkedHashMap<>(); - secondLevelProperties.put(LogStructureUtils.MAPPING_TYPE_SETTING, "object"); - secondLevelProperties.put(LogStructureUtils.MAPPING_PROPERTIES_SETTING, innerMappings); + secondLevelProperties.put(FileStructureUtils.MAPPING_TYPE_SETTING, "object"); + secondLevelProperties.put(FileStructureUtils.MAPPING_PROPERTIES_SETTING, innerMappings); SortedMap outerMappings = new TreeMap<>(); outerMappings.put(topLevelTag, secondLevelProperties); - outerMappings.put(LogStructureUtils.DEFAULT_TIMESTAMP_FIELD, - Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "date")); + outerMappings.put(FileStructureUtils.DEFAULT_TIMESTAMP_FIELD, + Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date")); - LogStructure structure = structureBuilder + FileStructure 
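Since every XML message is rooted at topLevelTag, the guessed per-field mappings are nested one level down as an `object` with `properties`. The two-level structure built above is easiest to see in isolation; "@timestamp" as the default timestamp field name is an assumption matching DEFAULT_TIMESTAMP_FIELD:

```java
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

final class XmlMappingsSketch {

    static SortedMap<String, Object> wrapUnderTopLevelTag(String topLevelTag,
                                                          SortedMap<String, Object> innerMappings) {
        // The fields found inside each document hang off the top-level tag as an object.
        Map<String, Object> secondLevelProperties = new LinkedHashMap<>();
        secondLevelProperties.put("type", "object");
        secondLevelProperties.put("properties", innerMappings);

        SortedMap<String, Object> outerMappings = new TreeMap<>();
        outerMappings.put(topLevelTag, secondLevelProperties);
        // The timestamp field is always mapped at the top level.
        outerMappings.put("@timestamp", Collections.singletonMap("type", "date"));
        return outerMappings;
    }
}
```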
structure = structureBuilder .setMappings(outerMappings) .setExplanation(explanation) .build(); - return new XmlLogStructureFinder(sampleMessages, structure); + return new XmlFileStructureFinder(sampleMessages, structure); } - private XmlLogStructureFinder(List sampleMessages, LogStructure structure) { + private XmlFileStructureFinder(List sampleMessages, FileStructure structure) { this.sampleMessages = Collections.unmodifiableList(sampleMessages); this.structure = structure; } @@ -123,7 +132,7 @@ public class XmlLogStructureFinder implements LogStructureFinder { } @Override - public LogStructure getStructure() { + public FileStructure getStructure() { return structure; } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactory.java similarity index 92% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactory.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactory.java index c7577ff07de..f8536d14375 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactory.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.xml.sax.SAXException; @@ -17,11 +17,11 @@ import java.io.Reader; import java.io.StringReader; import java.util.List; -public class XmlLogStructureFinderFactory implements LogStructureFinderFactory { +public class XmlFileStructureFinderFactory implements FileStructureFinderFactory { private final XMLInputFactory xmlFactory; - public XmlLogStructureFinderFactory() { + public XmlFileStructureFinderFactory() { xmlFactory = XMLInputFactory.newInstance(); xmlFactory.setProperty(XMLInputFactory.IS_NAMESPACE_AWARE, Boolean.FALSE); xmlFactory.setProperty(XMLInputFactory.IS_VALIDATING, Boolean.FALSE); @@ -115,8 +115,8 @@ public class XmlLogStructureFinderFactory implements LogStructureFinderFactory { } @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) + public FileStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) throws IOException, ParserConfigurationException, SAXException { - return XmlLogStructureFinder.makeXmlLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker); + return XmlFileStructureFinder.makeXmlFileStructureFinder(explanation, sample, charsetName, hasByteOrderMarker); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java index 04280261b26..a0e00ebf733 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java @@ 
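The factory switches off namespace awareness and validation before sniffing the sample. When the input is untrusted it is also common to disable DTD support and external entity resolution; a sketch of such a locked-down StAX factory (the two extra properties are an addition for illustration, not part of the change above):

```java
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
import java.io.StringReader;

final class HardenedXmlFactory {

    static XMLInputFactory newHardenedFactory() {
        XMLInputFactory xmlFactory = XMLInputFactory.newInstance();
        xmlFactory.setProperty(XMLInputFactory.IS_NAMESPACE_AWARE, Boolean.FALSE);
        xmlFactory.setProperty(XMLInputFactory.IS_VALIDATING, Boolean.FALSE);
        // Defence against XXE when the input is untrusted.
        xmlFactory.setProperty(XMLInputFactory.SUPPORT_DTD, Boolean.FALSE);
        xmlFactory.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, Boolean.FALSE);
        return xmlFactory;
    }

    public static void main(String[] args) throws XMLStreamException {
        XMLStreamReader reader = newHardenedFactory()
            .createXMLStreamReader(new StringReader("<log><level>INFO</level></log>"));
        while (reader.hasNext()) {
            if (reader.next() == XMLStreamReader.START_ELEMENT) {
                System.out.println(reader.getLocalName()); // log, level
            }
        }
    }
}
```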
-25,14 +25,15 @@ import java.util.regex.Pattern; */ public final class GrokPatternCreator { - private static String PREFACE = "preface"; - private static String EPILOGUE = "epilogue"; + private static final String PREFACE = "preface"; + private static final String EPILOGUE = "epilogue"; /** * The first match in this list will be chosen, so it needs to be ordered * such that more generic patterns come after more specific patterns. */ private static final List ORDERED_CANDIDATE_GROK_PATTERNS = Arrays.asList( + new GrokPatternCandidate("TOMCAT_DATESTAMP", "timestamp"), new GrokPatternCandidate("TIMESTAMP_ISO8601", "timestamp"), new GrokPatternCandidate("DATESTAMP_RFC822", "timestamp"), new GrokPatternCandidate("DATESTAMP_RFC2822", "timestamp"), @@ -41,7 +42,6 @@ public final class GrokPatternCreator { new GrokPatternCandidate("SYSLOGTIMESTAMP", "timestamp"), new GrokPatternCandidate("HTTPDATE", "timestamp"), new GrokPatternCandidate("CATALINA_DATESTAMP", "timestamp"), - new GrokPatternCandidate("TOMCAT_DATESTAMP", "timestamp"), new GrokPatternCandidate("CISCOTIMESTAMP", "timestamp"), new GrokPatternCandidate("DATE", "date"), new GrokPatternCandidate("TIME", "time"), @@ -56,12 +56,10 @@ public final class GrokPatternCreator { new GrokPatternCandidate("IP", "ipaddress"), // This already includes pre/post break conditions new GrokPatternCandidate("QUOTEDSTRING", "field", "", ""), - // Can't use \b as the break before, because it doesn't work for negative numbers (the - // minus sign is not a "word" character) - new GrokPatternCandidate("NUMBER", "field", "(? forecastsToDelete) { - SearchRequest searchRequest = new SearchRequest(); - // We need to create the DeleteByQueryRequest before we modify the SearchRequest - // because the constructor of the former wipes the latter - DeleteByQueryRequest request = new DeleteByQueryRequest(searchRequest); + DeleteByQueryRequest request = new DeleteByQueryRequest(); request.setSlices(5); - searchRequest.indices(RESULTS_INDEX_PATTERN); + request.indices(RESULTS_INDEX_PATTERN); BoolQueryBuilder boolQuery = QueryBuilders.boolQuery().minimumShouldMatch(1); boolQuery.must(QueryBuilders.termsQuery(Result.RESULT_TYPE.getPreferredName(), ForecastRequestStats.RESULT_TYPE_VALUE, Forecast.RESULT_TYPE_VALUE)); @@ -157,7 +154,7 @@ public class ExpiredForecastsRemover implements MlDataRemover { .must(QueryBuilders.termQuery(Forecast.FORECAST_ID.getPreferredName(), forecastToDelete.getForecastId()))); } QueryBuilder query = QueryBuilders.boolQuery().filter(boolQuery); - searchRequest.source(new SearchSourceBuilder().query(query)); + request.setQuery(query); return request; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java index f59fdddedec..c882c901168 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.ml.job.retention; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.Loggers; @@ -17,7 +16,6 @@ import 
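Moving TOMCAT_DATESTAMP ahead of TIMESTAMP_ISO8601 matters because the first matching candidate wins, so more specific patterns must precede the generic ones they overlap with. A toy illustration of that first-match-wins scan; the two regexes are rough stand-ins for the real Grok patterns:

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Pattern;

final class FirstMatchWins {

    // Insertion order is significant: the more specific pattern must come first.
    private static final Map<String, Pattern> ORDERED_CANDIDATES = new LinkedHashMap<>();
    static {
        // Tomcat timestamps use comma-separated milliseconds ...
        ORDERED_CANDIDATES.put("TOMCAT_DATESTAMP",
            Pattern.compile("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}"));
        // ... which a generic ISO8601-ish pattern would also consume, less precisely.
        ORDERED_CANDIDATES.put("TIMESTAMP_ISO8601",
            Pattern.compile("\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}:\\d{2}"));
    }

    static String firstMatch(String snippet) {
        for (Map.Entry<String, Pattern> candidate : ORDERED_CANDIDATES.entrySet()) {
            if (candidate.getValue().matcher(snippet).find()) {
                return candidate.getKey();
            }
        }
        return null;
    }

    public static void main(String[] args) {
        System.out.println(firstMatch("2018-09-12 10:11:12,123 INFO startup")); // TOMCAT_DATESTAMP
        System.out.println(firstMatch("2018-09-12T10:11:12 INFO startup"));     // TIMESTAMP_ISO8601
    }
}
```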
org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.DeleteByQueryRequest; -import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -87,19 +85,16 @@ public class ExpiredResultsRemover extends AbstractExpiredJobDataRemover { } private DeleteByQueryRequest createDBQRequest(Job job, long cutoffEpochMs) { - SearchRequest searchRequest = new SearchRequest(); - // We need to create the DeleteByQueryRequest before we modify the SearchRequest - // because the constructor of the former wipes the latter - DeleteByQueryRequest request = new DeleteByQueryRequest(searchRequest); + DeleteByQueryRequest request = new DeleteByQueryRequest(); request.setSlices(5); - searchRequest.indices(AnomalyDetectorsIndex.jobResultsAliasedName(job.getId())); + request.indices(AnomalyDetectorsIndex.jobResultsAliasedName(job.getId())); QueryBuilder excludeFilter = QueryBuilders.termsQuery(Result.RESULT_TYPE.getPreferredName(), ModelSizeStats.RESULT_TYPE_VALUE, ForecastRequestStats.RESULT_TYPE_VALUE, Forecast.RESULT_TYPE_VALUE); QueryBuilder query = createQuery(job.getId(), cutoffEpochMs) .filter(QueryBuilders.existsQuery(Result.RESULT_TYPE.getPreferredName())) .mustNot(excludeFilter); - searchRequest.source(new SearchSourceBuilder().query(query)); + request.setQuery(query); return request; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestFindFileStructureAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestFindFileStructureAction.java new file mode 100644 index 00000000000..83293c7d60e --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestFindFileStructureAction.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.rest; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.ml.action.FindFileStructureAction; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.filestructurefinder.FileStructureFinderManager; + +import java.io.IOException; +import java.util.Collections; +import java.util.Set; + +public class RestFindFileStructureAction extends BaseRestHandler { + + public RestFindFileStructureAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, MachineLearning.BASE_PATH + "find_file_structure", this); + } + + @Override + public String getName() { + return "xpack_ml_find_file_structure_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + + FindFileStructureAction.Request request = new FindFileStructureAction.Request(); + request.setLinesToSample(restRequest.paramAsInt(FindFileStructureAction.Request.LINES_TO_SAMPLE.getPreferredName(), + FileStructureFinderManager.DEFAULT_IDEAL_SAMPLE_LINE_COUNT)); + if (restRequest.hasContent()) { + request.setSample(restRequest.content()); + } else { + throw new ElasticsearchParseException("request body is required"); + } + + return channel -> client.execute(FindFileStructureAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } + + @Override + protected Set responseParams() { + return Collections.singleton(FileStructure.EXPLAIN); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestCloseJobAction.java index fc0638048ce..ae6257d5385 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestCloseJobAction.java @@ -12,10 +12,10 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.CloseJobAction.Request; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.ml.MachineLearning; import java.io.IOException; @@ -34,16 +34,21 @@ public class RestCloseJobAction extends BaseRestHandler { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - Request request = new Request(restRequest.param(Job.ID.getPreferredName())); - if (restRequest.hasParam(Request.TIMEOUT.getPreferredName())) { - request.setCloseTimeout(TimeValue.parseTimeValue( + Request request; + if (restRequest.hasContentOrSourceParam()) { + request = Request.parseRequest(restRequest.param(Job.ID.getPreferredName()), restRequest.contentParser()); + } else { + request = new 
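A rough sketch of exercising the new endpoint over HTTP, assuming MachineLearning.BASE_PATH resolves to the 6.x prefix /_xpack/ml/, a node on localhost:9200, and lines_to_sample as the parameter's preferred name:

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

final class FindFileStructureClient {

    public static void main(String[] args) throws IOException {
        // The sample goes in the request body; here two ND-JSON log records.
        String ndjsonSample = "{\"message\":\"foo\",\"time\":\"2018-05-17T13:41:23\"}\n"
            + "{\"message\":\"bar\",\"time\":\"2018-05-17T13:41:32\"}\n";

        URL url = new URL("http://localhost:9200/_xpack/ml/find_file_structure?lines_to_sample=1000");
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("POST");
        connection.setRequestProperty("Content-Type", "application/json");
        connection.setDoOutput(true);
        try (OutputStream body = connection.getOutputStream()) {
            body.write(ndjsonSample.getBytes(StandardCharsets.UTF_8));
        }
        try (InputStream response = connection.getInputStream()) {
            System.out.println(new String(response.readAllBytes(), StandardCharsets.UTF_8));
        }
    }
}
```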
Request(restRequest.param(Job.ID.getPreferredName())); + if (restRequest.hasParam(Request.TIMEOUT.getPreferredName())) { + request.setCloseTimeout(TimeValue.parseTimeValue( restRequest.param(Request.TIMEOUT.getPreferredName()), Request.TIMEOUT.getPreferredName())); - } - if (restRequest.hasParam(Request.FORCE.getPreferredName())) { - request.setForce(restRequest.paramAsBoolean(Request.FORCE.getPreferredName(), request.isForce())); - } - if (restRequest.hasParam(Request.ALLOW_NO_JOBS.getPreferredName())) { - request.setAllowNoJobs(restRequest.paramAsBoolean(Request.ALLOW_NO_JOBS.getPreferredName(), request.allowNoJobs())); + } + if (restRequest.hasParam(Request.FORCE.getPreferredName())) { + request.setForce(restRequest.paramAsBoolean(Request.FORCE.getPreferredName(), request.isForce())); + } + if (restRequest.hasParam(Request.ALLOW_NO_JOBS.getPreferredName())) { + request.setAllowNoJobs(restRequest.paramAsBoolean(Request.ALLOW_NO_JOBS.getPreferredName(), request.allowNoJobs())); + } } return channel -> client.execute(CloseJobAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteForecastAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteForecastAction.java new file mode 100644 index 00000000000..e42a73204eb --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteForecastAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.rest.job; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.ml.action.DeleteForecastAction; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.Forecast; +import org.elasticsearch.xpack.ml.MachineLearning; + +import java.io.IOException; + +public class RestDeleteForecastAction extends BaseRestHandler { + + public RestDeleteForecastAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.DELETE, + MachineLearning.BASE_PATH + + "anomaly_detectors/{" + Job.ID.getPreferredName() + + "}/_forecast/{" + Forecast.FORECAST_ID.getPreferredName() + "}", + this); + } + + @Override + public String getName() { + return "xpack_ml_delete_forecast_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String jobId = restRequest.param(Job.ID.getPreferredName()); + String forecastId = restRequest.param(Forecast.FORECAST_ID.getPreferredName(), MetaData.ALL); + final DeleteForecastAction.Request request = new DeleteForecastAction.Request(jobId, forecastId); + request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + request.setAllowNoForecasts(restRequest.paramAsBoolean("allow_no_forecasts", request.isAllowNoForecasts())); + return channel -> client.execute(DeleteForecastAction.INSTANCE, request, new 
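The close-job handler now treats a request body as authoritative and only falls back to query parameters when no body is present. A stripped-down sketch of that precedence rule; the CloseSettings holder and the raw maps are hypothetical stand-ins for the parsed RestRequest:

```java
import java.util.Map;

final class BodyOverParams {

    // Hypothetical stand-in for the resolved close-job options.
    static final class CloseSettings {
        String timeout;   // null means "use the server default"
        boolean force;
    }

    // body is the parsed JSON body, or null when the request had no content;
    // params holds the query-string parameters.
    static CloseSettings resolve(Map<String, Object> body, Map<String, String> params) {
        CloseSettings settings = new CloseSettings();
        if (body != null) {
            // A request body is authoritative; query parameters are ignored.
            settings.timeout = (String) body.get("timeout");
            settings.force = Boolean.TRUE.equals(body.get("force"));
        } else {
            // No body: fall back to the individual query parameters.
            settings.timeout = params.get("timeout");
            settings.force = Boolean.parseBoolean(params.getOrDefault("force", "false"));
        }
        return settings;
    }
}
```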
RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/results/RestGetRecordsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/results/RestGetRecordsAction.java index 4c5fc6c3346..c7e571f2cdc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/results/RestGetRecordsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/results/RestGetRecordsAction.java @@ -12,11 +12,10 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; import org.elasticsearch.xpack.core.ml.action.util.PageParams; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; +import org.elasticsearch.xpack.ml.MachineLearning; import java.io.IOException; @@ -54,8 +53,7 @@ public class RestGetRecordsAction extends BaseRestHandler { request.setRecordScore( Double.parseDouble(restRequest.param(GetRecordsAction.Request.RECORD_SCORE_FILTER.getPreferredName(), String.valueOf(request.getRecordScoreFilter())))); - request.setSort(restRequest.param(GetRecordsAction.Request.SORT.getPreferredName(), - AnomalyRecord.RECORD_SCORE.getPreferredName())); + request.setSort(restRequest.param(GetRecordsAction.Request.SORT.getPreferredName(), request.getSort())); request.setDescending(restRequest.paramAsBoolean(GetRecordsAction.Request.DESCENDING.getPreferredName(), request.isDescending())); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 02bfb1b326f..58b60273b0e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.ml.MlMetaIndex; @@ -66,6 +67,7 @@ import java.util.function.Function; import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -423,33 +425,6 @@ public class TransportOpenJobActionTests extends ESTestCase { assertNull(result.getExecutorNode()); } - public void testSelectLeastLoadedMlNode_noNodesPriorTo_V_5_5() { - Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); - DiscoveryNodes nodes = DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.V_5_4_0)) - .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, 
Collections.emptySet(), Version.V_5_4_0)) - .build(); - - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("incompatible_type_job", "_node_id1", null, tasksBuilder); - PersistentTasksCustomMetaData tasks = tasksBuilder.build(); - - ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); - MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, "incompatible_type_job"); - cs.nodes(nodes); - metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); - cs.metaData(metaData); - cs.routingTable(routingTable.build()); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("incompatible_type_job", cs.build(), 2, 10, 30, logger); - assertThat(result.getExplanation(), containsString("because this node does not support jobs of version [" + Version.CURRENT + "]")); - assertNull(result.getExecutorNode()); - } - public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() { Map nodeAttr = new HashMap<>(); nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); @@ -559,7 +534,7 @@ public class TransportOpenJobActionTests extends ESTestCase { } else { Index index = new Index(indexToRemove, "_uuid"); ShardId shardId = new ShardId(index, 0); - ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE, + ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); shardRouting = shardRouting.initialize("node_id", null, 0L); routingTable.add(IndexRoutingTable.builder(index) @@ -606,12 +581,6 @@ public class TransportOpenJobActionTests extends ESTestCase { assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); } - public void testMappingRequiresUpdateOldMappingVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_54", Version.V_5_4_0.toString())); - String[] indices = new String[] { "version_54" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - public void testMappingRequiresUpdateBogusMappingVersion() throws IOException { ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_bogus", "0.0")); String[] indices = new String[] { "version_bogus" }; @@ -632,21 +601,6 @@ public class TransportOpenJobActionTests extends ESTestCase { TransportOpenJobAction.mappingRequiresUpdate(cs, indices, VersionUtils.getPreviousMinorVersion(), logger)); } - public void testMappingRequiresUpdateSomeVersionMix() throws IOException { - Map versionMix = new HashMap<>(); - versionMix.put("version_54", Version.V_5_4_0); - versionMix.put("version_current", Version.CURRENT); - versionMix.put("version_null", null); - versionMix.put("version_current2", Version.CURRENT); - versionMix.put("version_bogus", "0.0.0"); - versionMix.put("version_current3", Version.CURRENT); - versionMix.put("version_bogus2", "0.0.0"); - - ClusterState cs = getClusterStateWithMappingsWithMetaData(versionMix); - String[] indices = new String[] { "version_54", "version_null", "version_bogus", "version_bogus2" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - public 
void testNodeNameAndVersion() { TransportAddress ta = new TransportAddress(InetAddress.getLoopbackAddress(), 9300); Map attributes = new HashMap<>(); @@ -674,6 +628,24 @@ public class TransportOpenJobActionTests extends ESTestCase { assertEquals("{_node_name1}{ml.machine_memory=5}{node.ml=true}", TransportOpenJobAction.nodeNameAndMlAttributes(node)); } + public void testJobTaskMatcherMatch() { + Task nonJobTask1 = mock(Task.class); + Task nonJobTask2 = mock(Task.class); + TransportOpenJobAction.JobTask jobTask1 = new TransportOpenJobAction.JobTask("ml-1", + 0, "persistent", "", null, null); + TransportOpenJobAction.JobTask jobTask2 = new TransportOpenJobAction.JobTask("ml-2", + 1, "persistent", "", null, null); + + assertThat(OpenJobAction.JobTaskMatcher.match(nonJobTask1, "_all"), is(false)); + assertThat(OpenJobAction.JobTaskMatcher.match(nonJobTask2, "_all"), is(false)); + assertThat(OpenJobAction.JobTaskMatcher.match(jobTask1, "_all"), is(true)); + assertThat(OpenJobAction.JobTaskMatcher.match(jobTask2, "_all"), is(true)); + assertThat(OpenJobAction.JobTaskMatcher.match(jobTask1, "ml-1"), is(true)); + assertThat(OpenJobAction.JobTaskMatcher.match(jobTask2, "ml-1"), is(false)); + assertThat(OpenJobAction.JobTaskMatcher.match(jobTask1, "ml-2"), is(false)); + assertThat(OpenJobAction.JobTaskMatcher.match(jobTask2, "ml-2"), is(true)); + } + public static void addJobTask(String jobId, String nodeId, JobState jobState, PersistentTasksCustomMetaData.Builder builder) { builder.addTask(MlTasks.jobTaskId(jobId), OpenJobAction.TASK_NAME, new OpenJobAction.JobParams(jobId), new Assignment(nodeId, "test assignment")); @@ -704,7 +676,7 @@ public class TransportOpenJobActionTests extends ESTestCase { metaData.put(indexMetaData); Index index = new Index(indexName, "_uuid"); ShardId shardId = new ShardId(index, 0); - ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE, + ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); shardRouting = shardRouting.initialize("node_id", null, 0L); shardRouting = shardRouting.moveToStarted(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java index ab3fe083d5f..50a016f6e5e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig; @@ -122,4 +122,4 @@ public class TransportPreviewDatafeedActionTests extends ESTestCase { assertThat(capturedFailure.getMessage(), equalTo("failed")); verify(dataExtractor).cancel(); } -} \ No newline at end of file +} diff --git 
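The new test pins down the matcher's contract: tasks that are not job tasks never match, and a job task matches either the `_all` wildcard or its own job id. A minimal sketch with hypothetical stand-in task types:

```java
final class JobTaskMatcherSketch {

    interface Task {}

    static final class JobTask implements Task {
        final String jobId;
        JobTask(String jobId) {
            this.jobId = jobId;
        }
    }

    // Mirrors the semantics exercised by testJobTaskMatcherMatch:
    // only JobTasks can match, and "_all" matches every JobTask.
    static boolean match(Task task, String expectedJobId) {
        if (task instanceof JobTask == false) {
            return false;
        }
        return "_all".equals(expectedJobId) || ((JobTask) task).jobId.equals(expectedJobId);
    }

    public static void main(String[] args) {
        System.out.println(match(new JobTask("ml-1"), "_all"));  // true
        System.out.println(match(new JobTask("ml-1"), "ml-2"));  // false
        System.out.println(match(new Task() {}, "_all"));        // false
    }
}
```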
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java index 72c8d361dd8..610a5c1b92f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java @@ -3,10 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.ml.action; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -14,7 +16,6 @@ import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; import org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java index 180727e88f2..35fd9bb98ab 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedJobValidator; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index 3a6082c6cf0..4b8ad1d08ae 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -349,7 +349,7 @@ public class DatafeedNodeSelectorTests extends ESTestCase { true, ShardRoutingState.RELOCATING); } else { shardRouting = ShardRouting.newUnassigned(shardId, true, - RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedStateTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedStateTests.java index 
8b3e68b1e57..32699f60cbd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedStateTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedStateTests.java @@ -5,19 +5,8 @@ */ package org.elasticsearch.xpack.ml.datafeed; -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.mockito.ArgumentCaptor; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -import java.io.IOException; - -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class DatafeedStateTests extends ESTestCase { @@ -37,35 +26,4 @@ public class DatafeedStateTests extends ESTestCase { assertEquals(2, DatafeedState.STARTING.ordinal()); assertEquals(3, DatafeedState.STOPPING.ordinal()); } - - @SuppressWarnings("unchecked") - public void testStreaming_v54BackwardsCompatibility() throws IOException { - StreamOutput out = mock(StreamOutput.class); - when(out.getVersion()).thenReturn(Version.V_5_4_0); - ArgumentCaptor enumCaptor = ArgumentCaptor.forClass(Enum.class); - - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - return null; - } - }).when(out).writeEnum(enumCaptor.capture()); - - // STARTING & STOPPING states were introduced in v5.5. - // Pre v5.5 STARTING translated as STOPPED - DatafeedState.STARTING.writeTo(out); - assertEquals(DatafeedState.STOPPED, enumCaptor.getValue()); - - // Pre v5.5 STOPPING means the datafeed is STARTED - DatafeedState.STOPPING.writeTo(out); - assertEquals(DatafeedState.STARTED, enumCaptor.getValue()); - - // POST 5.5 enums a written as is - when(out.getVersion()).thenReturn(Version.V_5_5_0); - - DatafeedState.STARTING.writeTo(out); - assertEquals(DatafeedState.STARTING, enumCaptor.getValue()); - DatafeedState.STOPPING.writeTo(out); - assertEquals(DatafeedState.STOPPING, enumCaptor.getValue()); - } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java deleted file mode 100644 index 81e4c75cfad..00000000000 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.ml.datafeed; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.protocol.xpack.license.LicenseStatus; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.action.XPackInfoAction; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.is; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.same; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class MlRemoteLicenseCheckerTests extends ESTestCase { - - public void testIsRemoteIndex() { - List<String> indices = Arrays.asList("local-index1", "local-index2"); - assertFalse(MlRemoteLicenseChecker.containsRemoteIndex(indices)); - indices = Arrays.asList("local-index1", "remote-cluster:remote-index2"); - assertTrue(MlRemoteLicenseChecker.containsRemoteIndex(indices)); - } - - public void testRemoteIndices() { - List<String> indices = Collections.singletonList("local-index"); - assertThat(MlRemoteLicenseChecker.remoteIndices(indices), is(empty())); - indices = Arrays.asList("local-index", "remote-cluster:index1", "local-index2", "remote-cluster2:index1"); - assertThat(MlRemoteLicenseChecker.remoteIndices(indices), containsInAnyOrder("remote-cluster:index1", "remote-cluster2:index1")); - } - - public void testRemoteClusterNames() { - List<String> indices = Arrays.asList("local-index1", "local-index2"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), empty()); - indices = Arrays.asList("local-index1", "remote-cluster1:remote-index2"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), contains("remote-cluster1")); - indices = Arrays.asList("remote-cluster1:index2", "index1", "remote-cluster2:index1"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), contains("remote-cluster1", "remote-cluster2")); - indices = Arrays.asList("remote-cluster1:index2", "index1", "remote-cluster2:index1", "remote-cluster2:index2"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), contains("remote-cluster1", "remote-cluster2")); - } - - public void testLicenseSupportsML() { - XPackInfoResponse.LicenseInfo licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "trial", "trial", - LicenseStatus.ACTIVE, randomNonNegativeLong()); - assertTrue(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - - licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "trial", "trial", LicenseStatus.EXPIRED, randomNonNegativeLong()); - assertFalse(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - - licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "GOLD", "GOLD", LicenseStatus.ACTIVE, randomNonNegativeLong()); -
assertFalse(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - - licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.ACTIVE, randomNonNegativeLong()); - assertTrue(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - } - - public void testCheckRemoteClusterLicenses_givenValidLicenses() { - final AtomicInteger index = new AtomicInteger(0); - final List<XPackInfoResponse> responses = new ArrayList<>(); - - Client client = createMockClient(); - doAnswer(invocationMock -> { - @SuppressWarnings("raw_types") - ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; - listener.onResponse(responses.get(index.getAndIncrement())); - return null; - }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); - - - List<String> remoteClusterNames = Arrays.asList("valid1", "valid2", "valid3"); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - - MlRemoteLicenseChecker licenseChecker = new MlRemoteLicenseChecker(client); - AtomicReference<MlRemoteLicenseChecker.LicenseViolation> licCheckResponse = new AtomicReference<>(); - - licenseChecker.checkRemoteClusterLicenses(remoteClusterNames, - new ActionListener<MlRemoteLicenseChecker.LicenseViolation>() { - @Override - public void onResponse(MlRemoteLicenseChecker.LicenseViolation response) { - licCheckResponse.set(response); - } - - @Override - public void onFailure(Exception e) { - fail(e.getMessage()); - } - }); - - verify(client, times(3)).execute(same(XPackInfoAction.INSTANCE), any(), any()); - assertNotNull(licCheckResponse.get()); - assertFalse(licCheckResponse.get().isViolated()); - assertNull(licCheckResponse.get().get()); - } - - public void testCheckRemoteClusterLicenses_givenInvalidLicense() { - final AtomicInteger index = new AtomicInteger(0); - List<String> remoteClusterNames = Arrays.asList("good", "cluster-with-basic-license", "good2"); - final List<XPackInfoResponse> responses = new ArrayList<>(); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createBasicLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - - Client client = createMockClient(); - doAnswer(invocationMock -> { - @SuppressWarnings("raw_types") - ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; - listener.onResponse(responses.get(index.getAndIncrement())); - return null; - }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); - - MlRemoteLicenseChecker licenseChecker = new MlRemoteLicenseChecker(client); - AtomicReference<MlRemoteLicenseChecker.LicenseViolation> licCheckResponse = new AtomicReference<>(); - - licenseChecker.checkRemoteClusterLicenses(remoteClusterNames, - new ActionListener<MlRemoteLicenseChecker.LicenseViolation>() { - @Override - public void onResponse(MlRemoteLicenseChecker.LicenseViolation response) { - licCheckResponse.set(response); - } - - @Override - public void onFailure(Exception e) { - fail(e.getMessage()); - } - }); - - verify(client, times(2)).execute(same(XPackInfoAction.INSTANCE), any(), any()); - assertNotNull(licCheckResponse.get()); - assertTrue(licCheckResponse.get().isViolated()); - assertEquals("cluster-with-basic-license", licCheckResponse.get().get().getClusterName()); - assertEquals("BASIC", licCheckResponse.get().get().getLicenseInfo().getType()); - } - - public void testBuildErrorMessage() { - XPackInfoResponse.LicenseInfo platinumLicence = createPlatinumLicenseResponse(); -
MlRemoteLicenseChecker.RemoteClusterLicenseInfo info = - new MlRemoteLicenseChecker.RemoteClusterLicenseInfo("platinum-cluster", platinumLicence); - assertEquals(Strings.toString(platinumLicence), MlRemoteLicenseChecker.buildErrorMessage(info)); - - XPackInfoResponse.LicenseInfo basicLicense = createBasicLicenseResponse(); - info = new MlRemoteLicenseChecker.RemoteClusterLicenseInfo("basic-cluster", basicLicense); - String expected = "The license mode [BASIC] on cluster [basic-cluster] does not enable Machine Learning. " - + Strings.toString(basicLicense); - assertEquals(expected, MlRemoteLicenseChecker.buildErrorMessage(info)); - - XPackInfoResponse.LicenseInfo expiredLicense = createExpiredLicenseResponse(); - info = new MlRemoteLicenseChecker.RemoteClusterLicenseInfo("expired-cluster", expiredLicense); - expected = "The license on cluster [expired-cluster] is not active. " + Strings.toString(expiredLicense); - assertEquals(expected, MlRemoteLicenseChecker.buildErrorMessage(info)); - } - - private Client createMockClient() { - Client client = mock(Client.class); - ThreadPool threadPool = mock(ThreadPool.class); - when(client.threadPool()).thenReturn(threadPool); - when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); - when(client.getRemoteClusterClient(anyString())).thenReturn(client); - return client; - } - - private XPackInfoResponse.LicenseInfo createPlatinumLicenseResponse() { - return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.ACTIVE, randomNonNegativeLong()); - } - - private XPackInfoResponse.LicenseInfo createBasicLicenseResponse() { - return new XPackInfoResponse.LicenseInfo("uid", "BASIC", "BASIC", LicenseStatus.ACTIVE, randomNonNegativeLong()); - } - - private XPackInfoResponse.LicenseInfo createExpiredLicenseResponse() { - return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.EXPIRED, randomNonNegativeLong()); - } -} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java index 52e38a70abd..11ff693bad7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig; @@ -173,4 +173,4 @@ public class DataExtractorFactoryTests extends ESTestCase { fieldCapsMap.put(type, fieldCaps); when(fieldsCapabilities.getField(field)).thenReturn(fieldCapsMap); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java index 16b62cc23de..47d2eb828c6 100644 --- 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java @@ -11,9 +11,9 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; -import org.elasticsearch.search.aggregations.metrics.max.Max; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; +import org.elasticsearch.search.aggregations.metrics.Max; +import org.elasticsearch.search.aggregations.metrics.Percentile; +import org.elasticsearch.search.aggregations.metrics.Percentiles; import java.util.ArrayList; import java.util.Collections; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessorTests.java index ffadcfab43c..bf283b5be51 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessorTests.java @@ -11,7 +11,7 @@ import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.max.Max; +import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.test.ESTestCase; import java.io.ByteArrayOutputStream; @@ -457,4 +457,4 @@ public class AggregationToJsonProcessorTests extends ESTestCase { keyValuePairsWritten = processor.getKeyValueCount(); return outputStream.toString(StandardCharsets.UTF_8.name()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java index 18c35155b6f..e85b1e3a6df 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java @@ -18,8 +18,8 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregations; -import org.elasticsearch.search.aggregations.metrics.max.Max; -import org.elasticsearch.search.aggregations.metrics.min.Min; +import org.elasticsearch.search.aggregations.metrics.Max; +import org.elasticsearch.search.aggregations.metrics.Min; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactoryTests.java new file mode 100644 index 00000000000..6bcb827be94 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactoryTests.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.filestructurefinder; + +public class DelimitedFileStructureFinderFactoryTests extends FileStructureTestCase { + + private FileStructureFinderFactory csvFactory = new DelimitedFileStructureFinderFactory(',', 2, false); + private FileStructureFinderFactory tsvFactory = new DelimitedFileStructureFinderFactory('\t', 2, false); + private FileStructureFinderFactory semiColonDelimitedfactory = new DelimitedFileStructureFinderFactory(';', 4, false); + private FileStructureFinderFactory pipeDelimitedFactory = new DelimitedFileStructureFinderFactory('|', 5, true); + + // CSV - no need to check JSON or XML because they come earlier in the order we check formats + + public void testCanCreateCsvFromSampleGivenCsv() { + + assertTrue(csvFactory.canCreateFromSample(explanation, CSV_SAMPLE)); + } + + public void testCanCreateCsvFromSampleGivenTsv() { + + assertFalse(csvFactory.canCreateFromSample(explanation, TSV_SAMPLE)); + } + + public void testCanCreateCsvFromSampleGivenSemiColonDelimited() { + + assertFalse(csvFactory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); + } + + public void testCanCreateCsvFromSampleGivenPipeDelimited() { + + assertFalse(csvFactory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); + } + + public void testCanCreateCsvFromSampleGivenText() { + + assertFalse(csvFactory.canCreateFromSample(explanation, TEXT_SAMPLE)); + } + + // TSV - no need to check JSON, XML or CSV because they come earlier in the order we check formats + + public void testCanCreateTsvFromSampleGivenTsv() { + + assertTrue(tsvFactory.canCreateFromSample(explanation, TSV_SAMPLE)); + } + + public void testCanCreateTsvFromSampleGivenSemiColonDelimited() { + + assertFalse(tsvFactory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); + } + + public void testCanCreateTsvFromSampleGivenPipeDelimited() { + + assertFalse(tsvFactory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); + } + + public void testCanCreateTsvFromSampleGivenText() { + + assertFalse(tsvFactory.canCreateFromSample(explanation, TEXT_SAMPLE)); + } + + // Semi-colon delimited - no need to check JSON, XML, CSV or TSV because they come earlier in the order we check formats + + public void testCanCreateSemiColonDelimitedFromSampleGivenSemiColonDelimited() { + + assertTrue(semiColonDelimitedfactory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); + } + + public void testCanCreateSemiColonDelimitedFromSampleGivenPipeDelimited() { + + assertFalse(semiColonDelimitedfactory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); + } + + public void testCanCreateSemiColonDelimitedFromSampleGivenText() { + + assertFalse(semiColonDelimitedfactory.canCreateFromSample(explanation, TEXT_SAMPLE)); + } + + // Pipe delimited - no need to check JSON, XML, CSV, TSV or 
semi-colon delimited + // values because they come earlier in the order we check formats + + public void testCanCreatePipeDelimitedFromSampleGivenPipeDelimited() { + + assertTrue(pipeDelimitedFactory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); + } + + public void testCanCreatePipeDelimitedFromSampleGivenText() { + + assertFalse(pipeDelimitedFactory.canCreateFromSample(explanation, TEXT_SAMPLE)); + } +} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java similarity index 63% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinderTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java index b62832a0a19..6d1f039399e 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java @@ -3,36 +3,37 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; import org.supercsv.prefs.CsvPreference; import java.io.IOException; import java.util.Arrays; import java.util.Collections; -import static org.elasticsearch.xpack.ml.logstructurefinder.SeparatedValuesLogStructureFinder.levenshteinFieldwiseCompareRows; -import static org.elasticsearch.xpack.ml.logstructurefinder.SeparatedValuesLogStructureFinder.levenshteinDistance; +import static org.elasticsearch.xpack.ml.filestructurefinder.DelimitedFileStructureFinder.levenshteinFieldwiseCompareRows; +import static org.elasticsearch.xpack.ml.filestructurefinder.DelimitedFileStructureFinder.levenshteinDistance; import static org.hamcrest.Matchers.arrayContaining; -public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase { +public class DelimitedFileStructureFinderTests extends FileStructureTestCase { - private LogStructureFinderFactory factory = new CsvLogStructureFinderFactory(); + private FileStructureFinderFactory csvFactory = new DelimitedFileStructureFinderFactory(',', 2, false); public void testCreateConfigsGivenCompleteCsv() throws Exception { String sample = "time,message\n" + "2018-05-17T13:41:23,hello\n" + "2018-05-17T13:41:32,hello again\n"; - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); - LogStructure structure = structureFinder.getStructure(); + FileStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); 
+ assertEquals(FileStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -41,7 +42,7 @@ public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase } assertEquals("^\"?time\"?,\"?message\"?", structure.getExcludeLinesPattern()); assertEquals("^\"?\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("time", "message"), structure.getInputFields()); @@ -55,15 +56,15 @@ public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase "\"hello\n" + "world\",2018-05-17T13:41:23,1\n" + "\"hello again\n"; // note that this last record is truncated - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); - LogStructure structure = structureFinder.getStructure(); + FileStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(FileStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -72,7 +73,7 @@ public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase } assertEquals("^\"?message\"?,\"?time\"?,\"?count\"?", structure.getExcludeLinesPattern()); assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("message", "time", "count"), structure.getInputFields()); @@ -88,15 +89,15 @@ public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase "2,2016-12-31 15:15:01,2016-12-31 15:15:09,1,.00,1,N,264,264,2,1,0,0.5,0,0,0.3,1.8,,\n" + "1,2016-12-01 00:00:01,2016-12-01 00:10:22,1,1.60,1,N,163,143,2,9,0.5,0.5,0,0,0.3,10.3,,\n" + "1,2016-12-01 00:00:01,2016-12-01 00:11:01,1,1.40,1,N,164,229,1,9,0.5,0.5,2.05,0,0.3,12.35,,\n"; - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); - LogStructure structure = structureFinder.getStructure(); + FileStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(FileStructure.Format.DELIMITED, 
structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -108,7 +109,7 @@ public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?,\"?\"?,\"?\"?", structure.getExcludeLinesPattern()); assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime", "passenger_count", "trip_distance", @@ -126,15 +127,15 @@ public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase "2,2016-12-31 15:15:01,2016-12-31 15:15:09,1,.00,1,N,264,264,2,1,0,0.5,0,0,0.3,1.8,,\n" + "1,2016-12-01 00:00:01,2016-12-01 00:10:22,1,1.60,1,N,163,143,2,9,0.5,0.5,0,0,0.3,10.3,,\n" + "1,2016-12-01 00:00:01,2016-12-01 00:11:01,1,1.40,1,N,164,229,1,9,0.5,0.5,2.05,0,0.3,12.35,,\n"; - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); - LogStructure structure = structureFinder.getStructure(); + FileStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(FileStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -146,7 +147,7 @@ public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?", structure.getExcludeLinesPattern()); assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime", "passenger_count", "trip_distance", @@ -161,15 +162,15 @@ public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase String sample = "\"pos_id\",\"trip_id\",\"latitude\",\"longitude\",\"altitude\",\"timestamp\"\n" + "\"1\",\"3\",\"4703.7815\",\"1527.4713\",\"359.9\",\"2017-01-19 16:19:04.742113\"\n" + "\"2\",\"3\",\"4703.7815\",\"1527.4714\",\"359.9\",\"2017-01-19 16:19:05.741890\"\n"; - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + 
FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); - LogStructure structure = structureFinder.getStructure(); + FileStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(FileStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -179,7 +180,7 @@ public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase assertEquals("^\"?pos_id\"?,\"?trip_id\"?,\"?latitude\"?,\"?longitude\"?,\"?altitude\"?,\"?timestamp\"?", structure.getExcludeLinesPattern()); assertNull(structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("pos_id", "trip_id", "latitude", "longitude", "altitude", "timestamp"), structure.getInputFields()); @@ -195,8 +196,8 @@ public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase "2014-06-23 00:00:01Z,JBU,877.5927,farequote\n" + "2014-06-23 00:00:01Z,KLM,1355.4812,farequote\n"; - Tuple<Boolean, String[]> header = SeparatedValuesLogStructureFinder.findHeaderFromSample(explanation, - SeparatedValuesLogStructureFinder.readRows(withHeader, CsvPreference.EXCEL_PREFERENCE).v1()); + Tuple<Boolean, String[]> header = DelimitedFileStructureFinder.findHeaderFromSample(explanation, + DelimitedFileStructureFinder.readRows(withHeader, CsvPreference.EXCEL_PREFERENCE).v1()); assertTrue(header.v1()); assertThat(header.v2(), arrayContaining("time", "airline", "responsetime", "sourcetype")); @@ -208,8 +209,8 @@ public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase "2014-06-23 00:00:01Z,JBU,877.5927,farequote\n" + "2014-06-23 00:00:01Z,KLM,1355.4812,farequote\n"; - Tuple<Boolean, String[]> header = SeparatedValuesLogStructureFinder.findHeaderFromSample(explanation, - SeparatedValuesLogStructureFinder.readRows(withoutHeader, CsvPreference.EXCEL_PREFERENCE).v1()); + Tuple<Boolean, String[]> header = DelimitedFileStructureFinder.findHeaderFromSample(explanation, + DelimitedFileStructureFinder.readRows(withoutHeader, CsvPreference.EXCEL_PREFERENCE).v1()); assertFalse(header.v1()); assertThat(header.v2(), arrayContaining("column1", "column2", "column3", "column4")); @@ -251,43 +252,43 @@ public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase public void testLineHasUnescapedQuote() { - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\",b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a,b\",c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a,b,c\"", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,\"b\",c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,b,\"c\"", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,\"b\"\"\",c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,b,\"c\"\"\"",
CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"\"\"a\",b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\"\"\",b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a,\"\"b\",c", CsvPreference.EXCEL_PREFERENCE)); - assertTrue(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("between\"words,b,c", CsvPreference.EXCEL_PREFERENCE)); - assertTrue(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("x and \"y\",b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("a,b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("\"a\",b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("\"a,b\",c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("\"a,b,c\"", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("a,\"b\",c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("a,b,\"c\"", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("a,\"b\"\"\",c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("a,b,\"c\"\"\"", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("\"\"\"a\",b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("\"a\"\"\",b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("\"a,\"\"b\",c", CsvPreference.EXCEL_PREFERENCE)); + assertTrue(DelimitedFileStructureFinder.lineHasUnescapedQuote("between\"words,b,c", CsvPreference.EXCEL_PREFERENCE)); + assertTrue(DelimitedFileStructureFinder.lineHasUnescapedQuote("x and \"y\",b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\"\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\tb\"\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\tb\tc\"", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\t\"b\"\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\tb\t\"c\"", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\t\"b\"\"\"\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\tb\t\"c\"\"\"", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"\"\"a\"\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\"\"\"\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\t\"\"b\"\tc", CsvPreference.TAB_PREFERENCE)); - assertTrue(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("between\"words\tb\tc", 
CsvPreference.TAB_PREFERENCE)); - assertTrue(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("x and \"y\"\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("a\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("\"a\"\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("\"a\tb\"\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("\"a\tb\tc\"", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("a\t\"b\"\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("a\tb\t\"c\"", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("a\t\"b\"\"\"\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("a\tb\t\"c\"\"\"", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("\"\"\"a\"\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("\"a\"\"\"\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedFileStructureFinder.lineHasUnescapedQuote("\"a\t\"\"b\"\tc", CsvPreference.TAB_PREFERENCE)); + assertTrue(DelimitedFileStructureFinder.lineHasUnescapedQuote("between\"words\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertTrue(DelimitedFileStructureFinder.lineHasUnescapedQuote("x and \"y\"\tb\tc", CsvPreference.TAB_PREFERENCE)); } public void testRowContainsDuplicateNonEmptyValues() { - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Collections.singletonList("a"))); - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Collections.singletonList(""))); - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "c"))); - assertTrue(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "a"))); - assertTrue(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "b"))); - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "", ""))); - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("", "a", ""))); + assertFalse(DelimitedFileStructureFinder.rowContainsDuplicateNonEmptyValues(Collections.singletonList("a"))); + assertFalse(DelimitedFileStructureFinder.rowContainsDuplicateNonEmptyValues(Collections.singletonList(""))); + assertFalse(DelimitedFileStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "c"))); + assertTrue(DelimitedFileStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "a"))); + assertTrue(DelimitedFileStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "b"))); + assertFalse(DelimitedFileStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "", ""))); + assertFalse(DelimitedFileStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("", "a", ""))); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FieldStatsCalculatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FieldStatsCalculatorTests.java new file mode 100644 index 00000000000..08035dc741d 
--- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FieldStatsCalculatorTests.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.filestructurefinder; + +import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; + +import java.util.Arrays; +import java.util.Collections; +import java.util.DoubleSummaryStatistics; +import java.util.List; +import java.util.Map; + +public class FieldStatsCalculatorTests extends FileStructureTestCase { + + public void testMean() { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + calculator.accept(Arrays.asList("1", "3.5", "2.5", "9")); + + assertEquals(4.0, calculator.calculateMean(), 1e-10); + } + + public void testMedianGivenOddCount() { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + calculator.accept(Arrays.asList("3", "23", "-1", "5", "1000")); + + assertEquals(5.0, calculator.calculateMedian(), 1e-10); + } + + public void testMedianGivenOddCountMinimal() { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + calculator.accept(Collections.singletonList("3")); + + assertEquals(3.0, calculator.calculateMedian(), 1e-10); + } + + public void testMedianGivenEvenCountMiddleValuesDifferent() { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + calculator.accept(Arrays.asList("3", "23", "-1", "5", "1000", "6")); + + assertEquals(5.5, calculator.calculateMedian(), 1e-10); + } + + public void testMedianGivenEvenCountMiddleValuesSame() { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + calculator.accept(Arrays.asList("3", "23", "-1", "5", "1000", "5")); + + assertEquals(5.0, calculator.calculateMedian(), 1e-10); + } + + public void testMedianGivenEvenCountMinimal() { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + calculator.accept(Arrays.asList("4", "4")); + + assertEquals(4.0, calculator.calculateMedian(), 1e-10); + } + + public void testTopHitsNumeric() { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + calculator.accept(Arrays.asList("4", "4", "7", "4", "6", "5", "6", "5", "16", "4", "5")); + + List<Map<String, Object>> topHits = calculator.findNumericTopHits(3); + + assertEquals(3, topHits.size()); + assertEquals(4.0, topHits.get(0).get("value")); + assertEquals(4, topHits.get(0).get("count")); + assertEquals(5.0, topHits.get(1).get("value")); + assertEquals(3, topHits.get(1).get("count")); + assertEquals(6.0, topHits.get(2).get("value")); + assertEquals(2, topHits.get(2).get("count")); + } + + public void testTopHitsString() { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + calculator.accept(Arrays.asList("s", "s", "d", "s", "f", "x", "f", "x", "n", "s", "x")); + + List<Map<String, Object>> topHits = calculator.findStringTopHits(3); + + assertEquals(3, topHits.size()); + assertEquals("s", topHits.get(0).get("value")); + assertEquals(4, topHits.get(0).get("count")); + assertEquals("x", topHits.get(1).get("value")); + assertEquals(3, topHits.get(1).get("count")); + assertEquals("f", topHits.get(2).get("value")); + assertEquals(2, topHits.get(2).get("count")); + } + + public void testCalculateGivenEmpty() { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + calculator.accept(Collections.emptyList()); + + FieldStats
stats = calculator.calculate(3); + + assertEquals(0L, stats.getCount()); + assertEquals(0, stats.getCardinality()); + assertNull(stats.getMinValue()); + assertNull(stats.getMaxValue()); + assertNull(stats.getMeanValue()); + assertNull(stats.getMedianValue()); + assertEquals(0, stats.getTopHits().size()); + } + + public void testCalculateGivenNumericField() { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + calculator.accept(Arrays.asList("4", "4", "7", "4", "6", "5", "6", "5", "16", "4", "5")); + + FieldStats stats = calculator.calculate(3); + + assertEquals(11L, stats.getCount()); + assertEquals(5, stats.getCardinality()); + assertEquals(4.0, stats.getMinValue(), 1e-10); + assertEquals(16.0, stats.getMaxValue(), 1e-10); + assertEquals(6.0, stats.getMeanValue(), 1e-10); + assertEquals(5.0, stats.getMedianValue(), 1e-10); + + List<Map<String, Object>> topHits = stats.getTopHits(); + + assertEquals(3, topHits.size()); + assertEquals(4.0, topHits.get(0).get("value")); + assertEquals(4, topHits.get(0).get("count")); + assertEquals(5.0, topHits.get(1).get("value")); + assertEquals(3, topHits.get(1).get("count")); + assertEquals(6.0, topHits.get(2).get("value")); + assertEquals(2, topHits.get(2).get("count")); + } + + public void testCalculateGivenStringField() { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + calculator.accept(Arrays.asList("s", "s", "d", "s", "f", "x", "f", "x", "n", "s", "x")); + + FieldStats stats = calculator.calculate(3); + + assertEquals(11L, stats.getCount()); + assertEquals(5, stats.getCardinality()); + assertNull(stats.getMinValue()); + assertNull(stats.getMaxValue()); + assertNull(stats.getMeanValue()); + assertNull(stats.getMedianValue()); + + List<Map<String, Object>> topHits = stats.getTopHits(); + + assertEquals(3, topHits.size()); + assertEquals("s", topHits.get(0).get("value")); + assertEquals(4, topHits.get(0).get("count")); + assertEquals("x", topHits.get(1).get("value")); + assertEquals(3, topHits.get(1).get("count")); + assertEquals("f", topHits.get(2).get("value")); + assertEquals(2, topHits.get(2).get("count")); + } + + public void testCalculateGivenMixedField() { + + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + calculator.accept(Arrays.asList("4", "4", "d", "4", "f", "x", "f", "x", "16", "4", "x")); + + FieldStats stats = calculator.calculate(3); + + assertEquals(11L, stats.getCount()); + assertEquals(5, stats.getCardinality()); + assertNull(stats.getMinValue()); + assertNull(stats.getMaxValue()); + assertNull(stats.getMeanValue()); + assertNull(stats.getMedianValue()); + + List<Map<String, Object>> topHits = stats.getTopHits(); + + assertEquals(3, topHits.size()); + assertEquals("4", topHits.get(0).get("value")); + assertEquals(4, topHits.get(0).get("count")); + assertEquals("x", topHits.get(1).get("value")); + assertEquals(3, topHits.get(1).get("count")); + assertEquals("f", topHits.get(2).get("value")); + assertEquals(2, topHits.get(2).get("count")); + } + + public void testJavaStatsEquivalence() { + + DoubleSummaryStatistics summaryStatistics = new DoubleSummaryStatistics(); + FieldStatsCalculator calculator = new FieldStatsCalculator(); + + for (int numValues = randomIntBetween(1000, 10000); numValues > 0; --numValues) { + + double value = randomDouble(); + summaryStatistics.accept(value); + calculator.accept(Collections.singletonList(Double.toString(value))); + } + + FieldStats stats = calculator.calculate(1); + + assertEquals(summaryStatistics.getCount(), stats.getCount()); + assertEquals(summaryStatistics.getMin(), stats.getMinValue(), 1e-10);
+ assertEquals(summaryStatistics.getMax(), stats.getMaxValue(), 1e-10); + assertEquals(summaryStatistics.getAverage(), stats.getMeanValue(), 1e-10); + } +} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManagerTests.java similarity index 86% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManagerTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManagerTests.java index 1f8691de8cf..10e780f1d34 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManagerTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import com.ibm.icu.text.CharsetMatch; @@ -15,9 +15,9 @@ import java.util.Arrays; import static org.hamcrest.Matchers.startsWith; import static org.hamcrest.core.IsInstanceOf.instanceOf; -public class LogStructureFinderManagerTests extends LogStructureTestCase { +public class FileStructureFinderManagerTests extends FileStructureTestCase { - private LogStructureFinderManager structureFinderManager = new LogStructureFinderManager(); + private FileStructureFinderManager structureFinderManager = new FileStructureFinderManager(); public void testFindCharsetGivenCharacterWidths() throws Exception { @@ -49,24 +49,24 @@ public class LogStructureFinderManagerTests extends LogStructureTestCase { public void testMakeBestStructureGivenJson() throws Exception { assertThat(structureFinderManager.makeBestStructureFinder(explanation, "{ \"time\": \"2018-05-17T13:41:23\", \"message\": \"hello\" }", StandardCharsets.UTF_8.name(), randomBoolean()), - instanceOf(JsonLogStructureFinder.class)); + instanceOf(JsonFileStructureFinder.class)); } public void testMakeBestStructureGivenXml() throws Exception { assertThat(structureFinderManager.makeBestStructureFinder(explanation, "<log time=\"2018-05-17T13:41:23\"><message>hello</message></log>", StandardCharsets.UTF_8.name(), randomBoolean()), - instanceOf(XmlLogStructureFinder.class)); + instanceOf(XmlFileStructureFinder.class)); } public void testMakeBestStructureGivenCsv() throws Exception { assertThat(structureFinderManager.makeBestStructureFinder(explanation, "time,message\n" + "2018-05-17T13:41:23,hello\n", StandardCharsets.UTF_8.name(), randomBoolean()), - instanceOf(SeparatedValuesLogStructureFinder.class)); + instanceOf(DelimitedFileStructureFinder.class)); } public void testMakeBestStructureGivenText() throws Exception { assertThat(structureFinderManager.makeBestStructureFinder(explanation, "[2018-05-17T13:41:23] hello\n" + "[2018-05-17T13:41:24] hello again\n", StandardCharsets.UTF_8.name(), randomBoolean()), - instanceOf(TextLogStructureFinder.class)); + instanceOf(TextLogFileStructureFinder.class)); } } diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTestCase.java
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureTestCase.java similarity index 89% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTestCase.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureTestCase.java index 5f9a87ef2a7..6246a7ad01e 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESTestCase; @@ -17,10 +17,10 @@ import java.util.List; import java.util.Locale; import java.util.stream.Collectors; -public abstract class LogStructureTestCase extends ESTestCase { +public abstract class FileStructureTestCase extends ESTestCase { protected static final List<String> POSSIBLE_CHARSETS = Collections.unmodifiableList(Charset.availableCharsets().keySet().stream() - .filter(name -> LogStructureFinderManager.FILEBEAT_SUPPORTED_ENCODINGS.contains(name.toLowerCase(Locale.ROOT))) + .filter(name -> FileStructureFinderManager.FILEBEAT_SUPPORTED_ENCODINGS.contains(name.toLowerCase(Locale.ROOT))) .collect(Collectors.toList())); protected static final String CSV_SAMPLE = "time,id,value\n" + @@ -34,14 +34,14 @@ public abstract class LogStructureTestCase extends ESTestCase { "\"level\":\"INFO\",\"pid\":42,\"thread\":\"0x7fff7d2a8000\",\"message\":\"message 2\",\"class\":\"ml\"," + "\"method\":\"core::SomeNoiseMaker\",\"file\":\"Noisemaker.cc\",\"line\":333}\n"; - protected static final String PIPE_SEPARATED_VALUES_SAMPLE = "2018-01-06 16:56:14.295748|INFO |VirtualServer |1 |" + + protected static final String PIPE_DELIMITED_SAMPLE = "2018-01-06 16:56:14.295748|INFO |VirtualServer |1 |" + "listening on 0.0.0.0:9987, :::9987\n" + "2018-01-06 17:19:44.465252|INFO |VirtualServer |1 |client " + "'User1'(id:2) changed default admin channelgroup to 'Guest'(id:8)\n" + "2018-01-06 17:21:25.764368|INFO |VirtualServer |1 |client " + "'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel 'Default Channel'(id:1)"; - protected static final String SEMI_COLON_SEPARATED_VALUES_SAMPLE = "\"pos_id\";\"trip_id\";\"latitude\";\"longitude\";\"altitude\";" + + protected static final String SEMI_COLON_DELIMITED_SAMPLE = "\"pos_id\";\"trip_id\";\"latitude\";\"longitude\";\"altitude\";" + "\"timestamp\"\n" + "\"1\";\"3\";\"4703.7815\";\"1527.4713\";\"359.9\";\"2017-01-19 16:19:04.742113\"\n" + "\"2\";\"3\";\"4703.7815\";\"1527.4714\";\"359.9\";\"2017-01-19 16:19:05.741890\"\n" + diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtilsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java similarity index 58% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtilsTests.java rename to
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java index 7e92728f01a..ac8f95670ab 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtilsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java @@ -3,36 +3,39 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.xpack.ml.logstructurefinder.TimestampFormatFinder.TimestampMatch; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; +import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; +import java.util.SortedMap; import static org.hamcrest.Matchers.contains; -public class LogStructureUtilsTests extends LogStructureTestCase { +public class FileStructureUtilsTests extends FileStructureTestCase { public void testMoreLikelyGivenText() { - assertTrue(LogStructureUtils.isMoreLikelyTextThanKeyword("the quick brown fox jumped over the lazy dog")); - assertTrue(LogStructureUtils.isMoreLikelyTextThanKeyword(randomAlphaOfLengthBetween(257, 10000))); + assertTrue(FileStructureUtils.isMoreLikelyTextThanKeyword("the quick brown fox jumped over the lazy dog")); + assertTrue(FileStructureUtils.isMoreLikelyTextThanKeyword(randomAlphaOfLengthBetween(257, 10000))); } public void testMoreLikelyGivenKeyword() { - assertFalse(LogStructureUtils.isMoreLikelyTextThanKeyword("1")); - assertFalse(LogStructureUtils.isMoreLikelyTextThanKeyword("DEBUG")); - assertFalse(LogStructureUtils.isMoreLikelyTextThanKeyword(randomAlphaOfLengthBetween(1, 256))); + assertFalse(FileStructureUtils.isMoreLikelyTextThanKeyword("1")); + assertFalse(FileStructureUtils.isMoreLikelyTextThanKeyword("DEBUG")); + assertFalse(FileStructureUtils.isMoreLikelyTextThanKeyword(randomAlphaOfLengthBetween(1, 256))); } public void testSingleSampleSingleField() { Map<String, Object> sample = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); Tuple<String, TimestampMatch> match = - LogStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample)); + FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample)); assertNotNull(match); assertEquals("field1", match.v1()); assertThat(match.v2().dateFormats, contains("ISO8601")); @@ -43,7 +46,7 @@ public class LogStructureUtilsTests extends LogStructureTestCase { Map<String, Object> sample1 = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); Map<String, Object> sample2 = Collections.singletonMap("field1", "2018-05-24T17:33:39,406"); Tuple<String, TimestampMatch> match = - LogStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); + FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); assertNotNull(match); assertEquals("field1", match.v1()); assertThat(match.v2().dateFormats, contains("ISO8601")); @@ -54,7 +57,7 @@ public class LogStructureUtilsTests extends LogStructureTestCase { Map<String, Object> sample1 = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); Map<String, Object> sample2 = Collections.singletonMap("field1", "2018-05-24 17:33:39,406"); Tuple<String, TimestampMatch>
match = - LogStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); + FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); assertNull(match); } @@ -62,7 +65,7 @@ public class LogStructureUtilsTests extends LogStructureTestCase { Map<String, Object> sample1 = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); Map<String, Object> sample2 = Collections.singletonMap("another_field", "2018-05-24T17:33:39,406"); Tuple<String, TimestampMatch> match = - LogStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); + FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); assertNull(match); } @@ -72,7 +75,7 @@ public class LogStructureUtilsTests extends LogStructureTestCase { sample.put("time", "2018-05-24 17:28:31,735"); sample.put("bar", 42); Tuple<String, TimestampMatch> match = - LogStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample)); + FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample)); assertNotNull(match); assertEquals("time", match.v1()); assertThat(match.v2().dateFormats, contains("YYYY-MM-dd HH:mm:ss,SSS")); @@ -89,7 +92,7 @@ public class LogStructureUtilsTests extends LogStructureTestCase { sample2.put("time", "2018-05-29 11:53:02,837"); sample2.put("bar", 17); Tuple<String, TimestampMatch> match = - LogStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); + FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); assertNotNull(match); assertEquals("time", match.v1()); assertThat(match.v2().dateFormats, contains("YYYY-MM-dd HH:mm:ss,SSS")); @@ -106,7 +109,7 @@ public class LogStructureUtilsTests extends LogStructureTestCase { sample2.put("time", "May 29 2018 11:53:02"); sample2.put("bar", 17); Tuple<String, TimestampMatch> match = - LogStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); + FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); assertNull(match); } @@ -120,7 +123,7 @@ public class LogStructureUtilsTests extends LogStructureTestCase { sample2.put("time", "2018-05-29 11:53:02,837"); sample2.put("bar", 17); Tuple<String, TimestampMatch> match = - LogStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); + FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); assertNotNull(match); assertEquals("time", match.v1()); assertThat(match.v2().dateFormats, contains("YYYY-MM-dd HH:mm:ss,SSS")); @@ -137,7 +140,7 @@ public class LogStructureUtilsTests extends LogStructureTestCase { sample2.put("time", "May 29 2018 11:53:02"); sample2.put("red_herring", "17"); Tuple<String, TimestampMatch> match = - LogStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); + FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); assertNotNull(match); assertEquals("time", match.v1()); assertThat(match.v2().dateFormats, contains("MMM dd YYYY HH:mm:ss", "MMM d YYYY HH:mm:ss")); @@ -154,7 +157,7 @@ public class LogStructureUtilsTests extends LogStructureTestCase { sample2.put("time2", "May 29 2018 11:53:02"); sample2.put("bar", 42); Tuple<String, TimestampMatch> match = - LogStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); + FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); assertNull(match); } @@ -170,7 +173,7 @@ public class LogStructureUtilsTests extends LogStructureTestCase { sample2.put("time3", "Thu, May 10 2018 11:53:02"); sample2.put("bar", 42); Tuple<String, TimestampMatch> match = - LogStructureUtils.guessTimestampField(explanation,
Arrays.asList(sample1, sample2)); + FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2)); assertNotNull(match); assertEquals("time2", match.v1()); assertThat(match.v2().dateFormats, contains("MMM dd YYYY HH:mm:ss", "MMM d YYYY HH:mm:ss")); @@ -178,96 +181,83 @@ public class LogStructureUtilsTests extends LogStructureTestCase { } public void testGuessMappingGivenNothing() { - assertNull(LogStructureUtils.guessMapping(explanation, "foo", Collections.emptyList())); + assertNull(guessMapping(explanation, "foo", Collections.emptyList())); } public void testGuessMappingGivenKeyword() { - Map<String, Object> expected = Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"); + Map<String, Object> expected = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"); - assertEquals(expected, - LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList("ERROR", "INFO", "DEBUG"))); - assertEquals(expected, - LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList("2018-06-11T13:26:47Z", "not a date"))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("ERROR", "INFO", "DEBUG"))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("2018-06-11T13:26:47Z", "not a date"))); } public void testGuessMappingGivenText() { - Map<String, Object> expected = Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "text"); + Map<String, Object> expected = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "text"); - assertEquals(expected, LogStructureUtils.guessMapping(explanation, "foo", - Arrays.asList("a", "the quick brown fox jumped over the lazy dog"))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("a", "the quick brown fox jumped over the lazy dog"))); } public void testGuessMappingGivenIp() { - Map<String, Object> expected = Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "ip"); + Map<String, Object> expected = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"); - assertEquals(expected, - LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList("10.0.0.1", "172.16.0.1", "192.168.0.1"))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("10.0.0.1", "172.16.0.1", "192.168.0.1"))); } public void testGuessMappingGivenDouble() { - Map<String, Object> expected = Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "double"); + Map<String, Object> expected = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "double"); - assertEquals(expected, - LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList("3.14159265359", "0", "-8"))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("3.14159265359", "0", "-8"))); // 12345678901234567890 is too long for long - assertEquals(expected, - LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList("1", "2", "12345678901234567890"))); - assertEquals(expected, - LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList(3.14159265359, 0.0, 1e-308))); - assertEquals(expected, - LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList("-1e-1", "-1e308", "1e-308"))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("1", "2", "12345678901234567890"))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(3.14159265359, 0.0, 1e-308))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("-1e-1", "-1e308", "1e-308"))); } public void testGuessMappingGivenLong() { - Map<String, Object> expected =
Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "long"); + Map<String, Object> expected = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"); - assertEquals(expected, - LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList("500", "3", "-3"))); - assertEquals(expected, - LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList(500, 6, 0))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("500", "3", "-3"))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(500, 6, 0))); } public void testGuessMappingGivenDate() { - Map<String, Object> expected = Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "date"); + Map<String, Object> expected = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); - assertEquals(expected, LogStructureUtils.guessMapping(explanation, "foo", - Arrays.asList("2018-06-11T13:26:47Z", "2018-06-11T13:27:12Z"))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("2018-06-11T13:26:47Z", "2018-06-11T13:27:12Z"))); } public void testGuessMappingGivenBoolean() { - Map<String, Object> expected = Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "boolean"); + Map<String, Object> expected = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "boolean"); - assertEquals(expected, LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList("false", "true"))); - assertEquals(expected, LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList(true, false))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("false", "true"))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(true, false))); } public void testGuessMappingGivenArray() { - Map<String, Object> expected = Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "long"); + Map<String, Object> expected = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"); - assertEquals(expected, - LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList(42, Arrays.asList(1, -99)))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(42, Arrays.asList(1, -99)))); - expected = Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"); + expected = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"); - assertEquals(expected, - LogStructureUtils.guessMapping(explanation, "foo", Arrays.asList(new String[]{ "x", "y" }, "z"))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(new String[]{ "x", "y" }, "z"))); } public void testGuessMappingGivenObject() { - Map<String, Object> expected = Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "object"); + Map<String, Object> expected = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "object"); - assertEquals(expected, LogStructureUtils.guessMapping(explanation, "foo", + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(Collections.singletonMap("name", "value1"), Collections.singletonMap("name", "value2")))); } public void testGuessMappingGivenObjectAndNonObject() { - RuntimeException e = expectThrows(RuntimeException.class, () -> LogStructureUtils.guessMapping(explanation, + RuntimeException e = expectThrows(RuntimeException.class, () -> guessMapping(explanation, "foo", Arrays.asList(Collections.singletonMap("name", "value1"), "value2"))); assertEquals("Field [foo] has both object and non-object values - this is not supported by Elasticsearch", e.getMessage()); } - public void testGuessMappings() { + public void
+    public void testGuessMappingsAndCalculateFieldStats() {
         Map<String, Object> sample1 = new LinkedHashMap<>();
         sample1.put("foo", "not a time");
         sample1.put("time", "2018-05-24 17:28:31,735");
@@ -279,14 +269,42 @@ public class LogStructureUtilsTests extends LogStructureTestCase {
         sample2.put("bar", 17);
         sample2.put("nothing", null);
 
-        Map<String, Object> mappings = LogStructureUtils.guessMappings(explanation, Arrays.asList(sample1, sample2));
+        Tuple<SortedMap<String, Object>, SortedMap<String, FieldStats>> mappingsAndFieldStats =
+            FileStructureUtils.guessMappingsAndCalculateFieldStats(explanation, Arrays.asList(sample1, sample2));
+        assertNotNull(mappingsAndFieldStats);
+
+        Map<String, Object> mappings = mappingsAndFieldStats.v1();
         assertNotNull(mappings);
-        assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("foo"));
+        assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("foo"));
         Map<String, String> expectedTimeMapping = new HashMap<>();
-        expectedTimeMapping.put(LogStructureUtils.MAPPING_TYPE_SETTING, "date");
-        expectedTimeMapping.put(LogStructureUtils.MAPPING_FORMAT_SETTING, "YYYY-MM-dd HH:mm:ss,SSS");
+        expectedTimeMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date");
+        expectedTimeMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "YYYY-MM-dd HH:mm:ss,SSS");
         assertEquals(expectedTimeMapping, mappings.get("time"));
-        assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("bar"));
+        assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("bar"));
         assertNull(mappings.get("nothing"));
+
+        Map<String, FieldStats> fieldStats = mappingsAndFieldStats.v2();
+        assertNotNull(fieldStats);
+        assertEquals(3, fieldStats.size());
+        assertEquals(new FieldStats(2, 2, makeTopHits("not a time", 1, "whatever", 1)), fieldStats.get("foo"));
+        assertEquals(new FieldStats(2, 2, makeTopHits("2018-05-24 17:28:31,735", 1, "2018-05-29 11:53:02,837", 1)), fieldStats.get("time"));
+        assertEquals(new FieldStats(2, 2, 17.0, 42.0, 29.5, 29.5, makeTopHits(17.0, 1, 42.0, 1)), fieldStats.get("bar"));
+        assertNull(fieldStats.get("nothing"));
+    }
+
+    private Map<String, String> guessMapping(List<String> explanation, String fieldName, List<Object> fieldValues) {
+        Tuple<Map<String, String>, FieldStats> mappingAndFieldStats =
+            FileStructureUtils.guessMappingAndCalculateFieldStats(explanation, fieldName, fieldValues);
+        return (mappingAndFieldStats == null) ? null : mappingAndFieldStats.v1();
+    }
+
+    private List<Map<String, Object>> makeTopHits(Object value1, int count1, Object value2, int count2) {
+        Map<String, Object> topHit1 = new LinkedHashMap<>();
+        topHit1.put("value", value1);
+        topHit1.put("count", count1);
+        Map<String, Object> topHit2 = new LinkedHashMap<>();
+        topHit2.put("value", value2);
+        topHit2.put("count", count2);
+        return Arrays.asList(topHit1, topHit2);
     }
 }
diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/GrokPatternCreatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreatorTests.java
similarity index 81%
rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/GrokPatternCreatorTests.java
rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreatorTests.java
index 87f9f662698..858709e2764 100644
--- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/GrokPatternCreatorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreatorTests.java
@@ -3,10 +3,10 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
  */
-package org.elasticsearch.xpack.ml.logstructurefinder;
+package org.elasticsearch.xpack.ml.filestructurefinder;
 
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.xpack.ml.logstructurefinder.GrokPatternCreator.ValueOnlyGrokPatternCandidate;
+import org.elasticsearch.xpack.ml.filestructurefinder.GrokPatternCreator.ValueOnlyGrokPatternCandidate;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -17,7 +17,7 @@ import java.util.Map;
 
 import static org.hamcrest.Matchers.containsInAnyOrder;
 
-public class GrokPatternCreatorTests extends LogStructureTestCase {
+public class GrokPatternCreatorTests extends FileStructureTestCase {
 
     public void testBuildFieldName() {
         Map<String, Integer> fieldNameCountStore = new HashMap<>();
@@ -43,7 +43,7 @@ public class GrokPatternCreatorTests extends LogStructureTestCase {
         Collection<String> prefaces = new ArrayList<>();
         Collection<String> epilogues = new ArrayList<>();
 
-        candidate.processCaptures(fieldNameCountStore, matchingStrings, prefaces, epilogues, null);
+        candidate.processCaptures(fieldNameCountStore, matchingStrings, prefaces, epilogues, null, null);
 
         assertThat(prefaces, containsInAnyOrder("[", "[", "junk [", "["));
         assertThat(epilogues, containsInAnyOrder("] DEBUG ", "] ERROR ", "] INFO ", "] DEBUG "));
@@ -60,7 +60,7 @@ public class GrokPatternCreatorTests extends LogStructureTestCase {
         Collection<String> prefaces = new ArrayList<>();
         Collection<String> epilogues = new ArrayList<>();
 
-        candidate.processCaptures(fieldNameCountStore, matchingStrings, prefaces, epilogues, null);
+        candidate.processCaptures(fieldNameCountStore, matchingStrings, prefaces, epilogues, null, null);
 
         assertThat(prefaces, containsInAnyOrder("before ", "abc ", ""));
         assertThat(epilogues, containsInAnyOrder(" after", " xyz", ""));
@@ -73,7 +73,7 @@ public class GrokPatternCreatorTests extends LogStructureTestCase {
             "junk [2018-01-22T07:33:23] INFO ",
             "[2018-01-21T03:33:23] DEBUG ");
 
-        GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null);
+        GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null);
         grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0);
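The assertion just below expects each snippet's bracketed ISO8601 timestamp and its log level to be captured. As a rough, JDK-only sketch of what that Grok pattern means (the regex and group names here are illustrative stand-ins for the %{TIMESTAMP_ISO8601} and %{LOGLEVEL} captures, not code from this change):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class GrokCaptureSketch {
        public static void main(String[] args) {
            // Simplified stand-in for ".*?\\[%{TIMESTAMP_ISO8601:extra_timestamp}\\] %{LOGLEVEL:loglevel} "
            Pattern p = Pattern.compile(
                ".*?\\[(?<timestamp>\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2})\\] (?<loglevel>[A-Z]+) ");
            Matcher m = p.matcher("junk [2018-01-22T07:33:23] INFO ");
            if (m.find()) {
                System.out.println(m.group("timestamp")); // 2018-01-22T07:33:23
                System.out.println(m.group("loglevel"));  // INFO
            }
        }
    }

Grok's real LOGLEVEL pattern also accepts lowercase and mixed-case level names; the simplified character class above is only meant to make the two captured groups visible.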
assertEquals(".*?\\[%{TIMESTAMP_ISO8601:extra_timestamp}\\] %{LOGLEVEL:loglevel} ", @@ -87,7 +87,7 @@ public class GrokPatternCreatorTests extends LogStructureTestCase { " (4)", " (-5) "); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null); grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); assertEquals(".*?\\(%{INT:field}\\).*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); @@ -99,7 +99,7 @@ public class GrokPatternCreatorTests extends LogStructureTestCase { "prior to-3", "-4"); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null); grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); // It seems sensible that we don't detect these suffices as either base 10 or base 16 numbers @@ -113,7 +113,7 @@ public class GrokPatternCreatorTests extends LogStructureTestCase { " -123", "1f is hex"); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null); grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); assertEquals(".*?%{BASE16NUM:field}.*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); @@ -124,7 +124,7 @@ public class GrokPatternCreatorTests extends LogStructureTestCase { Collection snippets = Arrays.asList(" mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null); assertEquals("%{SYSLOGTIMESTAMP:timestamp} .*? .*?\\[%{INT:field}\\]: %{LOGLEVEL:loglevel} \\(.*? .*? .*?\\) .*? 
" + "%{QUOTEDSTRING:field2}: %{IP:ipaddress}#%{INT:field3}", grokPatternCreator.createGrokPatternFromExamples("SYSLOGTIMESTAMP", "timestamp")); assertEquals(5, mappings.size()); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("field2")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field3")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("field2")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field3")); } public void testCreateGrokPatternFromExamplesGivenCatalinaLogs() { @@ -215,12 +215,12 @@ public class GrokPatternCreatorTests extends LogStructureTestCase { "Invalid chunk ignored."); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null); assertEquals("%{CATALINA_DATESTAMP:timestamp} .*? 
.*?\\n%{LOGLEVEL:loglevel}: .*", grokPatternCreator.createGrokPatternFromExamples("CATALINA_DATESTAMP", "timestamp")); assertEquals(1, mappings.size()); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); } public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogs() { @@ -237,18 +237,18 @@ public class GrokPatternCreatorTests extends LogStructureTestCase { "Info\tsshd\tsubsystem request for sftp"); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null); assertEquals("%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{TIMESTAMP_ISO8601:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:loglevel}\\t.*", grokPatternCreator.createGrokPatternFromExamples("TIMESTAMP_ISO8601", "timestamp")); assertEquals(5, mappings.size()); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "date"), + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date"), mappings.get("extra_timestamp")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); } public void testFindFullLineGrokPatternGivenApacheCombinedLogs() { @@ -271,20 +271,20 @@ public class GrokPatternCreatorTests extends LogStructureTestCase { "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36\""); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null); assertEquals(new Tuple<>("timestamp", "%{COMBINEDAPACHELOG}"), grokPatternCreator.findFullLineGrokPattern()); assertEquals(10, mappings.size()); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "text"), mappings.get("agent")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("auth")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("bytes")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("clientip")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "double"), mappings.get("httpversion")); - 
assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("ident")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("referrer")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("request")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("response")); - assertEquals(Collections.singletonMap(LogStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("verb")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "text"), mappings.get("agent")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("auth")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("bytes")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("clientip")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "double"), mappings.get("httpversion")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("ident")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("referrer")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("request")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("response")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("verb")); } public void testAdjustForPunctuationGivenCommonPrefix() { @@ -300,7 +300,7 @@ public class GrokPatternCreatorTests extends LogStructureTestCase { ",\"rule1\",\"Accept\",\"\",\"\",\"\",\"0000000000000000\"" ); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null); Collection adjustedSnippets = grokPatternCreator.adjustForPunctuation(snippets); assertEquals("\",", grokPatternCreator.getOverallGrokPatternBuilder().toString()); @@ -317,7 +317,7 @@ public class GrokPatternCreatorTests extends LogStructureTestCase { "was added by 'User1'(id:2) to servergroup 'GAME'(id:9)" ); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null); Collection adjustedSnippets = grokPatternCreator.adjustForPunctuation(snippets); assertEquals("", grokPatternCreator.getOverallGrokPatternBuilder().toString()); diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderFactoryTests.java similarity index 71% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactoryTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderFactoryTests.java index 39ef3b9eedb..092f11676a8 100644 --- 
a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderFactoryTests.java @@ -3,11 +3,11 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; -public class JsonLogStructureFinderFactoryTests extends LogStructureTestCase { +public class JsonFileStructureFinderFactoryTests extends FileStructureTestCase { - private LogStructureFinderFactory factory = new JsonLogStructureFinderFactory(); + private FileStructureFinderFactory factory = new JsonFileStructureFinderFactory(); public void testCanCreateFromSampleGivenJson() { @@ -29,14 +29,14 @@ public class JsonLogStructureFinderFactoryTests extends LogStructureTestCase { assertFalse(factory.canCreateFromSample(explanation, TSV_SAMPLE)); } - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { + public void testCanCreateFromSampleGivenSemiColonDelimited() { - assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); + assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); } - public void testCanCreateFromSampleGivenPipeSeparatedValues() { + public void testCanCreateFromSampleGivenPipeDelimited() { - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); + assertFalse(factory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); } public void testCanCreateFromSampleGivenText() { diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderTests.java similarity index 67% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderTests.java index 2f727747bbf..f41868be862 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderTests.java @@ -3,24 +3,26 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; + +import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; import java.util.Collections; -public class JsonLogStructureFinderTests extends LogStructureTestCase { +public class JsonFileStructureFinderTests extends FileStructureTestCase { - private LogStructureFinderFactory factory = new JsonLogStructureFinderFactory(); + private FileStructureFinderFactory factory = new JsonFileStructureFinderFactory(); public void testCreateConfigsGivenGoodJson() throws Exception { assertTrue(factory.canCreateFromSample(explanation, JSON_SAMPLE)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, JSON_SAMPLE, charset, hasByteOrderMarker); + FileStructureFinder structureFinder = factory.createFromSample(explanation, JSON_SAMPLE, charset, hasByteOrderMarker); - LogStructure structure = structureFinder.getStructure(); + FileStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.JSON, structure.getFormat()); + assertEquals(FileStructure.Format.JSON, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -29,7 +31,7 @@ public class JsonLogStructureFinderTests extends LogStructureTestCase { } assertNull(structure.getExcludeLinesPattern()); assertNull(structure.getMultilineStartPattern()); - assertNull(structure.getSeparator()); + assertNull(structure.getDelimiter()); assertNull(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertNull(structure.getGrokPattern()); diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactoryTests.java similarity index 53% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactoryTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactoryTests.java index 267ce375d6e..8234357fe36 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactoryTests.java @@ -3,14 +3,14 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; -public class TextLogStructureFinderFactoryTests extends LogStructureTestCase { +public class TextLogFileStructureFinderFactoryTests extends FileStructureTestCase { - private LogStructureFinderFactory factory = new TextLogStructureFinderFactory(); + private FileStructureFinderFactory factory = new TextLogFileStructureFinderFactory(); - // No need to check JSON, XML, CSV, TSV, semi-colon separated values or pipe - // separated values because they come earlier in the order we check formats + // No need to check JSON, XML, CSV, TSV, semi-colon delimited values or pipe + // delimited values because they come earlier in the order we check formats public void testCanCreateFromSampleGivenText() { diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java similarity index 92% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java index 7c6a58bb683..a23080a8272 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java @@ -3,29 +3,30 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.xpack.ml.logstructurefinder.TimestampFormatFinder.TimestampMatch; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; +import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.Collections; import java.util.Set; -public class TextLogStructureFinderTests extends LogStructureTestCase { +public class TextLogFileStructureFinderTests extends FileStructureTestCase { - private LogStructureFinderFactory factory = new TextLogStructureFinderFactory(); + private FileStructureFinderFactory factory = new TextLogFileStructureFinderFactory(); public void testCreateConfigsGivenElasticsearchLog() throws Exception { assertTrue(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, TEXT_SAMPLE, charset, hasByteOrderMarker); + FileStructureFinder structureFinder = factory.createFromSample(explanation, TEXT_SAMPLE, charset, hasByteOrderMarker); - LogStructure structure = structureFinder.getStructure(); + FileStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.SEMI_STRUCTURED_TEXT, structure.getFormat()); + assertEquals(FileStructure.Format.SEMI_STRUCTURED_TEXT, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -34,7 +35,7 @@ public class TextLogStructureFinderTests extends LogStructureTestCase { } assertNull(structure.getExcludeLinesPattern()); assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertNull(structure.getSeparator()); + assertNull(structure.getDelimiter()); assertNull(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals("\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*", structure.getGrokPattern()); @@ -46,7 +47,7 @@ public class TextLogStructureFinderTests extends LogStructureTestCase { for (TimestampFormatFinder.CandidateTimestampFormat candidateTimestampFormat : TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS) { String simpleDateRegex = candidateTimestampFormat.simplePattern.pattern(); assertEquals("^" + simpleDateRegex.replaceFirst("^\\\\b", ""), - TextLogStructureFinder.createMultiLineMessageStartRegex(Collections.emptySet(), simpleDateRegex)); + TextLogFileStructureFinder.createMultiLineMessageStartRegex(Collections.emptySet(), simpleDateRegex)); } } @@ -54,7 +55,7 @@ public class TextLogStructureFinderTests extends LogStructureTestCase { for (TimestampFormatFinder.CandidateTimestampFormat candidateTimestampFormat : TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS) { String simpleDateRegex = candidateTimestampFormat.simplePattern.pattern(); assertEquals("^" + simpleDateRegex.replaceFirst("^\\\\b", ""), - TextLogStructureFinder.createMultiLineMessageStartRegex(Collections.singleton(""), simpleDateRegex)); + TextLogFileStructureFinder.createMultiLineMessageStartRegex(Collections.singleton(""), simpleDateRegex)); } } @@ -62,7 +63,7 @@ public class TextLogStructureFinderTests extends LogStructureTestCase { for (TimestampFormatFinder.CandidateTimestampFormat 
candidateTimestampFormat : TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS) { String simpleDateRegex = candidateTimestampFormat.simplePattern.pattern(); assertEquals("^\\[.*?\\] \\[" + simpleDateRegex, - TextLogStructureFinder.createMultiLineMessageStartRegex(Collections.singleton("[ERROR] ["), simpleDateRegex)); + TextLogFileStructureFinder.createMultiLineMessageStartRegex(Collections.singleton("[ERROR] ["), simpleDateRegex)); } } @@ -71,7 +72,7 @@ public class TextLogStructureFinderTests extends LogStructureTestCase { Set prefaces = Sets.newHashSet("[ERROR] [", "[DEBUG] ["); String simpleDateRegex = candidateTimestampFormat.simplePattern.pattern(); assertEquals("^\\[.*?\\] \\[" + simpleDateRegex, - TextLogStructureFinder.createMultiLineMessageStartRegex(prefaces, simpleDateRegex)); + TextLogFileStructureFinder.createMultiLineMessageStartRegex(prefaces, simpleDateRegex)); } } @@ -80,7 +81,7 @@ public class TextLogStructureFinderTests extends LogStructureTestCase { Set prefaces = Sets.newHashSet("host-1.acme.com|", "my_host.elastic.co|"); String simpleDateRegex = candidateTimestampFormat.simplePattern.pattern(); assertEquals("^.*?\\|" + simpleDateRegex, - TextLogStructureFinder.createMultiLineMessageStartRegex(prefaces, simpleDateRegex)); + TextLogFileStructureFinder.createMultiLineMessageStartRegex(prefaces, simpleDateRegex)); } } @@ -89,7 +90,7 @@ public class TextLogStructureFinderTests extends LogStructureTestCase { Set prefaces = Sets.newHashSet("", "[non-standard] "); String simpleDateRegex = candidateTimestampFormat.simplePattern.pattern(); assertEquals("^.*?" + simpleDateRegex, - TextLogStructureFinder.createMultiLineMessageStartRegex(prefaces, simpleDateRegex)); + TextLogFileStructureFinder.createMultiLineMessageStartRegex(prefaces, simpleDateRegex)); } } @@ -143,7 +144,7 @@ public class TextLogStructureFinderTests extends LogStructureTestCase { "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-watcher]\n" + "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] no plugins loaded\n"; - Tuple> mostLikelyMatch = TextLogStructureFinder.mostLikelyTimestamp(sample.split("\n")); + Tuple> mostLikelyMatch = TextLogFileStructureFinder.mostLikelyTimestamp(sample.split("\n")); assertNotNull(mostLikelyMatch); assertEquals(new TimestampMatch(7, "", "ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", "TIMESTAMP_ISO8601", ""), mostLikelyMatch.v1()); @@ -233,7 +234,7 @@ public class TextLogStructureFinderTests extends LogStructureTestCase { "\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_144]\n" + "\tat java.lang.Thread.run(Thread.java:748) [?:1.8.0_144]\n"; - Tuple> mostLikelyMatch = TextLogStructureFinder.mostLikelyTimestamp(sample.split("\n")); + Tuple> mostLikelyMatch = TextLogFileStructureFinder.mostLikelyTimestamp(sample.split("\n")); assertNotNull(mostLikelyMatch); // Even though many lines have a timestamp near the end (in the Lucene version information), diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TimestampFormatFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java similarity index 98% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TimestampFormatFinderTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java 
index cf1b65d1be2..bf27912b9db 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TimestampFormatFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java @@ -3,10 +3,10 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.xpack.ml.logstructurefinder.TimestampFormatFinder.TimestampMatch; +import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; @@ -16,7 +16,7 @@ import org.joda.time.format.ISODateTimeFormat; import java.util.Arrays; import java.util.Locale; -public class TimestampFormatFinderTests extends LogStructureTestCase { +public class TimestampFormatFinderTests extends FileStructureTestCase { public void testFindFirstMatchGivenNoMatch() { diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactoryTests.java similarity index 70% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactoryTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactoryTests.java index 27eb4ede040..e7c11181831 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactoryTests.java @@ -3,11 +3,11 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; -public class XmlLogStructureFinderFactoryTests extends LogStructureTestCase { +public class XmlFileStructureFinderFactoryTests extends FileStructureTestCase { - private LogStructureFinderFactory factory = new XmlLogStructureFinderFactory(); + private FileStructureFinderFactory factory = new XmlFileStructureFinderFactory(); // No need to check JSON because it comes earlier in the order we check formats @@ -26,14 +26,14 @@ public class XmlLogStructureFinderFactoryTests extends LogStructureTestCase { assertFalse(factory.canCreateFromSample(explanation, TSV_SAMPLE)); } - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { + public void testCanCreateFromSampleGivenSemiColonDelimited() { - assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); + assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); } - public void testCanCreateFromSampleGivenPipeSeparatedValues() { + public void testCanCreateFromSampleGivenPipeDelimited() { - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); + assertFalse(factory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); } public void testCanCreateFromSampleGivenText() { diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderTests.java similarity index 67% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderTests.java index 0d04df152ef..4bf65ba7835 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderTests.java @@ -3,24 +3,26 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.logstructurefinder; +package org.elasticsearch.xpack.ml.filestructurefinder; + +import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; import java.util.Collections; -public class XmlLogStructureFinderTests extends LogStructureTestCase { +public class XmlFileStructureFinderTests extends FileStructureTestCase { - private LogStructureFinderFactory factory = new XmlLogStructureFinderFactory(); + private FileStructureFinderFactory factory = new XmlFileStructureFinderFactory(); public void testCreateConfigsGivenGoodXml() throws Exception { assertTrue(factory.canCreateFromSample(explanation, XML_SAMPLE)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, XML_SAMPLE, charset, hasByteOrderMarker); + FileStructureFinder structureFinder = factory.createFromSample(explanation, XML_SAMPLE, charset, hasByteOrderMarker); - LogStructure structure = structureFinder.getStructure(); + FileStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.XML, structure.getFormat()); + assertEquals(FileStructure.Format.XML, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -29,7 +31,7 @@ public class XmlLogStructureFinderTests extends LogStructureTestCase { } assertNull(structure.getExcludeLinesPattern()); assertEquals("^\\s* mustMatchStrings = Arrays.asList("2018-09-03 17:03:28,269 +0100 | ERROR | ", + "2018-09-03 17:04:27,279 +0100 | DEBUG | ", + "2018-09-03 17:05:26,289 +0100 | ERROR | "); + + Map fieldNameCountStore = new HashMap<>(); + StringBuilder overallGrokPatternBuilder = new StringBuilder(); + + GrokPatternCreator.appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, false, false, mustMatchStrings); + + assertEquals(".*?%{TOMCAT_DATESTAMP:timestamp}.+?%{LOGLEVEL:loglevel}.+?", overallGrokPatternBuilder.toString()); + } + + public void testAppendBestGrokMatchForStringsGivenTrappyFloatCandidates() { + + // If we're not careful then we might detect the first part of these strings as a + // number, e.g. 
1.2 in the first example, but this is inappropriate given the + // trailing dot and digit + Collection mustMatchStrings = Arrays.asList("1.2.3", + "-2.3.4", + "4.5.6.7", + "-9.8.7.6.5"); + + Map fieldNameCountStore = new HashMap<>(); + StringBuilder overallGrokPatternBuilder = new StringBuilder(); + + GrokPatternCreator.appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, false, false, mustMatchStrings); + + assertEquals(".+?", overallGrokPatternBuilder.toString()); + } + public void testAppendBestGrokMatchForStringsGivenNumbersInBrackets() { Collection mustMatchStrings = Arrays.asList("(-2)", diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobStateTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobStateTests.java index cd983c6b030..2e324b6a1c2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobStateTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobStateTests.java @@ -5,19 +5,8 @@ */ package org.elasticsearch.xpack.ml.job.config; -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.mockito.ArgumentCaptor; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -import java.io.IOException; - -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class JobStateTests extends ESTestCase { @@ -60,35 +49,4 @@ public class JobStateTests extends ESTestCase { assertTrue(JobState.CLOSED.isAnyOf(JobState.CLOSED)); assertTrue(JobState.CLOSING.isAnyOf(JobState.CLOSING)); } - - @SuppressWarnings("unchecked") - public void testStreaming_v54BackwardsCompatibility() throws IOException { - StreamOutput out = mock(StreamOutput.class); - when(out.getVersion()).thenReturn(Version.V_5_4_0); - ArgumentCaptor enumCaptor = ArgumentCaptor.forClass(Enum.class); - - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - return null; - } - }).when(out).writeEnum(enumCaptor.capture()); - - // OPENING state was introduced in v5.5. 
- // Pre v5.5 its translated as CLOSED - JobState.OPENING.writeTo(out); - assertEquals(JobState.CLOSED, enumCaptor.getValue()); - - when(out.getVersion()).thenReturn(Version.V_5_5_0); - - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - return null; - } - }).when(out).writeEnum(enumCaptor.capture()); - - JobState.OPENING.writeTo(out); - assertEquals(JobState.OPENING, enumCaptor.getValue()); - } } diff --git a/x-pack/plugin/monitoring/build.gradle b/x-pack/plugin/monitoring/build.gradle index a452ef09a20..e551d577b7b 100644 --- a/x-pack/plugin/monitoring/build.gradle +++ b/x-pack/plugin/monitoring/build.gradle @@ -13,7 +13,8 @@ esplugin { archivesBaseName = 'x-pack-monitoring' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // monitoring deps diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java index f23a545b6a5..ded3064a2a6 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java @@ -5,14 +5,14 @@ */ package org.elasticsearch.xpack.monitoring.exporter.http; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; +import org.apache.http.nio.entity.NByteArrayEntity; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseListener; import org.elasticsearch.client.RestClient; @@ -94,9 +94,13 @@ class HttpExportBulk extends ExportBulk { if (payload == null) { listener.onFailure(new ExportException("unable to send documents because none were loaded for export bulk [{}]", name)); } else if (payload.length != 0) { - final HttpEntity body = new ByteArrayEntity(payload, ContentType.APPLICATION_JSON); + final Request request = new Request("POST", "/_bulk"); + for (Map.Entry param : params.entrySet()) { + request.addParameter(param.getKey(), param.getValue()); + } + request.setEntity(new NByteArrayEntity(payload, ContentType.APPLICATION_JSON)); - client.performRequestAsync("POST", "/_bulk", params, body, new ResponseListener() { + client.performRequestAsync(request, new ResponseListener() { @Override public void onSuccess(Response response) { try { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java index 57106363bc1..dc294ef53de 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java +++ 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java @@ -5,12 +5,10 @@ */ package org.elasticsearch.xpack.monitoring.action; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; @@ -21,7 +19,6 @@ import org.junit.Before; import java.io.IOException; import java.util.ArrayList; -import java.util.Base64; import java.util.List; import static java.util.Collections.emptyList; @@ -158,23 +155,6 @@ public class MonitoringBulkDocTests extends ESTestCase { } } - public void testSerializationBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("AQNtSWQBBTUuMS4yAAAAAQEEdHlwZQECaWQNeyJmb28iOiJiYXIifQAAAAAAAAAA"); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - MonitoringBulkDoc bulkDoc = MonitoringBulkDoc.readFrom(in); - assertEquals(MonitoredSystem.UNKNOWN, bulkDoc.getSystem()); - assertEquals("type", bulkDoc.getType()); - assertEquals("id", bulkDoc.getId()); - assertEquals(0L, bulkDoc.getTimestamp()); - assertEquals(0L, bulkDoc.getIntervalMillis()); - assertEquals("{\"foo\":\"bar\"}", bulkDoc.getSource().utf8ToString()); - assertEquals(XContentType.JSON, bulkDoc.getXContentType()); - } - } - /** * Test that we allow strings to be "" because Logstash 5.2 - 5.3 would submit empty _id values for time-based documents */ diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java index b336b3c8853..dc5cad7c94f 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.monitoring.action; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -26,7 +25,6 @@ import java.io.IOException; import java.util.Collection; import java.util.List; -import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -254,52 +252,6 @@ public class MonitoringBulkRequestTests extends ESTestCase { assertArrayEquals(originalBulkDocs, deserializedBulkDocs); } - public void testSerializationBwc() throws IOException { - final MonitoringBulkRequest originalRequest = new MonitoringBulkRequest(); - - final int numDocs = iterations(10, 30); - for (int i = 0; i < numDocs; i++) { - originalRequest.add(randomMonitoringBulkDoc()); - } - - final Version version = randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_0_0_rc1); - - final 
BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(version); - originalRequest.writeTo(out); - - final StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); - - final MonitoringBulkRequest deserializedRequest = new MonitoringBulkRequest(); - deserializedRequest.readFrom(in); - - assertThat(in.available(), equalTo(0)); - - final MonitoringBulkDoc[] originalBulkDocs = originalRequest.getDocs().toArray(new MonitoringBulkDoc[]{}); - final MonitoringBulkDoc[] deserializedBulkDocs = deserializedRequest.getDocs().toArray(new MonitoringBulkDoc[]{}); - - assertThat(originalBulkDocs.length, equalTo(deserializedBulkDocs.length)); - - for (int i = 0; i < originalBulkDocs.length; i++) { - final MonitoringBulkDoc original = originalBulkDocs[i]; - final MonitoringBulkDoc deserialized = deserializedBulkDocs[i]; - - assertThat(deserialized.getSystem(), equalTo(original.getSystem())); - assertThat(deserialized.getType(), equalTo(original.getType())); - assertThat(deserialized.getId(), equalTo(original.getId())); - assertThat(deserialized.getTimestamp(), equalTo(original.getTimestamp())); - assertThat(deserialized.getSource(), equalTo(original.getSource())); - assertThat(deserialized.getXContentType(), equalTo(original.getXContentType())); - - if (version.onOrAfter(Version.V_6_0_0_rc1)) { - assertThat(deserialized.getIntervalMillis(), equalTo(original.getIntervalMillis())); - } else { - assertThat(deserialized.getIntervalMillis(), equalTo(0L)); - } - } - } - /** * Return a {@link XContentType} supported by the Monitoring Bulk API (JSON or Smile) */ diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/CollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/CollectorTests.java index 79279faa6f4..3d1a0bf9ade 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/CollectorTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/CollectorTests.java @@ -5,39 +5,11 @@ */ package org.elasticsearch.xpack.monitoring.collector; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; - -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; public class CollectorTests extends ESTestCase { public void testConvertNullNode() { assertEquals(null, Collector.convertNode(randomNonNegativeLong(), null)); } - - public void testConvertNode() { - final String name = randomBoolean() ? 
randomAlphaOfLength(5) : ""; - final String nodeId = randomAlphaOfLength(5); - final TransportAddress address = buildNewFakeTransportAddress(); - final Version version = randomFrom(Version.V_5_0_1, Version.V_5_3_0, Version.CURRENT); - final long timestamp = randomNonNegativeLong(); - - final Set roles = new HashSet<>(); - if (randomBoolean()) { - roles.addAll(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))); - } - - final MonitoringDoc.Node expectedNode = new MonitoringDoc.Node(nodeId, address.address().getHostString(), address.toString(), - address.getAddress(), name, timestamp); - - DiscoveryNode discoveryNode = new DiscoveryNode(name, nodeId, address, Collections.emptyMap(), roles, version); - assertEquals(expectedNode, Collector.convertNode(timestamp, discoveryNode)); - } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index b152bfc8d71..294f56e26b0 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.bytes.BytesReference; @@ -60,7 +61,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; -import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -289,7 +289,8 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase extends assertEquals(deserialized.hashCode(), original.hashCode()); assertNotSame(deserialized, original); } - - public void testMonitoringNodeBwcSerialization() throws IOException { - final Version version = randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_0_0_beta2); - - final byte[] data = Base64.getDecoder() - .decode("AQVFSWJKdgEDdFFOAQV3cGtMagEFa2xqeWEBBVZTamF2AwVrZXkjMgEyBWtleSMxATEFa2V5IzABMAAAAAAAAA=="); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - - final MonitoringDoc.Node node = new MonitoringDoc.Node(in); - assertEquals("EIbJv", node.getUUID()); - assertEquals("VSjav", node.getName()); - assertEquals("tQN", node.getHost()); - assertEquals("wpkLj", node.getTransportAddress()); - assertEquals("kljya", node.getIp()); - assertEquals(0L, node.getTimestamp()); - } - } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java index 28a19090225..7bc035f7ae2 100644 --- 
a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.max.Max; +import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkDoc; diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java index efc32fccb3d..e062ea96de3 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java @@ -53,6 +53,11 @@ import org.joda.time.format.DateTimeFormat; import org.joda.time.format.ISODateTimeFormat; import java.io.IOException; +import java.lang.Thread.State; +import java.lang.management.LockInfo; +import java.lang.management.ManagementFactory; +import java.lang.management.MonitorInfo; +import java.lang.management.ThreadInfo; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -67,6 +72,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.elasticsearch.threadpool.ThreadPool.Names.WRITE; import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.TEMPLATE_VERSION; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -119,8 +125,10 @@ public class MonitoringIT extends ESSingleNodeTestCase { // REST is the realistic way that these operations happen, so it's the most realistic way to integration test it too // Use Monitoring Bulk API to index 3 documents - //final Response bulkResponse = getRestClient().performRequest("POST", "/_xpack/monitoring/_bulk", - // parameters, createBulkEntity()); + //final Request bulkRequest = new Request("POST", "/_xpack/monitoring/_bulk"); + //< + //bulkRequest.setJsonEntity(createBulkEntity()); + //final Response bulkResponse = getRestClient().performRequest(request); final MonitoringBulkResponse bulkResponse = new MonitoringBulkRequestBuilder(client()) @@ -186,7 +194,6 @@ public class MonitoringIT extends ESSingleNodeTestCase { * This test waits for the monitoring service to collect monitoring documents and then checks that all expected documents * have been indexed with the expected information. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29880") public void testMonitoringService() throws Exception { final boolean createAPMIndex = randomBoolean(); final String indexName = createAPMIndex ? 
"apm-2017.11.06" : "books"; @@ -337,7 +344,7 @@ public class MonitoringIT extends ESSingleNodeTestCase { final Map clusterStats = (Map) source.get("cluster_stats"); assertThat(clusterStats, notNullValue()); - assertThat(clusterStats.size(), equalTo(4)); + assertThat(clusterStats.size(), equalTo(5)); final Map stackStats = (Map) source.get("stack_stats"); assertThat(stackStats, notNullValue()); @@ -347,7 +354,7 @@ public class MonitoringIT extends ESSingleNodeTestCase { assertThat(apm, notNullValue()); assertThat(apm.size(), equalTo(1)); assertThat(apm.remove("found"), is(apmIndicesExist)); - assertThat(apm.isEmpty(), is(true)); + assertThat(apm.keySet(), empty()); final Map xpackStats = (Map) stackStats.get("xpack"); assertThat(xpackStats, notNullValue()); @@ -359,14 +366,14 @@ public class MonitoringIT extends ESSingleNodeTestCase { final Map clusterState = (Map) source.get("cluster_state"); assertThat(clusterState, notNullValue()); - assertThat(clusterState.size(), equalTo(6)); assertThat(clusterState.remove("nodes_hash"), notNullValue()); assertThat(clusterState.remove("status"), notNullValue()); assertThat(clusterState.remove("version"), notNullValue()); assertThat(clusterState.remove("state_uuid"), notNullValue()); + assertThat(clusterState.remove("cluster_uuid"), notNullValue()); assertThat(clusterState.remove("master_node"), notNullValue()); assertThat(clusterState.remove("nodes"), notNullValue()); - assertThat(clusterState.isEmpty(), is(true)); + assertThat(clusterState.keySet(), empty()); } /** @@ -452,6 +459,11 @@ public class MonitoringIT extends ESSingleNodeTestCase { return; } + // bulk is not a thread pool in the current version but we allow it to support mixed version clusters + if (filter.startsWith("node_stats.thread_pool.bulk")) { + return; + } + assertThat(filter + " must not be null in the monitoring document", extractValue(filter, source), notNullValue()); }); } @@ -496,13 +508,75 @@ public class MonitoringIT extends ESSingleNodeTestCase { */ private void whenExportersAreReady(final CheckedRunnable runnable) throws Exception { try { - enableMonitoring(); + try { + enableMonitoring(); + } catch (AssertionError e) { + // Added to debug https://github.com/elastic/elasticsearch/issues/29880 + // Remove when fixed + StringBuilder b = new StringBuilder(); + b.append("\n==== jstack at monitoring enablement failure time ====\n"); + for (ThreadInfo ti : ManagementFactory.getThreadMXBean().dumpAllThreads(true, true)) { + append(b, ti); + } + b.append("^^==============================================\n"); + logger.info(b.toString()); + throw e; + } runnable.run(); } finally { disableMonitoring(); } } + // borrowed from randomized-testing + private static void append(StringBuilder b, ThreadInfo ti) { + b.append('"').append(ti.getThreadName()).append('"'); + b.append(" ID=").append(ti.getThreadId()); + + final State threadState = ti.getThreadState(); + b.append(" ").append(threadState); + if (ti.getLockName() != null) { + b.append(" on ").append(ti.getLockName()); + } + + if (ti.getLockOwnerName() != null) { + b.append(" owned by \"").append(ti.getLockOwnerName()) + .append("\" ID=").append(ti.getLockOwnerId()); + } + + b.append(ti.isSuspended() ? " (suspended)" : ""); + b.append(ti.isInNative() ? 
" (in native code)" : ""); + b.append("\n"); + + final StackTraceElement[] stack = ti.getStackTrace(); + final LockInfo lockInfo = ti.getLockInfo(); + final MonitorInfo [] monitorInfos = ti.getLockedMonitors(); + for (int i = 0; i < stack.length; i++) { + b.append("\tat ").append(stack[i]).append("\n"); + if (i == 0 && lockInfo != null) { + b.append("\t- ") + .append(threadState) + .append(lockInfo) + .append("\n"); + } + + for (MonitorInfo mi : monitorInfos) { + if (mi.getLockedStackDepth() == i) { + b.append("\t- locked ").append(mi).append("\n"); + } + } + } + + LockInfo [] lockInfos = ti.getLockedSynchronizers(); + if (lockInfos.length > 0) { + b.append("\tLocked synchronizers:\n"); + for (LockInfo li : ti.getLockedSynchronizers()) { + b.append("\t- ").append(li).append("\n"); + } + } + b.append("\n"); + } + /** * Enable the monitoring service and the Local exporter, waiting for some monitoring documents * to be indexed before it returns. diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MockIngestPlugin.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MockIngestPlugin.java index 818ab374d34..b4521ad58b2 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MockIngestPlugin.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MockIngestPlugin.java @@ -74,8 +74,9 @@ public class MockIngestPlugin extends Plugin implements IngestPlugin { } @Override - public void execute(IngestDocument ingestDocument) throws Exception { + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { // mock processor does nothing + return ingestDocument; } @Override diff --git a/x-pack/plugin/rollup/build.gradle b/x-pack/plugin/rollup/build.gradle index 649a89bc2cd..75fd22abacc 100644 --- a/x-pack/plugin/rollup/build.gradle +++ b/x-pack/plugin/rollup/build.gradle @@ -16,7 +16,8 @@ compileTestJava.options.compilerArgs << "-Xlint:-rawtypes" dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java index 0fc4d838f7c..09b2ccd079a 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java @@ -194,7 +194,7 @@ public class Rollup extends Plugin implements ActionPlugin, PersistentTaskPlugin return emptyList(); } - SchedulerEngine schedulerEngine = new SchedulerEngine(getClock()); + SchedulerEngine schedulerEngine = new SchedulerEngine(settings, getClock()); return Collections.singletonList(new RollupJobTask.RollupJobPersistentTasksExecutor(settings, client, schedulerEngine, threadPool)); } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java index d1706fd708e..232034177e8 100644 --- 
a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java @@ -5,9 +5,11 @@ */ package org.elasticsearch.xpack.rollup; +import org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; @@ -30,7 +32,7 @@ import java.util.Set; */ public class RollupJobIdentifierUtils { - private static final Comparator COMPARATOR = RollupJobIdentifierUtils.getComparator(); + static final Comparator COMPARATOR = RollupJobIdentifierUtils.getComparator(); /** * Given the aggregation tree and a list of available job capabilities, this method will return a set @@ -93,8 +95,9 @@ public class RollupJobIdentifierUtils { if (fieldCaps != null) { for (Map agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) { - TimeValue interval = TimeValue.parseTimeValue((String)agg.get(RollupField.INTERVAL), "date_histogram.interval"); - String thisTimezone = (String) agg.get(DateHistogramGroupConfig.TIME_ZONE); + DateHistogramInterval interval = new DateHistogramInterval((String)agg.get(RollupField.INTERVAL)); + + String thisTimezone = (String)agg.get(DateHistogramGroupConfig.TIME_ZONE); String sourceTimeZone = source.timeZone() == null ? DateTimeZone.UTC.toString() : source.timeZone().toString(); // Ensure we are working on the same timezone @@ -102,17 +105,20 @@ public class RollupJobIdentifierUtils { continue; } if (source.dateHistogramInterval() != null) { - TimeValue sourceInterval = TimeValue.parseTimeValue(source.dateHistogramInterval().toString(), - "source.date_histogram.interval"); - //TODO should be divisor of interval - if (interval.compareTo(sourceInterval) <= 0) { + // Check if both are calendar and validate if they are. + // If not, check if both are fixed and validate + if (validateCalendarInterval(source.dateHistogramInterval(), interval)) { + localCaps.add(cap); + } else if (validateFixedInterval(source.dateHistogramInterval(), interval)) { localCaps.add(cap); } } else { - if (interval.getMillis() <= source.interval()) { + // check if config is fixed and validate if it is + if (validateFixedInterval(source.interval(), interval)) { localCaps.add(cap); } } + // not a candidate if we get here break; } } @@ -133,6 +139,57 @@ public class RollupJobIdentifierUtils { } } + private static boolean isCalendarInterval(DateHistogramInterval interval) { + return DateHistogramAggregationBuilder.DATE_FIELD_UNITS.containsKey(interval.toString()); + } + + static boolean validateCalendarInterval(DateHistogramInterval requestInterval, + DateHistogramInterval configInterval) { + // Both must be calendar intervals + if (isCalendarInterval(requestInterval) == false || isCalendarInterval(configInterval) == false) { + return false; + } + + // The request must be gte the config. 
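The two validators introduced in this hunk encode one rule for fixed intervals: a query interval is only answerable from a rollup job if it is at least the configured interval and a whole multiple of it. A dependency-free sketch of that rule, where plain milliseconds stand in for the `TimeValue`-parsed strings such as "100ms" or "5m" (the class name is invented for illustration):

```java
final class FixedIntervalRule {

    /** True when the requested interval is >= the configured one and a whole multiple of it. */
    static boolean validate(long requestMillis, long configMillis) {
        return requestMillis >= configMillis && requestMillis % configMillis == 0;
    }

    public static void main(String[] args) {
        System.out.println(validate(1000, 200)); // true: 1000ms is five 200ms buckets
        System.out.println(validate(300, 200));  // false: not a whole multiple
        System.out.println(validate(100, 200));  // false: finer than the rollup data
    }
}
```

The calendar half works analogously but compares unit durations (a request of `day` can be served by a config of `hour`) rather than parsing millis, since, as the comment in the hunk notes, calendar units are naturally multiples of one another.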
The CALENDAR_ORDERING map values are integers representing + // relative orders between the calendar units + DateTimeUnit requestUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(requestInterval.toString()); + long requestOrder = requestUnit.field(DateTimeZone.UTC).getDurationField().getUnitMillis(); + DateTimeUnit configUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(configInterval.toString()); + long configOrder = configUnit.field(DateTimeZone.UTC).getDurationField().getUnitMillis(); + + // All calendar units are multiples naturally, so we just care about gte + return requestOrder >= configOrder; + } + + static boolean validateFixedInterval(DateHistogramInterval requestInterval, + DateHistogramInterval configInterval) { + // Neither can be calendar intervals + if (isCalendarInterval(requestInterval) || isCalendarInterval(configInterval)) { + return false; + } + + // Both are fixed, good to convert to millis now + long configIntervalMillis = TimeValue.parseTimeValue(configInterval.toString(), + "date_histo.config.interval").getMillis(); + long requestIntervalMillis = TimeValue.parseTimeValue(requestInterval.toString(), + "date_histo.request.interval").getMillis(); + + // Must be a multiple and gte the config + return requestIntervalMillis >= configIntervalMillis && requestIntervalMillis % configIntervalMillis == 0; + } + + static boolean validateFixedInterval(long requestInterval, DateHistogramInterval configInterval) { + // config must not be a calendar interval + if (isCalendarInterval(configInterval)) { + return false; + } + long configIntervalMillis = TimeValue.parseTimeValue(configInterval.toString(), + "date_histo.config.interval").getMillis(); + + // Must be a multiple and gte the config + return requestInterval >= configIntervalMillis && requestInterval % configIntervalMillis == 0; + } + /** * Find the set of histo's with the largest interval */ @@ -144,8 +201,8 @@ public class RollupJobIdentifierUtils { for (Map agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(HistogramAggregationBuilder.NAME)) { Long interval = (long)agg.get(RollupField.INTERVAL); - // TODO should be divisor of interval - if (interval <= source.interval()) { + // query interval must be gte the configured interval, and a whole multiple + if (interval <= source.interval() && source.interval() % interval == 0) { localCaps.add(cap); } break; @@ -155,8 +212,8 @@ public class RollupJobIdentifierUtils { } if (localCaps.isEmpty()) { - throw new IllegalArgumentException("There is not a rollup job that has a [" + source.getWriteableName() + "] agg on field [" + - source.field() + "] which also satisfies all requirements of query."); + throw new IllegalArgumentException("There is not a rollup job that has a [" + source.getWriteableName() + + "] agg on field [" + source.field() + "] which also satisfies all requirements of query."); } // We are a leaf, save our best caps @@ -247,8 +304,8 @@ public class RollupJobIdentifierUtils { return 0; } - TimeValue thisTime = null; - TimeValue thatTime = null; + long thisTime = Long.MAX_VALUE; + long thatTime = Long.MAX_VALUE; // histogram intervals are averaged and compared, with the idea that // a larger average == better, because it will generate fewer documents @@ -265,7 +322,7 @@ public class RollupJobIdentifierUtils { for (RollupJobCaps.RollupFieldCaps fieldCaps : o1.getFieldCaps().values()) { for (Map agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) { - thisTime = 
TimeValue.parseTimeValue((String) agg.get(RollupField.INTERVAL), RollupField.INTERVAL); + thisTime = getMillisFixedOrCalendar((String) agg.get(RollupField.INTERVAL)); } else if (agg.get(RollupField.AGG).equals(HistogramAggregationBuilder.NAME)) { thisHistoWeights += (long) agg.get(RollupField.INTERVAL); counter += 1; @@ -281,7 +338,7 @@ public class RollupJobIdentifierUtils { for (RollupJobCaps.RollupFieldCaps fieldCaps : o2.getFieldCaps().values()) { for (Map agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) { - thatTime = TimeValue.parseTimeValue((String) agg.get(RollupField.INTERVAL), RollupField.INTERVAL); + thatTime = getMillisFixedOrCalendar((String) agg.get(RollupField.INTERVAL)); } else if (agg.get(RollupField.AGG).equals(HistogramAggregationBuilder.NAME)) { thatHistoWeights += (long) agg.get(RollupField.INTERVAL); counter += 1; @@ -292,13 +349,9 @@ public class RollupJobIdentifierUtils { } thatHistoWeights = counter == 0 ? 0 : thatHistoWeights / counter; - // DateHistos are mandatory so these should always be present no matter what - assert thisTime != null; - assert thatTime != null; - // Compare on date interval first // The "smaller" job is the one with the larger interval - int timeCompare = thisTime.compareTo(thatTime); + int timeCompare = Long.compare(thisTime, thatTime); if (timeCompare != 0) { return -timeCompare; } @@ -330,4 +383,14 @@ public class RollupJobIdentifierUtils { // coverage }; } + + static long getMillisFixedOrCalendar(String value) { + DateHistogramInterval interval = new DateHistogramInterval(value); + if (isCalendarInterval(interval)) { + DateTimeUnit intervalUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(interval.toString()); + return intervalUnit.field(DateTimeZone.UTC).getDurationField().getUnitMillis(); + } else { + return TimeValue.parseTimeValue(value, "date_histo.comparator.interval").getMillis(); + } + } } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java index 0668e7c43ad..44e67cc619c 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java @@ -16,8 +16,8 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java index 4042e98ef93..0c1ca89f32d 100644 --- 
a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java @@ -28,11 +28,11 @@ import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.bucket.terms.LongTerms; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation.SingleValue; -import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg; -import org.elasticsearch.search.aggregations.metrics.max.InternalMax; -import org.elasticsearch.search.aggregations.metrics.min.InternalMin; -import org.elasticsearch.search.aggregations.metrics.sum.InternalSum; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.InternalAvg; +import org.elasticsearch.search.aggregations.metrics.InternalMax; +import org.elasticsearch.search.aggregations.metrics.InternalMin; +import org.elasticsearch.search.aggregations.metrics.InternalSum; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.xpack.core.rollup.RollupField; @@ -238,11 +238,23 @@ public class RollupResponseTranslator { ? (InternalAggregations)liveResponse.getAggregations() : InternalAggregations.EMPTY; - rolledResponses.forEach(r -> { - if (r == null || r.getAggregations() == null || r.getAggregations().asList().size() == 0) { - throw new RuntimeException("Expected to find aggregations in rollup response, but none found."); + int missingRollupAggs = rolledResponses.stream().mapToInt(searchResponse -> { + if (searchResponse == null + || searchResponse.getAggregations() == null + || searchResponse.getAggregations().asList().size() == 0) { + return 1; } - }); + return 0; + }).sum(); + + // We had no rollup aggs, so there is nothing to process + if (missingRollupAggs == rolledResponses.size()) { + // Return an empty response, but make sure we include all the shard, failure, etc stats + return mergeFinalResponse(liveResponse, rolledResponses, InternalAggregations.EMPTY); + } else if (missingRollupAggs > 0 && missingRollupAggs != rolledResponses.size()) { + // We were missing some but not all the aggs, unclear how to handle this. Bail. 
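The counting logic above replaces a blanket failure: only when *every* rolled response is missing aggregations is there legitimately nothing to reduce, while a partial miss still ends in the `throw` that follows. A minimal, dependency-free sketch of that decision, with a hypothetical `Resp` record standing in for `SearchResponse`:

```java
import java.util.List;

/** Stand-in for SearchResponse that only records whether rollup aggs came back. */
record Resp(boolean hasAggs) {}

final class MissingAggsPolicy {

    /** All missing -> merge an empty tree; some missing -> fail; none missing -> reduce. */
    static String decide(List<Resp> rolled) {
        long missing = rolled.stream().filter(r -> r.hasAggs() == false).count();
        if (missing == rolled.size()) {
            return "merge-empty"; // shard, took, and failure stats are still merged
        }
        if (missing > 0) {
            throw new RuntimeException("Expected to find aggregations in rollup response, but none found.");
        }
        return "reduce";
    }

    public static void main(String[] args) {
        System.out.println(decide(List.of(new Resp(false), new Resp(false)))); // merge-empty
        System.out.println(decide(List.of(new Resp(true), new Resp(true))));   // reduce
    }
}
```

Failing on the mixed case keeps the merge deterministic; reducing a blend of empty and populated aggregation trees could silently drop buckets.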
+ throw new RuntimeException("Expected to find aggregations in rollup response, but none found."); + } // The combination process returns a tree that is identical to the non-rolled // which means we can use aggregation's reduce method to combine, just as if @@ -275,27 +287,39 @@ public class RollupResponseTranslator { new InternalAggregation.ReduceContext(reduceContext.bigArrays(), reduceContext.scriptService(), true)); } - // TODO allow profiling in the future - InternalSearchResponse combinedInternal = new InternalSearchResponse(SearchHits.empty(), currentTree, null, null, - rolledResponses.stream().anyMatch(SearchResponse::isTimedOut), - rolledResponses.stream().anyMatch(SearchResponse::isTimedOut), - rolledResponses.stream().mapToInt(SearchResponse::getNumReducePhases).sum()); + return mergeFinalResponse(liveResponse, rolledResponses, currentTree); + } + + private static SearchResponse mergeFinalResponse(SearchResponse liveResponse, List rolledResponses, + InternalAggregations aggs) { int totalShards = rolledResponses.stream().mapToInt(SearchResponse::getTotalShards).sum(); int sucessfulShards = rolledResponses.stream().mapToInt(SearchResponse::getSuccessfulShards).sum(); int skippedShards = rolledResponses.stream().mapToInt(SearchResponse::getSkippedShards).sum(); long took = rolledResponses.stream().mapToLong(r -> r.getTook().getMillis()).sum() ; + boolean isTimedOut = rolledResponses.stream().anyMatch(SearchResponse::isTimedOut); + boolean isTerminatedEarly = rolledResponses.stream() + .filter(r -> r.isTerminatedEarly() != null) + .anyMatch(SearchResponse::isTerminatedEarly); + int numReducePhases = rolledResponses.stream().mapToInt(SearchResponse::getNumReducePhases).sum(); + if (liveResponse != null) { totalShards += liveResponse.getTotalShards(); sucessfulShards += liveResponse.getSuccessfulShards(); skippedShards += liveResponse.getSkippedShards(); took = Math.max(took, liveResponse.getTook().getMillis()); + isTimedOut = isTimedOut && liveResponse.isTimedOut(); + isTerminatedEarly = isTerminatedEarly && liveResponse.isTerminatedEarly(); + numReducePhases += liveResponse.getNumReducePhases(); } + InternalSearchResponse combinedInternal = new InternalSearchResponse(SearchHits.empty(), aggs, null, null, + isTimedOut, isTerminatedEarly, numReducePhases); + // Shard failures are ignored atm, so returning an empty array is fine return new SearchResponse(combinedInternal, null, totalShards, sucessfulShards, skippedShards, - took, ShardSearchFailure.EMPTY_ARRAY, rolledResponses.get(0).getClusters()); + took, ShardSearchFailure.EMPTY_ARRAY, rolledResponses.get(0).getClusters()); } /** diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java index 9f20fba8e92..f0600d80f82 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java @@ -158,7 +158,8 @@ public class TransportPutRollupJobAction extends TransportMasterNodeAction metadata = (Map) m; if (metadata.get(RollupField.ROLLUP_META) == null) { - String msg = "Expected to find rollup meta key [" + RollupField.ROLLUP_META + "] in mapping of rollup index [" + indexName - + "] but not found."; + String msg = "Rollup data cannot be added to existing indices that contain non-rollup data (expected " + + "to 
find rollup meta key [" + RollupField.ROLLUP_META + "] in mapping of rollup index [" + + indexName + "] but not found)."; logger.error(msg); listener.onFailure(new RuntimeException(msg)); return; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java index c63ab96fa25..ea0319c3432 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java @@ -155,6 +155,18 @@ public class TransportRollupSearchAction extends TransportAction validatedCaps = new HashSet<>(); sourceAgg.getAggregatorFactories() @@ -248,11 +260,6 @@ public class TransportRollupSearchAction extends TransportAction jobCaps) { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java index 9119a5445d4..94d64b17de8 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java @@ -17,7 +17,7 @@ import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggre import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.rollup.Rollup; import java.util.ArrayList; @@ -46,7 +46,7 @@ class IndexerUtils { * @param isUpgradedDocID `true` if this job is using the new ID scheme * @return A list of rolled documents derived from the response */ - static List processBuckets(CompositeAggregation agg, String rollupIndex, RollupJobStats stats, + static List processBuckets(CompositeAggregation agg, String rollupIndex, RollupIndexerJobStats stats, GroupConfig groupConfig, String jobId, boolean isUpgradedDocID) { logger.debug("Buckets: [" + agg.getBuckets().size() + "][" + jobId + "]"); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index 87294706b3b..ee29e56a331 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -5,31 +5,45 @@ */ package org.elasticsearch.xpack.rollup.job; -import org.apache.log4j.Logger; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import 
org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.DateHistogramValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.HistogramValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.indexing.IndexerState; +import org.elasticsearch.xpack.core.indexing.IterationResult; +import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; +import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.MetricConfig; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; +import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; +import org.joda.time.DateTimeZone; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -37,26 +51,16 @@ import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -/** - * An abstract class that builds a rollup index incrementally. A background job can be launched using {@link #maybeTriggerAsyncJob(long)}, - * it will create the rollup index from the source index up to the last complete bucket that is allowed to be built (based on the current - * time and the delay set on the rollup job). Only one background job can run simultaneously and {@link #onFinish()} is called when the job - * finishes. {@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} is called if the indexer is - * aborted while a job is running. The indexer must be started ({@link #start()} to allow a background job to run when - * {@link #maybeTriggerAsyncJob(long)} is called. {@link #stop()} can be used to stop the background job without aborting the indexer. 
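The class javadoc being deleted here is the clearest description of the indexer's lifecycle, which this diff moves into the shared `AsyncTwoPhaseIndexer`. As a rough, self-contained illustration of that contract (the enum and class below are invented; the real code uses `IndexerState` and atomic compare-and-set, exactly as the removed method bodies further down show):

```java
import java.util.concurrent.atomic.AtomicReference;

enum State { STOPPED, STARTED, INDEXING, ABORTING }

final class TinyIndexer {
    private final AtomicReference<State> state = new AtomicReference<>(State.STOPPED);

    /** start() only promotes STOPPED to STARTED; any other state is left untouched. */
    State start() {
        state.compareAndSet(State.STOPPED, State.STARTED);
        return state.get();
    }

    /** A schedule trigger only wins if it atomically moves STARTED to INDEXING. */
    boolean maybeTrigger() {
        return state.compareAndSet(State.STARTED, State.INDEXING);
    }

    public static void main(String[] args) {
        TinyIndexer indexer = new TinyIndexer();
        System.out.println(indexer.maybeTrigger()); // false: not started yet
        indexer.start();
        System.out.println(indexer.maybeTrigger()); // true: exactly one trigger wins
        System.out.println(indexer.maybeTrigger()); // false: a job is already running
    }
}
```

The compare-and-set is what enforces "only one background job can run simultaneously": concurrent triggers race on a single atomic transition and all but one lose.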
- */ -public abstract class RollupIndexer { - private static final Logger logger = Logger.getLogger(RollupIndexer.class.getName()); +import static org.elasticsearch.xpack.core.rollup.RollupField.formatFieldName; +/** + * An abstract implementation of {@link AsyncTwoPhaseIndexer} that builds a rollup index incrementally. + */ +public abstract class RollupIndexer extends AsyncTwoPhaseIndexer, RollupIndexerJobStats> { static final String AGGREGATION_NAME = RollupField.NAME; private final RollupJob job; - private final RollupJobStats stats; - private final AtomicReference state; - private final AtomicReference> position; - private final Executor executor; protected final AtomicBoolean upgradedDocumentID; - private final CompositeAggregationBuilder compositeBuilder; private long maxBoundary; @@ -66,84 +70,16 @@ public abstract class RollupIndexer { * @param job The rollup job * @param initialState Initial state for the indexer * @param initialPosition The last indexed bucket of the task + * @param upgradedDocumentID whether job has updated IDs (for BWC) */ - RollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, - Map initialPosition, AtomicBoolean upgradedDocumentID) { - this.executor = executor; + RollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, Map initialPosition, + AtomicBoolean upgradedDocumentID) { + super(executor, initialState, initialPosition, new RollupIndexerJobStats()); this.job = job; - this.stats = new RollupJobStats(); - this.state = initialState; - this.position = new AtomicReference<>(initialPosition); this.compositeBuilder = createCompositeBuilder(job.getConfig()); this.upgradedDocumentID = upgradedDocumentID; } - /** - * Executes the {@link SearchRequest} and calls nextPhase with the response - * or the exception if an error occurs. - * - * @param request The search request to execute - * @param nextPhase Listener for the next phase - */ - protected abstract void doNextSearch(SearchRequest request, ActionListener nextPhase); - - /** - * Executes the {@link BulkRequest} and calls nextPhase with the response - * or the exception if an error occurs. - * - * @param request The bulk request to execute - * @param nextPhase Listener for the next phase - */ - protected abstract void doNextBulk(BulkRequest request, ActionListener nextPhase); - - /** - * Called periodically during the execution of a background job. Implementation should - * persists the state somewhere and continue the execution asynchronously using next. - * - * @param state The current state of the indexer - * @param position The current position of the indexer - * @param next Runnable for the next phase - */ - protected abstract void doSaveState(IndexerState state, Map position, Runnable next); - - /** - * Called when a failure occurs in an async job causing the execution to stop. - * @param exc The exception - */ - protected abstract void onFailure(Exception exc); - - /** - * Called when a background job finishes. - */ - protected abstract void onFinish(); - - /** - * Called when a background job detects that the indexer is aborted causing the async execution - * to stop. - */ - protected abstract void onAbort(); - - /** - * Get the current state of the indexer. - */ - public IndexerState getState() { - return state.get(); - } - - /** - * Get the current position of the indexer. - */ - public Map getPosition() { - return position.get(); - } - - /** - * Get the stats of this indexer. 
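The abstract hooks removed in this stretch (`doNextSearch`, `doNextBulk`, `doSaveState`) describe a callback-driven pipeline: each search response is transformed into a bulk request, and the bulk acknowledgement schedules the next search. A hypothetical miniature of that shape, with tiny functional interfaces standing in for the Elasticsearch client calls:

```java
import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;

final class TwoPhaseLoop {
    interface Search { void run(Consumer<String> onResponse); }
    interface Bulk   { void run(String docs, Runnable onAck); }

    private final Search search;
    private final Bulk bulk;

    TwoPhaseLoop(Search search, Bulk bulk) { this.search = search; this.bulk = bulk; }

    /** One "page": search, transform, bulk-index, then let the ack drive the next page. */
    void step() {
        search.run(page -> {
            if (page.isEmpty()) {
                return;                                // empty page: the run is finished
            }
            bulk.run("rolled:" + page, this::step);    // the bulk ack triggers the next search
        });
    }

    public static void main(String[] args) {
        Iterator<String> pages = List.of("p1", "p2", "").iterator();
        TwoPhaseLoop loop = new TwoPhaseLoop(
            onResponse -> onResponse.accept(pages.next()),            // fake search
            (docs, ack) -> { System.out.println(docs); ack.run(); }); // fake bulk
        loop.step(); // prints rolled:p1, rolled:p2, then stops on the empty page
    }
}
```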
- */ - public RollupJobStats getStats() { - return stats; - } - /** * Returns if this job has upgraded it's ID scheme yet or not */ @@ -151,229 +87,28 @@ public abstract class RollupIndexer { return upgradedDocumentID.get(); } - /** - * Sets the internal state to {@link IndexerState#STARTED} if the previous state was {@link IndexerState#STOPPED}. Setting the state to - * STARTED allows a job to run in the background when {@link #maybeTriggerAsyncJob(long)} is called. - * @return The new state for the indexer (STARTED, INDEXING or ABORTING if the job was already aborted). - */ - public synchronized IndexerState start() { - state.compareAndSet(IndexerState.STOPPED, IndexerState.STARTED); - return state.get(); + @Override + protected String getJobId() { + return job.getConfig().getId(); } - /** - * Sets the internal state to {@link IndexerState#STOPPING} if an async job is running in the background and in such case - * {@link #onFinish()} will be called as soon as the background job detects that the indexer is stopped. If there is no job running when - * this function is called, the state is directly set to {@link IndexerState#STOPPED} and {@link #onFinish()} will never be called. - * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the job was already aborted). - */ - public synchronized IndexerState stop() { - IndexerState currentState = state.updateAndGet(previousState -> { - if (previousState == IndexerState.INDEXING) { - return IndexerState.STOPPING; - } else if (previousState == IndexerState.STARTED) { - return IndexerState.STOPPED; - } else { - return previousState; - } - }); - return currentState; - } - - /** - * Sets the internal state to {@link IndexerState#ABORTING}. It returns false if an async job is running in the background and in such - * case {@link #onAbort} will be called as soon as the background job detects that the indexer is aborted. If there is no job running - * when this function is called, it returns true and {@link #onAbort()} will never be called. - * @return true if the indexer is aborted, false if a background job is running and abort is delayed. - */ - public synchronized boolean abort() { - IndexerState prevState = state.getAndUpdate((prev) -> IndexerState.ABORTING); - return prevState == IndexerState.STOPPED || prevState == IndexerState.STARTED; - } - - /** - * Triggers a background job that builds the rollup index asynchronously iff there is no other job that runs - * and the indexer is started ({@link IndexerState#STARTED}. - * - * @param now The current time in milliseconds (used to limit the job to complete buckets) - * @return true if a job has been triggered, false otherwise - */ - public synchronized boolean maybeTriggerAsyncJob(long now) { - final IndexerState currentState = state.get(); - switch (currentState) { - case INDEXING: - case STOPPING: - case ABORTING: - logger.warn("Schedule was triggered for rollup job [" + job.getConfig().getId() + "], but prior indexer is still running."); - return false; - - case STOPPED: - logger.debug("Schedule was triggered for rollup job [" + job.getConfig().getId() - + "] but job is stopped. Ignoring trigger."); - return false; - - case STARTED: - logger.debug("Schedule was triggered for rollup job [" + job.getConfig().getId() + "], state: [" + currentState + "]"); - // Only valid time to start indexing is when we are STARTED but not currently INDEXING. - stats.incrementNumInvocations(1); - - // rounds the current time to its current bucket based on the date histogram interval. 
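This boundary computation survives the refactor (it reappears in `onStartJob` just below): round the trigger time down to the start of its date-histogram bucket and, if the job has a delay, back off by that much, so buckets that may still receive documents are never rolled up. A sketch with plain millisecond arithmetic standing in for `Rounding` and `TimeValue` (the class and method names are made up, and real roundings are timezone- and calendar-aware):

```java
final class Boundary {

    /** Rounds `now` down to the start of its interval bucket, then applies an optional delay. */
    static long maxBoundary(long nowMillis, long intervalMillis, Long delayMillis) {
        long bucketStart = (nowMillis / intervalMillis) * intervalMillis;
        return delayMillis == null ? bucketStart : bucketStart - delayMillis;
    }

    public static void main(String[] args) {
        // 1h buckets, now = 01:02:03 since epoch, 10m delay.
        System.out.println(maxBoundary(3_723_000L, 3_600_000L, 600_000L)); // 3000000 (00:50)
        System.out.println(maxBoundary(3_723_000L, 3_600_000L, null));     // 3600000 (01:00)
    }
}
```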
- // this is needed to exclude buckets that can still receive new documents. - DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); - long rounded = dateHisto.createRounding().round(now); - if (dateHisto.getDelay() != null) { - // if the job has a delay we filter all documents that appear before it. - maxBoundary = rounded - TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis(); - } else { - maxBoundary = rounded; - } - - if (state.compareAndSet(IndexerState.STARTED, IndexerState.INDEXING)) { - // fire off the search. Note this is async, the method will return from here - executor.execute(() -> doNextSearch(buildSearchRequest(), - ActionListener.wrap(this::onSearchResponse, exc -> finishWithFailure(exc)))); - logger.debug("Beginning to rollup [" + job.getConfig().getId() + "], state: [" + currentState + "]"); - return true; - } else { - logger.debug("Could not move from STARTED to INDEXING state because current state is [" + state.get() + "]"); - return false; - } - - default: - logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); - throw new IllegalStateException("Rollup job encountered an illegal state [" + currentState + "]"); + @Override + protected void onStartJob(long now) { + // this is needed to exclude buckets that can still receive new documents. + DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); + long rounded = dateHisto.createRounding().round(now); + if (dateHisto.getDelay() != null) { + // if the job has a delay we filter all documents that appear before it. + maxBoundary = rounded - TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis(); + } else { + maxBoundary = rounded; } } - /** - * Checks the {@link IndexerState} and returns false if the execution - * should be stopped. - */ - private boolean checkState(IndexerState currentState) { - switch (currentState) { - case INDEXING: - // normal state; - return true; - - case STOPPING: - logger.info("Rollup job encountered [" + IndexerState.STOPPING + "] state, halting indexer."); - doSaveState(finishAndSetState(), getPosition(), () -> {}); - return false; - - case STOPPED: - return false; - - case ABORTING: - logger.info("Requested shutdown of indexer for job [" + job.getConfig().getId() + "]"); - onAbort(); - return false; - - default: - // Anything other than indexing, aborting or stopping is unanticipated - logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); - throw new IllegalStateException("Rollup job encountered an illegal state [" + currentState + "]"); - } - } - - private void onBulkResponse(BulkResponse response, Map after) { - // TODO we should check items in the response and move after accordingly to resume the failing buckets ? 
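A few lines into the removed `onBulkResponse` below, state is persisted via `doSaveState` only on every 50th page rather than after each bulk, trading checkpoint freshness for fewer state writes (the TODO there concedes the constant is crude). The counting rule in isolation, with invented names:

```java
final class Checkpointer {
    private long pages;

    /** Returns true when this page should also persist indexer state (every 50th page). */
    boolean shouldSaveState() {
        pages++;
        return pages > 0 && pages % 50 == 0;
    }

    public static void main(String[] args) {
        Checkpointer checkpointer = new Checkpointer();
        int saves = 0;
        for (int i = 0; i < 120; i++) {
            if (checkpointer.shouldSaveState()) {
                saves++;
            }
        }
        System.out.println(saves); // 2 (at pages 50 and 100)
    }
}
```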
- stats.incrementNumRollups(response.getItems().length); - if (response.hasFailures()) { - logger.warn("Error while attempting to bulk index rollup documents: " + response.buildFailureMessage()); - } - try { - if (checkState(getState()) == false) { - return ; - } - position.set(after); - ActionListener listener = ActionListener.wrap(this::onSearchResponse, this::finishWithFailure); - // TODO probably something more intelligent than every-50 is needed - if (stats.getNumPages() > 0 && stats.getNumPages() % 50 == 0) { - doSaveState(IndexerState.INDEXING, after, () -> doNextSearch(buildSearchRequest(), listener)); - } else { - doNextSearch(buildSearchRequest(), listener); - } - } catch (Exception e) { - finishWithFailure(e); - } - } - - private void onSearchResponse(SearchResponse searchResponse) { - try { - if (checkState(getState()) == false) { - return ; - } - if (searchResponse.getShardFailures().length != 0) { - throw new RuntimeException("Shard failures encountered while running indexer for rollup job [" - + job.getConfig().getId() + "]: " + Arrays.toString(searchResponse.getShardFailures())); - } - final CompositeAggregation response = searchResponse.getAggregations().get(AGGREGATION_NAME); - if (response == null) { - throw new IllegalStateException("Missing composite response for query: " + compositeBuilder.toString()); - } - stats.incrementNumPages(1); - if (response.getBuckets().isEmpty()) { - // this is the end... - logger.debug("Finished indexing for job [" + job.getConfig().getId() + "], saving state and shutting down."); - - // Change state first, then try to persist. This prevents in-progress STOPPING/ABORTING from - // being persisted as STARTED but then stop the job - doSaveState(finishAndSetState(), position.get(), this::onFinish); - return; - } - - final BulkRequest bulkRequest = new BulkRequest(); + @Override + protected SearchRequest buildSearchRequest() { // Indexer is single-threaded, and only place that the ID scheme can get upgraded is doSaveState(), so // we can pass down the boolean value rather than the atomic here - final List docs = IndexerUtils.processBuckets(response, job.getConfig().getRollupIndex(), - stats, job.getConfig().getGroupConfig(), job.getConfig().getId(), upgradedDocumentID.get()); - docs.forEach(bulkRequest::add); - assert bulkRequest.requests().size() > 0; - doNextBulk(bulkRequest, - ActionListener.wrap( - bulkResponse -> onBulkResponse(bulkResponse, response.afterKey()), - exc -> finishWithFailure(exc) - ) - ); - } catch(Exception e) { - finishWithFailure(e); - } - } - - private void finishWithFailure(Exception exc) { - doSaveState(finishAndSetState(), position.get(), () -> onFailure(exc)); - } - - private IndexerState finishAndSetState() { - return state.updateAndGet( - prev -> { - switch (prev) { - case INDEXING: - // ready for another job - return IndexerState.STARTED; - - case STOPPING: - // must be started again - return IndexerState.STOPPED; - - case ABORTING: - // abort and exit - onAbort(); - return IndexerState.ABORTING; // This shouldn't matter, since onAbort() will kill the task first - - case STOPPED: - // No-op. 
Shouldn't really be possible to get here (should have to go through STOPPING - // first which will be handled) but is harmless to no-op and we don't want to throw exception here - return IndexerState.STOPPED; - - default: - // any other state is unanticipated at this point - throw new IllegalStateException("Rollup job encountered an illegal state [" + prev + "]"); - } - }); - } - - private SearchRequest buildSearchRequest() { final Map position = getPosition(); SearchSourceBuilder searchSource = new SearchSourceBuilder() .size(0) @@ -384,6 +119,16 @@ public abstract class RollupIndexer { return new SearchRequest(job.getConfig().getIndexPattern()).source(searchSource); } + @Override + protected IterationResult> doProcess(SearchResponse searchResponse) { + final CompositeAggregation response = searchResponse.getAggregations().get(AGGREGATION_NAME); + + return new IterationResult<>( + IndexerUtils.processBuckets(response, job.getConfig().getRollupIndex(), getStats(), + job.getConfig().getGroupConfig(), job.getConfig().getId(), upgradedDocumentID.get()), + response.afterKey(), response.getBuckets().isEmpty()); + } + /** * Creates a skeleton {@link CompositeAggregationBuilder} from the provided job config. * @param config The config for the job. @@ -391,24 +136,14 @@ public abstract class RollupIndexer { */ private CompositeAggregationBuilder createCompositeBuilder(RollupJobConfig config) { final GroupConfig groupConfig = config.getGroupConfig(); - List> builders = new ArrayList<>(); - Map metadata = new HashMap<>(); - - // Add all the agg builders to our request in order: date_histo -> histo -> terms - if (groupConfig != null) { - builders.addAll(groupConfig.getDateHistogram().toBuilders()); - metadata.putAll(groupConfig.getDateHistogram().getMetadata()); - if (groupConfig.getHistogram() != null) { - builders.addAll(groupConfig.getHistogram().toBuilders()); - metadata.putAll(groupConfig.getHistogram().getMetadata()); - } - if (groupConfig.getTerms() != null) { - builders.addAll(groupConfig.getTerms().toBuilders()); - } - } + List> builders = createValueSourceBuilders(groupConfig); CompositeAggregationBuilder composite = new CompositeAggregationBuilder(AGGREGATION_NAME, builders); - config.getMetricsConfig().forEach(m -> m.toBuilders().forEach(composite::subAggregation)); + + List aggregations = createAggregationBuilders(config.getMetricsConfig()); + aggregations.forEach(composite::subAggregation); + + final Map metadata = createMetadata(groupConfig); if (metadata.isEmpty() == false) { composite.setMetaData(metadata); } @@ -441,5 +176,127 @@ public abstract class RollupIndexer { .format("epoch_millis"); return query; } + + static Map createMetadata(final GroupConfig groupConfig) { + final Map metadata = new HashMap<>(); + if (groupConfig != null) { + // Add all the metadata in order: date_histo -> histo + final DateHistogramGroupConfig dateHistogram = groupConfig.getDateHistogram(); + metadata.put(RollupField.formatMetaField(RollupField.INTERVAL), dateHistogram.getInterval().toString()); + + final HistogramGroupConfig histogram = groupConfig.getHistogram(); + if (histogram != null) { + metadata.put(RollupField.formatMetaField(RollupField.INTERVAL), histogram.getInterval()); + } + } + return metadata; + } + + public static List> createValueSourceBuilders(final GroupConfig groupConfig) { + final List> builders = new ArrayList<>(); + // Add all the agg builders to our request in order: date_histo -> histo -> terms + if (groupConfig != null) { + final DateHistogramGroupConfig dateHistogram = 
groupConfig.getDateHistogram(); + builders.addAll(createValueSourceBuilders(dateHistogram)); + + final HistogramGroupConfig histogram = groupConfig.getHistogram(); + builders.addAll(createValueSourceBuilders(histogram)); + + final TermsGroupConfig terms = groupConfig.getTerms(); + builders.addAll(createValueSourceBuilders(terms)); + } + return Collections.unmodifiableList(builders); + } + + public static List> createValueSourceBuilders(final DateHistogramGroupConfig dateHistogram) { + final String dateHistogramField = dateHistogram.getField(); + final String dateHistogramName = RollupField.formatIndexerAggName(dateHistogramField, DateHistogramAggregationBuilder.NAME); + final DateHistogramValuesSourceBuilder dateHistogramBuilder = new DateHistogramValuesSourceBuilder(dateHistogramName); + dateHistogramBuilder.dateHistogramInterval(dateHistogram.getInterval()); + dateHistogramBuilder.field(dateHistogramField); + dateHistogramBuilder.timeZone(toDateTimeZone(dateHistogram.getTimeZone())); + return Collections.singletonList(dateHistogramBuilder); + } + + public static List> createValueSourceBuilders(final HistogramGroupConfig histogram) { + final List> builders = new ArrayList<>(); + if (histogram != null) { + for (String field : histogram.getFields()) { + final String histogramName = RollupField.formatIndexerAggName(field, HistogramAggregationBuilder.NAME); + final HistogramValuesSourceBuilder histogramBuilder = new HistogramValuesSourceBuilder(histogramName); + histogramBuilder.interval(histogram.getInterval()); + histogramBuilder.field(field); + histogramBuilder.missingBucket(true); + builders.add(histogramBuilder); + } + } + return Collections.unmodifiableList(builders); + } + + public static List> createValueSourceBuilders(final TermsGroupConfig terms) { + final List> builders = new ArrayList<>(); + if (terms != null) { + for (String field : terms.getFields()) { + final String termsName = RollupField.formatIndexerAggName(field, TermsAggregationBuilder.NAME); + final TermsValuesSourceBuilder termsBuilder = new TermsValuesSourceBuilder(termsName); + termsBuilder.field(field); + termsBuilder.missingBucket(true); + builders.add(termsBuilder); + } + } + return Collections.unmodifiableList(builders); + } + + /** + * This returns a set of aggregation builders which represent the configured + * set of metrics. Used to iterate over historical data. 
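The `createAggregationBuilders` method that follows expands each configured metric into concrete builders; the notable case is `avg`, which is persisted as a sum plus a value_count so averages can be recombined exactly at query time rather than averaging per-bucket averages. A toy rendering of that expansion (the output strings are illustrative, not the real `formatFieldName` format):

```java
import java.util.ArrayList;
import java.util.List;

final class MetricExpansion {

    /** One configured metric can expand to more than one stored aggregation. */
    static List<String> expand(String field, String metric) {
        List<String> aggs = new ArrayList<>();
        switch (metric) {
            case "avg" -> { // avg = sum + value_count, recombined at search time
                aggs.add(field + ".avg.value  (sum)");
                aggs.add(field + ".avg._count (value_count)");
            }
            case "min", "max", "sum", "value_count" -> aggs.add(field + "." + metric + ".value");
            default -> throw new IllegalArgumentException("Unsupported metric type [" + metric + "]");
        }
        return aggs;
    }

    public static void main(String[] args) {
        System.out.println(expand("price", "avg")); // two stored aggs for one metric
        System.out.println(expand("price", "max")); // one
    }
}
```

Recombining `sum / value_count` at query time yields the exact average over the raw documents, which an average of bucket averages would not.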
+ */ + static List createAggregationBuilders(final List metricsConfigs) { + final List builders = new ArrayList<>(); + if (metricsConfigs != null) { + for (MetricConfig metricConfig : metricsConfigs) { + final List metrics = metricConfig.getMetrics(); + if (metrics.isEmpty() == false) { + final String field = metricConfig.getField(); + for (String metric : metrics) { + ValuesSourceAggregationBuilder.LeafOnly newBuilder; + if (metric.equals(MetricConfig.MIN.getPreferredName())) { + newBuilder = new MinAggregationBuilder(formatFieldName(field, MinAggregationBuilder.NAME, RollupField.VALUE)); + } else if (metric.equals(MetricConfig.MAX.getPreferredName())) { + newBuilder = new MaxAggregationBuilder(formatFieldName(field, MaxAggregationBuilder.NAME, RollupField.VALUE)); + } else if (metric.equals(MetricConfig.AVG.getPreferredName())) { + // Avgs are sum + count + newBuilder = new SumAggregationBuilder(formatFieldName(field, AvgAggregationBuilder.NAME, RollupField.VALUE)); + ValuesSourceAggregationBuilder.LeafOnly countBuilder + = new ValueCountAggregationBuilder( + formatFieldName(field, AvgAggregationBuilder.NAME, RollupField.COUNT_FIELD), ValueType.NUMERIC); + countBuilder.field(field); + builders.add(countBuilder); + } else if (metric.equals(MetricConfig.SUM.getPreferredName())) { + newBuilder = new SumAggregationBuilder(formatFieldName(field, SumAggregationBuilder.NAME, RollupField.VALUE)); + } else if (metric.equals(MetricConfig.VALUE_COUNT.getPreferredName())) { + // TODO allow non-numeric value_counts. + // Hardcoding this is fine for now since the job validation guarantees that all metric fields are numerics + newBuilder = new ValueCountAggregationBuilder( + formatFieldName(field, ValueCountAggregationBuilder.NAME, RollupField.VALUE), ValueType.NUMERIC); + } else { + throw new IllegalArgumentException("Unsupported metric type [" + metric + "]"); + } + newBuilder.field(field); + builders.add(newBuilder); + } + } + } + } + return Collections.unmodifiableList(builders); + } + + private static DateTimeZone toDateTimeZone(final String timezone) { + try { + return DateTimeZone.forOffsetHours(Integer.parseInt(timezone)); + } catch (NumberFormatException e) { + return DateTimeZone.forID(timezone); + } + } } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index 65362f9ad9d..4a4b53575b2 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -25,13 +25,13 @@ import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction; import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; import 
org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.rollup.Rollup; @@ -218,7 +218,7 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE * Gets the stats for this task. * @return The stats of this task */ - public RollupJobStats getStats() { + public RollupIndexerJobStats getStats() { return indexer.getStats(); } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestGetRollupJobsAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestGetRollupJobsAction.java index 00aeb0d06ab..fcc1f2c4f57 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestGetRollupJobsAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestGetRollupJobsAction.java @@ -12,21 +12,19 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.rollup.Rollup; import org.elasticsearch.xpack.core.rollup.action.GetRollupJobsAction; - -import java.io.IOException; +import org.elasticsearch.xpack.rollup.Rollup; public class RestGetRollupJobsAction extends BaseRestHandler { public static final ParseField ID = new ParseField("id"); public RestGetRollupJobsAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(RestRequest.Method.GET, Rollup.BASE_PATH + "job/{id}/", this); + controller.registerHandler(RestRequest.Method.GET, Rollup.BASE_PATH + "job/{id}/", this); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { String id = restRequest.param(ID.getPreferredName()); GetRollupJobsAction.Request request = new GetRollupJobsAction.Request(id); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java index 3235d0c39e2..95161e0d149 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java @@ -9,12 +9,13 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggre import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import 
org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; @@ -24,17 +25,22 @@ import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.joda.time.DateTimeZone; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class RollupJobIdentifierUtilTests extends ESTestCase { + private static final List UNITS = new ArrayList<>(DateHistogramAggregationBuilder.DATE_FIELD_UNITS.keySet()); + public void testOneMatch() { final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); @@ -61,6 +67,32 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { assertThat(bestCaps.size(), equalTo(1)); } + public void testBiggerButCompatibleFixedInterval() { + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("100s"))); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") + .dateHistogramInterval(new DateHistogramInterval("1000s")); + + Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); + assertThat(bestCaps.size(), equalTo(1)); + } + + public void testBiggerButCompatibleFixedMillisInterval() { + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("100ms"))); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") + .interval(1000); + + Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); + assertThat(bestCaps.size(), equalTo(1)); + } + public void testIncompatibleInterval() { final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); @@ -75,6 +107,20 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { "[foo] which also satisfies all requirements of query.")); } + public void testIncompatibleFixedCalendarInterval() { + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("5d"))); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new 
DateHistogramAggregationBuilder("foo").field("foo") + .dateHistogramInterval(new DateHistogramInterval("day")); + + RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps)); + assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field " + + "[foo] which also satisfies all requirements of query.")); + } + public void testBadTimeZone() { final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "EST")); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); @@ -385,6 +431,27 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { "[bar] which also satisfies all requirements of query.")); } + public void testHistoIntervalNotMultiple() { + HistogramAggregationBuilder histo = new HistogramAggregationBuilder("test_histo"); + histo.interval(10) // <--- interval is not a multiple of 3 + .field("bar") + .subAggregation(new MaxAggregationBuilder("the_max").field("max_field")) + .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); + + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", + new DateHistogramInterval("1d"), null, "UTC"), + new HistogramGroupConfig(3L, "bar"), + null); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + Exception e = expectThrows(RuntimeException.class, + () -> RollupJobIdentifierUtils.findBestJobs(histo, caps)); + assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [histogram] agg on field " + + "[bar] which also satisfies all requirements of query.")); + } + public void testMissingMetric() { int i = ESTestCase.randomIntBetween(0, 3); @@ -417,6 +484,223 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } + public void testValidateFixedInterval() { + boolean valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(200, new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(1000, new DateHistogramInterval("200ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(5*60*1000, new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(10*5*60*1000, new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("500ms")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("5m")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("minute")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("second")); + assertFalse(valid); + + // ----------- + // Same tests, with both being DateHistoIntervals + // ----------- + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("200ms"), + new 
DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("1000ms"), + new DateHistogramInterval("200ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("5m"), + new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("20m"), + new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("500ms")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("5m")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("minute")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("second")); + assertFalse(valid); + } + + public void testValidateCalendarInterval() { + boolean valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("second"), + new DateHistogramInterval("second")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("minute"), + new DateHistogramInterval("second")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("month"), + new DateHistogramInterval("day")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("1d"), + new DateHistogramInterval("1s")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("second"), + new DateHistogramInterval("minute")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("second"), + new DateHistogramInterval("1m")); + assertFalse(valid); + + // Fails because both are actually fixed + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("100ms")); + assertFalse(valid); + } + + public void testComparatorMixed() { + int numCaps = randomIntBetween(1, 10); + List caps = new ArrayList<>(numCaps); + + for (int i = 0; i < numCaps; i++) { + DateHistogramInterval interval = getRandomInterval(); + GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", interval)); + RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + caps.add(cap); + } + + caps.sort(RollupJobIdentifierUtils.COMPARATOR); + + // This only tests for calendar/fixed ordering, ignoring the other criteria + for (int i = 1; i < numCaps; i++) { + RollupJobCaps a = caps.get(i - 1); + RollupJobCaps b = caps.get(i); + long aMillis = getMillis(a); + long bMillis = getMillis(b); + + assertThat(aMillis, greaterThanOrEqualTo(bMillis)); + + } + } + + public void testComparatorFixed() { + int numCaps = randomIntBetween(1, 10); + List caps = new ArrayList<>(numCaps); + + for (int i = 0; i < numCaps; i++) { + DateHistogramInterval interval = getRandomFixedInterval(); + GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", interval)); + RollupJobConfig job = new 
RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + caps.add(cap); + } + + caps.sort(RollupJobIdentifierUtils.COMPARATOR); + + // This only tests for fixed ordering, ignoring the other criteria + for (int i = 1; i < numCaps; i++) { + RollupJobCaps a = caps.get(i - 1); + RollupJobCaps b = caps.get(i); + long aMillis = getMillis(a); + long bMillis = getMillis(b); + + assertThat(aMillis, greaterThanOrEqualTo(bMillis)); + + } + } + + public void testComparatorCalendar() { + int numCaps = randomIntBetween(1, 10); + List caps = new ArrayList<>(numCaps); + + for (int i = 0; i < numCaps; i++) { + DateHistogramInterval interval = getRandomCalendarInterval(); + GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", interval)); + RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + caps.add(cap); + } + + caps.sort(RollupJobIdentifierUtils.COMPARATOR); + + // This only tests for calendar ordering, ignoring the other criteria + for (int i = 1; i < numCaps; i++) { + RollupJobCaps a = caps.get(i - 1); + RollupJobCaps b = caps.get(i); + long aMillis = getMillis(a); + long bMillis = getMillis(b); + + assertThat(aMillis, greaterThanOrEqualTo(bMillis)); + + } + } + + private static long getMillis(RollupJobCaps cap) { + for (RollupJobCaps.RollupFieldCaps fieldCaps : cap.getFieldCaps().values()) { + for (Map agg : fieldCaps.getAggs()) { + if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) { + return RollupJobIdentifierUtils.getMillisFixedOrCalendar((String) agg.get(RollupField.INTERVAL)); + } + } + } + return Long.MAX_VALUE; + } + + private static DateHistogramInterval getRandomInterval() { + if (randomBoolean()) { + return getRandomFixedInterval(); + } + return getRandomCalendarInterval(); + } + + private static DateHistogramInterval getRandomFixedInterval() { + int value = randomIntBetween(1, 1000); + String unit; + int randomValue = randomInt(4); + if (randomValue == 0) { + unit = "ms"; + } else if (randomValue == 1) { + unit = "s"; + } else if (randomValue == 2) { + unit = "m"; + } else if (randomValue == 3) { + unit = "h"; + } else { + unit = "d"; + } + return new DateHistogramInterval(Integer.toString(value) + unit); + } + + private static DateHistogramInterval getRandomCalendarInterval() { + return new DateHistogramInterval(UNITS.get(randomIntBetween(0, UNITS.size()-1))); + } + private Set singletonSet(RollupJobCaps cap) { Set caps = new HashSet<>(); caps.add(cap); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java index a618e8b4e6f..c72808bba37 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java @@ -19,11 +19,11 @@ import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.range.GeoDistanceAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import 
org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.StatsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.test.ESTestCase; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java index 35d9f0d133a..0a133cc8e07 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java @@ -54,16 +54,16 @@ import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregati import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.Avg; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg; -import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.max.InternalMax; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.sum.InternalSum; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.Avg; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.InternalAvg; +import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.InternalMax; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.InternalSum; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.xpack.core.rollup.RollupField; @@ -198,10 +198,11 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { 
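 // combineResponses() now treats an msearch response that carries no aggregations as
 // legitimate: it returns an empty Aggregations list instead of throwing
 // "Expected to find aggregations in rollup response", as the updated assertions show.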
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); ScriptService scriptService = mock(ScriptService.class); - Exception e = expectThrows(RuntimeException.class, - () -> RollupResponseTranslator.combineResponses(msearch, - new InternalAggregation.ReduceContext(bigArrays, scriptService, true))); - assertThat(e.getMessage(), equalTo("Expected to find aggregations in rollup response, but none found.")); + SearchResponse response = RollupResponseTranslator.combineResponses(msearch, + new InternalAggregation.ReduceContext(bigArrays, scriptService, true)); + assertNotNull(response); + Aggregations responseAggs = response.getAggregations(); + assertThat(responseAggs.asList().size(), equalTo(0)); } public void testMissingRolledIndex() { @@ -511,7 +512,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { ClassCastException e = expectThrows(ClassCastException.class, () -> RollupResponseTranslator.combineResponses(msearch, reduceContext)); assertThat(e.getMessage(), - containsString("org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBounds")); + containsString("org.elasticsearch.search.aggregations.metrics.InternalGeoBounds")); assertThat(e.getMessage(), containsString("org.elasticsearch.search.aggregations.InternalMultiBucketAggregation")); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java index 5599c50321c..3d346456ea9 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java @@ -180,8 +180,9 @@ public class PutJobStateMachineTests extends ESTestCase { ActionListener testListener = ActionListener.wrap(response -> { fail("Listener success should not have been triggered."); }, e -> { - assertThat(e.getMessage(), equalTo("Expected to find _meta key in mapping of rollup index [" - + job.getConfig().getRollupIndex() + "] but not found.")); + assertThat(e.getMessage(), equalTo("Rollup data cannot be added to existing indices that contain " + + "non-rollup data (expected to find _meta key in mapping of rollup index [" + + job.getConfig().getRollupIndex() + "] but not found).")); }); Logger logger = mock(Logger.class); @@ -206,6 +207,44 @@ public class PutJobStateMachineTests extends ESTestCase { verify(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), any()); } + @SuppressWarnings("unchecked") + public void testMetadataButNotRollup() { + RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); + + ActionListener testListener = ActionListener.wrap(response -> { + fail("Listener success should not have been triggered."); + }, e -> { + assertThat(e.getMessage(), equalTo("Rollup data cannot be added to existing indices that contain " + + "non-rollup data (expected to find rollup meta key [_rollup] in mapping of rollup index [" + + job.getConfig().getRollupIndex() + "] but not found).")); + }); + + Logger logger = mock(Logger.class); + Client client = mock(Client.class); + + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(ActionListener.class); + doAnswer(invocation -> { + GetMappingsResponse response = mock(GetMappingsResponse.class); + Map m = new HashMap<>(2); + m.put("random", + 
Collections.singletonMap(job.getConfig().getId(), job.getConfig())); + MappingMetaData meta = new MappingMetaData(RollupField.TYPE_NAME, + Collections.singletonMap("_meta", m)); + ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(1); + builder.put(RollupField.TYPE_NAME, meta); + + ImmutableOpenMap.Builder> builder2 = ImmutableOpenMap.builder(1); + builder2.put(job.getConfig().getRollupIndex(), builder.build()); + + when(response.getMappings()).thenReturn(builder2.build()); + requestCaptor.getValue().onResponse(response); + return null; + }).when(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), requestCaptor.capture()); + + TransportPutRollupJobAction.updateMapping(job, testListener, mock(PersistentTasksService.class), client, logger); + verify(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), any()); + } + @SuppressWarnings("unchecked") public void testNoMappingVersion() { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java index 069e23e4093..d7bb34bb156 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java @@ -36,10 +36,10 @@ import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuil import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.search.aggregations.metrics.avg.Avg; -import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg; -import org.elasticsearch.search.aggregations.metrics.sum.InternalSum; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.Avg; +import org.elasticsearch.search.aggregations.metrics.InternalAvg; +import org.elasticsearch.search.aggregations.metrics.InternalSum; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.suggest.SuggestBuilder; @@ -307,21 +307,22 @@ public class SearchActionTests extends ESTestCase { assertThat(e.getMessage(), equalTo("Rollup search does not support explaining.")); } - public void testNoAgg() { - String[] normalIndices = new String[]{randomAlphaOfLength(10)}; + public void testNoRollupAgg() { + String[] normalIndices = new String[]{}; String[] rollupIndices = new String[]{randomAlphaOfLength(10)}; TransportRollupSearchAction.RollupSearchContext ctx = new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, Collections.emptySet()); SearchSourceBuilder source = new SearchSourceBuilder(); source.query(new MatchAllQueryBuilder()); source.size(0); - SearchRequest request = new SearchRequest(normalIndices, source); + SearchRequest request = new SearchRequest(rollupIndices, source); NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class); - Exception e = expectThrows(IllegalArgumentException.class, - () 
-> TransportRollupSearchAction.createMSearchRequest(request, registry, ctx));
-        assertThat(e.getMessage(), equalTo("Rollup requires at least one aggregation to be set."));
+        MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, registry, ctx);
+        assertThat(msearch.requests().size(), equalTo(1));
+        assertThat(msearch.requests().get(0), equalTo(request));
     }
+
     public void testNoLiveNoRollup() {
         String[] normalIndices = new String[0];
         String[] rollupIndices = new String[0];
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/job/RollupIndexTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/job/RollupIndexTests.java
new file mode 100644
index 00000000000..c0ba74e762d
--- /dev/null
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/job/RollupIndexTests.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.rollup.action.job;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.fieldcaps.FieldCapabilities;
+import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig;
+import org.elasticsearch.xpack.rollup.job.RollupIndexer;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class RollupIndexTests extends ESTestCase {
+
+    public void testValidateMatchingField() {
+        ActionRequestValidationException e = new ActionRequestValidationException();
+        Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();
+        String type = getRandomType();
+
+        // Have to mock fieldcaps because the ctors aren't public...
+        FieldCapabilities fieldCaps = mock(FieldCapabilities.class);
+        when(fieldCaps.isAggregatable()).thenReturn(true);
+        responseMap.put("my_field", Collections.singletonMap(type, fieldCaps));
+
+        TermsGroupConfig config = new TermsGroupConfig("my_field");
+        config.validateMappings(responseMap, e);
+        if (e.validationErrors().size() != 0) {
+            fail(e.getMessage());
+        }
+
+        List<CompositeValuesSourceBuilder<?>> builders = RollupIndexer.createValueSourceBuilders(config);
+        assertThat(builders.size(), equalTo(1));
+    }
+
+    public void testValidateFieldMatchingNotAggregatable() {
+        ActionRequestValidationException e = new ActionRequestValidationException();
+        Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();
+
+        // Have to mock fieldcaps because the ctors aren't public...
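+        // (FieldCapabilities instances normally arrive in a FieldCapabilitiesResponse from the
+        // field_caps API; the mock only needs to answer isAggregatable(), since that is the
+        // flag validateMappings() inspects.)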
+ FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(false); + responseMap.put("my_field", Collections.singletonMap(getRandomType(), fieldCaps)); + + TermsGroupConfig config = new TermsGroupConfig("my_field"); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("The field [my_field] must be aggregatable across all indices, but is not.")); + } + + private String getRandomType() { + int n = randomIntBetween(0,8); + if (n == 0) { + return "keyword"; + } else if (n == 1) { + return "text"; + } else if (n == 2) { + return "long"; + } else if (n == 3) { + return "integer"; + } else if (n == 4) { + return "short"; + } else if (n == 5) { + return "float"; + } else if (n == 6) { + return "double"; + } else if (n == 7) { + return "scaled_float"; + } else if (n == 8) { + return "half_float"; + } + return "long"; + } +} diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java index e8c66f7e8c1..f5d335ca6f1 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorTestCase; @@ -32,15 +33,15 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggre import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.MetricConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.joda.time.DateTime; import org.mockito.stubbing.Answer; @@ -57,6 +58,7 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomDateHistogramGroupConfig; import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomGroupConfig; import static 
org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomHistogramGroupConfig; +import static org.elasticsearch.xpack.rollup.job.RollupIndexer.createAggregationBuilders; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -64,7 +66,7 @@ import static org.mockito.Mockito.when; public class IndexerUtilsTests extends AggregatorTestCase { public void testMissingFields() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats = new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0); String timestampField = "the_histo"; String valueField = "the_avg"; @@ -101,9 +103,11 @@ public class IndexerUtilsTests extends AggregatorTestCase { //TODO swap this over to DateHistoConfig.Builder once DateInterval is in DateHistogramGroupConfig dateHistoGroupConfig = new DateHistogramGroupConfig(timestampField, DateHistogramInterval.DAY); CompositeAggregationBuilder compositeBuilder = - new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME, dateHistoGroupConfig.toBuilders()); + new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME, + RollupIndexer.createValueSourceBuilders(dateHistoGroupConfig)); MetricConfig metricConfig = new MetricConfig("does_not_exist", singletonList("max")); - metricConfig.toBuilders().forEach(compositeBuilder::subAggregation); + List metricAgg = createAggregationBuilders(singletonList(metricConfig)); + metricAgg.forEach(compositeBuilder::subAggregation); Aggregator aggregator = createAggregator(compositeBuilder, indexSearcher, timestampFieldType, valueFieldType); aggregator.preCollection(); @@ -126,7 +130,7 @@ public class IndexerUtilsTests extends AggregatorTestCase { public void testCorrectFields() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats= new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0); String timestampField = "the_histo"; String valueField = "the_avg"; @@ -170,7 +174,8 @@ public class IndexerUtilsTests extends AggregatorTestCase { singletonList(dateHisto)); MetricConfig metricConfig = new MetricConfig(valueField, singletonList("max")); - metricConfig.toBuilders().forEach(compositeBuilder::subAggregation); + List metricAgg = createAggregationBuilders(singletonList(metricConfig)); + metricAgg.forEach(compositeBuilder::subAggregation); Aggregator aggregator = createAggregator(compositeBuilder, indexSearcher, timestampFieldType, valueFieldType); aggregator.preCollection(); @@ -193,7 +198,7 @@ public class IndexerUtilsTests extends AggregatorTestCase { public void testNumericTerms() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats= new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats= new RollupIndexerJobStats(0, 0, 0, 0); String timestampField = "the_histo"; String valueField = "the_avg"; @@ -226,7 +231,8 @@ public class IndexerUtilsTests extends AggregatorTestCase { singletonList(terms)); MetricConfig metricConfig = new MetricConfig(valueField, singletonList("max")); - metricConfig.toBuilders().forEach(compositeBuilder::subAggregation); + List metricAgg = createAggregationBuilders(singletonList(metricConfig)); + metricAgg.forEach(compositeBuilder::subAggregation); Aggregator aggregator = createAggregator(compositeBuilder, indexSearcher, valueFieldType); aggregator.preCollection(); @@ -249,7 +255,7 @@ public class IndexerUtilsTests extends 
AggregatorTestCase {
     public void testEmptyCounts() throws IOException {
         String indexName = randomAlphaOfLengthBetween(1, 10);
-        RollupJobStats stats= new RollupJobStats(0, 0, 0, 0);
+        RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0);
         String timestampField = "ts";
         String valueField = "the_avg";
@@ -292,7 +298,8 @@ public class IndexerUtilsTests extends AggregatorTestCase {
             singletonList(dateHisto));
         MetricConfig metricConfig = new MetricConfig("another_field", Arrays.asList("avg", "sum"));
-        metricConfig.toBuilders().forEach(compositeBuilder::subAggregation);
+        List metricAgg = createAggregationBuilders(singletonList(metricConfig));
+        metricAgg.forEach(compositeBuilder::subAggregation);
         Aggregator aggregator = createAggregator(compositeBuilder, indexSearcher, timestampFieldType, valueFieldType);
         aggregator.preCollection();
@@ -355,7 +362,7 @@ public class IndexerUtilsTests extends AggregatorTestCase {
         // The content of the config doesn't actually matter for this test
         // because the test is just looking at agg keys
         GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(123L, "abc"), null);
-        List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", false);
+        List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), groupConfig, "foo", false);
         assertThat(docs.size(), equalTo(1));
         assertThat(docs.get(0).id(), equalTo("1237859798"));
     }
@@ -399,7 +406,7 @@ public class IndexerUtilsTests extends AggregatorTestCase {
         });
         GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(1L, "abc"), null);
-        List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", true);
+        List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), groupConfig, "foo", true);
         assertThat(docs.size(), equalTo(1));
         assertThat(docs.get(0).id(), equalTo("foo$c9LcrFqeFW92uN_Z7sv1hA"));
     }
@@ -449,7 +456,7 @@ public class IndexerUtilsTests extends AggregatorTestCase {
         });
         GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(1, "abc"), null);
-        List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", true);
+        List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), groupConfig, "foo", true);
         assertThat(docs.size(), equalTo(1));
         assertThat(docs.get(0).id(), equalTo("foo$VAFKZpyaEqYRPLyic57_qw"));
     }
@@ -476,14 +483,15 @@ public class IndexerUtilsTests extends AggregatorTestCase {
         });
         GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), randomHistogramGroupConfig(random()), null);
-        List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", randomBoolean());
+        List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(),
+            groupConfig, "foo", randomBoolean());
         assertThat(docs.size(), equalTo(1));
         assertFalse(Strings.isNullOrEmpty(docs.get(0).id()));
     }

     public void testMissingBuckets() throws IOException {
         String indexName = randomAlphaOfLengthBetween(1, 10);
-        RollupJobStats stats= new RollupJobStats(0, 0, 0, 0);
+        RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0);
         String metricField = "metric_field";
         String valueField = "value_field";
@@ -523,11 +531,13 @@ public class IndexerUtilsTests extends AggregatorTestCase {
         // Setup the composite 
agg TermsGroupConfig termsGroupConfig = new TermsGroupConfig(valueField); - CompositeAggregationBuilder compositeBuilder = new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME, - termsGroupConfig.toBuilders()).size(numDocs*2); + CompositeAggregationBuilder compositeBuilder = + new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME, RollupIndexer.createValueSourceBuilders(termsGroupConfig)) + .size(numDocs*2); MetricConfig metricConfig = new MetricConfig(metricField, singletonList("max")); - metricConfig.toBuilders().forEach(compositeBuilder::subAggregation); + List metricAgg = createAggregationBuilders(singletonList(metricConfig)); + metricAgg.forEach(compositeBuilder::subAggregation); Aggregator aggregator = createAggregator(compositeBuilder, indexSearcher, valueFieldType, metricFieldType); aggregator.preCollection(); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index 6d29ee9f9ba..55f1cfbdbb2 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -50,10 +50,10 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInter import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.MetricConfig; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.junit.Before; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java index 955dcbc2beb..c74ecbadf4f 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java @@ -23,8 +23,8 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.RollupJob; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; import org.mockito.stubbing.Answer; @@ -639,7 +639,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // Note: no docs were indexed - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -743,7 +743,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // Note: no docs were indexed - 
assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -763,7 +763,7 @@ public class RollupIndexerStateTests extends ESTestCase { Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); Consumer failureConsumer = e -> { - assertThat(e.getMessage(), startsWith("Shard failures encountered while running indexer for rollup job")); + assertThat(e.getMessage(), startsWith("Shard failures encountered while running indexer for job")); isFinished.set(true); }; @@ -786,7 +786,7 @@ public class RollupIndexerStateTests extends ESTestCase { // Note: no pages processed, no docs were indexed assertThat(indexer.getStats().getNumPages(), equalTo(0L)); - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -896,7 +896,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // Note: no docs were indexed - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerTests.java new file mode 100644 index 00000000000..5ab85e2ffa7 --- /dev/null +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.rollup.job; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; +import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.GroupConfig; +import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; + +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class RollupIndexerTests extends ESTestCase { + + public void testCreateMetadataNoGroupConfig() { + final Map metadata = RollupIndexer.createMetadata(null); + assertNotNull(metadata); + assertTrue(metadata.isEmpty()); + } + + public void testCreateMetadataWithDateHistogramGroupConfigOnly() { + final DateHistogramGroupConfig dateHistogram = ConfigTestHelpers.randomDateHistogramGroupConfig(random()); + final GroupConfig groupConfig = new GroupConfig(dateHistogram); + + final Map metadata = RollupIndexer.createMetadata(groupConfig); + assertEquals(1, metadata.size()); + assertTrue(metadata.containsKey("_rollup.interval")); + Object value = metadata.get("_rollup.interval"); + assertThat(value, equalTo(dateHistogram.getInterval().toString())); + } + + public void testCreateMetadata() { + final DateHistogramGroupConfig dateHistogram = ConfigTestHelpers.randomDateHistogramGroupConfig(random()); + final HistogramGroupConfig histogram = ConfigTestHelpers.randomHistogramGroupConfig(random()); + final GroupConfig groupConfig = new GroupConfig(dateHistogram, histogram, null); + + final Map metadata = RollupIndexer.createMetadata(groupConfig); + assertEquals(1, metadata.size()); + assertTrue(metadata.containsKey("_rollup.interval")); + Object value = metadata.get("_rollup.interval"); + assertThat(value, equalTo(histogram.getInterval())); + } +} + diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index 13290f09e8e..a47d057b5d5 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.node.Node; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.search.aggregations.Aggregations; @@ -19,11 +20,11 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction; import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; @@ -47,6 +48,9 @@ import static org.mockito.Mockito.when; public class RollupJobTaskTests 
extends ESTestCase { + private static final Settings SETTINGS = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "test") + .build(); private static ThreadPool pool = new TestThreadPool("test"); @AfterClass @@ -62,7 +66,7 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -75,7 +79,7 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobStatus status = new RollupJobStatus(IndexerState.ABORTING, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -88,7 +92,7 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPING, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -101,7 +105,7 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobStatus status = new RollupJobStatus(IndexerState.STARTED, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); @@ -114,7 +118,7 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobStatus status = new RollupJobStatus(IndexerState.INDEXING, Collections.singletonMap("foo", "bar"), false); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, 
client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); @@ -128,7 +132,7 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobStatus status = new RollupJobStatus(IndexerState.INDEXING, Collections.singletonMap("foo", "bar"), true); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); @@ -141,7 +145,7 @@ public class RollupJobTaskTests extends ESTestCase { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -154,7 +158,7 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobStatus status = new RollupJobStatus(IndexerState.STARTED, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); @@ -641,7 +645,7 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, null, randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -748,7 +752,7 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, null, randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); CountDownLatch latch = new CountDownLatch(2); diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index 6db533bbecf..71b22531cca 100644 --- a/x-pack/plugin/security/build.gradle +++ 
b/x-pack/plugin/security/build.gradle
@@ -1,6 +1,7 @@
 evaluationDependsOn(xpackModule('core'))

 apply plugin: 'elasticsearch.esplugin'
+apply plugin: 'nebula.maven-scm'
 esplugin {
     name 'x-pack-security'
     description 'Elasticsearch Expanded Pack Plugin - Security'
@@ -12,7 +13,8 @@ esplugin {
 archivesBaseName = 'x-pack-security'

 dependencies {
-    compileOnly project(path: xpackModule('core'), configuration: 'shadow')
+    // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with IDEA because the testArtifacts are also here
+    compileOnly project(path: xpackModule('core'), configuration: 'default')
     compileOnly project(path: ':modules:transport-netty4', configuration: 'runtime')
     compileOnly project(path: ':plugins:transport-nio', configuration: 'runtime')
@@ -157,8 +159,7 @@ forbiddenPatterns {
 }

 forbiddenApisMain {
-    signaturesURLs += file('forbidden/ldap-signatures.txt').toURI().toURL()
-    signaturesURLs += file('forbidden/xml-signatures.txt').toURI().toURL()
+    signaturesFiles += files('forbidden/ldap-signatures.txt', 'forbidden/xml-signatures.txt')
 }

 // classes are missing, e.g. com.ibm.icu.lang.UCharacter
@@ -241,7 +242,7 @@ thirdPartyAudit.excludes = [
     'javax.persistence.EntityManagerFactory',
     'javax.persistence.EntityTransaction',
     'javax.persistence.LockModeType',
-    'javax/persistence/Query',
+    'javax.persistence.Query',
     // [missing classes] OpenSAML storage and HttpClient cache have optional memcache support
     'net.spy.memcached.CASResponse',
     'net.spy.memcached.CASValue',
@@ -265,7 +266,7 @@ thirdPartyAudit.excludes = [
     'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1',
 ]

-if (JavaVersion.current() > JavaVersion.VERSION_1_8) {
+if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) {
     thirdPartyAudit.excludes += [
         'javax.xml.bind.JAXBContext',
         'javax.xml.bind.JAXBElement',
diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle
index 1a00b2a0340..377d10ec7f2 100644
--- a/x-pack/plugin/security/cli/build.gradle
+++ b/x-pack/plugin/security/cli/build.gradle
@@ -1,12 +1,15 @@
+import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask
+
 apply plugin: 'elasticsearch.build'

 archivesBaseName = 'elasticsearch-security-cli'

 dependencies {
     compileOnly "org.elasticsearch:elasticsearch:${version}"
-    compileOnly project(path: xpackModule('core'), configuration: 'shadow')
-    compile 'org.bouncycastle:bcprov-jdk15on:1.59'
+    // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with IDEA because the testArtifacts are also here
+    compileOnly project(path: xpackModule('core'), configuration: 'default')
     compile 'org.bouncycastle:bcpkix-jdk15on:1.59'
+    compile 'org.bouncycastle:bcprov-jdk15on:1.59'
     testImplementation 'com.google.jimfs:jimfs:1.1'
     testCompile "junit:junit:${versions.junit}"
     testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
@@ -19,6 +22,14 @@ dependencyLicenses {
     mapping from: /bc.*/, to: 'bouncycastle'
 }

-if (inFipsJvm) {
+if (project.inFipsJvm) {
     test.enabled = false
-}
\ No newline at end of file
+    // Forbidden APIs non-portable checks fail because bouncy castle classes are being used from the FIPS JDK since those are
+    // not part of the Java specification - all of this is as designed, so we have to relax this check for FIPS.
+    tasks.withType(ForbiddenApisCliTask) {
+        bundledSignatures -= "jdk-non-portable"
+    }
+    // FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit,
+    // rather than provide a long list of exclusions, disable the check on FIPS.
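+    // (Disabling here is scoped to this project only; every other project keeps the
+    // third party audit enabled, FIPS JVM or not.)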
+ thirdPartyAudit.enabled = false +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java index cd5a720eef1..28f2756cf26 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java @@ -13,12 +13,6 @@ import org.elasticsearch.xpack.core.XPackSettings; public class FIPS140JKSKeystoreBootstrapCheck implements BootstrapCheck { - private final boolean fipsModeEnabled; - - FIPS140JKSKeystoreBootstrapCheck(Settings settings) { - this.fipsModeEnabled = XPackSettings.FIPS_MODE_ENABLED.get(settings); - } - /** * Test if the node fails the check. * @@ -28,7 +22,7 @@ public class FIPS140JKSKeystoreBootstrapCheck implements BootstrapCheck { @Override public BootstrapCheckResult check(BootstrapContext context) { - if (fipsModeEnabled) { + if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings)) { final Settings settings = context.settings; Settings keystoreTypeSettings = settings.filter(k -> k.endsWith("keystore.type")) .filter(k -> settings.get(k).equalsIgnoreCase("jks")); @@ -50,6 +44,6 @@ public class FIPS140JKSKeystoreBootstrapCheck implements BootstrapCheck { @Override public boolean alwaysEnforce() { - return fipsModeEnabled; + return true; } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java index d1bce0dcdd2..957276bdad2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java @@ -10,6 +10,7 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.xpack.core.XPackSettings; import java.util.EnumSet; @@ -21,15 +22,9 @@ final class FIPS140LicenseBootstrapCheck implements BootstrapCheck { static final EnumSet ALLOWED_LICENSE_OPERATION_MODES = EnumSet.of(License.OperationMode.PLATINUM, License.OperationMode.TRIAL); - private final boolean isInFipsMode; - - FIPS140LicenseBootstrapCheck(boolean isInFipsMode) { - this.isInFipsMode = isInFipsMode; - } - @Override public BootstrapCheckResult check(BootstrapContext context) { - if (isInFipsMode) { + if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings)) { License license = LicenseService.getLicense(context.metaData); if (license != null && ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode()) == false) { return BootstrapCheckResult.failure("FIPS mode is only allowed with a Platinum or Trial license"); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java index 751d63be4fb..3faec3d7475 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java @@ -7,19 +7,12 @@ package org.elasticsearch.xpack.security; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.XPackSettings; import java.util.Locale; public class FIPS140PasswordHashingAlgorithmBootstrapCheck implements BootstrapCheck { - private final boolean fipsModeEnabled; - - FIPS140PasswordHashingAlgorithmBootstrapCheck(final Settings settings) { - this.fipsModeEnabled = XPackSettings.FIPS_MODE_ENABLED.get(settings); - } - /** * Test if the node fails the check. * @@ -28,7 +21,7 @@ public class FIPS140PasswordHashingAlgorithmBootstrapCheck implements BootstrapC */ @Override public BootstrapCheckResult check(final BootstrapContext context) { - if (fipsModeEnabled) { + if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings)) { final String selectedAlgorithm = XPackSettings.PASSWORD_HASHING_ALGORITHM.get(context.settings); if (selectedAlgorithm.toLowerCase(Locale.ROOT).startsWith("pbkdf2") == false) { return BootstrapCheckResult.failure("Only PBKDF2 is allowed for password hashing in a FIPS-140 JVM. Please set the " + diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 02910b5dd74..363cc7bb882 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -121,9 +121,6 @@ import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessCo import org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexSearcherWrapper; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; -import org.elasticsearch.xpack.security.action.privilege.TransportDeletePrivilegesAction; -import org.elasticsearch.xpack.security.action.privilege.TransportGetPrivilegesAction; -import org.elasticsearch.xpack.security.action.privilege.TransportPutPrivilegesAction; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; import org.elasticsearch.xpack.core.security.support.Automatons; @@ -143,6 +140,9 @@ import org.elasticsearch.xpack.security.action.interceptor.RequestInterceptor; import org.elasticsearch.xpack.security.action.interceptor.ResizeRequestInterceptor; import org.elasticsearch.xpack.security.action.interceptor.SearchRequestInterceptor; import org.elasticsearch.xpack.security.action.interceptor.UpdateRequestInterceptor; +import org.elasticsearch.xpack.security.action.privilege.TransportDeletePrivilegesAction; +import org.elasticsearch.xpack.security.action.privilege.TransportGetPrivilegesAction; +import org.elasticsearch.xpack.security.action.privilege.TransportPutPrivilegesAction; import org.elasticsearch.xpack.security.action.realm.TransportClearRealmCacheAction; import org.elasticsearch.xpack.security.action.role.TransportClearRolesCacheAction; import org.elasticsearch.xpack.security.action.role.TransportDeleteRoleAction; @@ -181,8 +181,8 @@ import org.elasticsearch.xpack.security.authz.AuthorizationService; import 
org.elasticsearch.xpack.security.authz.SecuritySearchOperationListener; import org.elasticsearch.xpack.security.authz.accesscontrol.OptOutQueryCache; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; -import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore; import org.elasticsearch.xpack.security.authz.store.FileRolesStore; +import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore; import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor; import org.elasticsearch.xpack.security.rest.SecurityRestFilter; @@ -257,9 +257,11 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw static final Setting<List<String>> AUDIT_OUTPUTS_SETTING = Setting.listSetting(SecurityField.setting("audit.outputs"), - s -> s.keySet().contains(SecurityField.setting("audit.outputs")) ? - Collections.emptyList() : Collections.singletonList(LoggingAuditTrail.NAME), - Function.identity(), Property.NodeScope); + Function.identity(), + s -> s.keySet().contains(SecurityField.setting("audit.outputs")) + ? Collections.emptyList() + : Collections.singletonList(LoggingAuditTrail.NAME), + Property.NodeScope); private final Settings settings; private final Environment env; @@ -300,9 +302,9 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw new PkiRealmBootstrapCheck(getSslService()), new TLSLicenseBootstrapCheck(), new FIPS140SecureSettingsBootstrapCheck(settings, env), - new FIPS140JKSKeystoreBootstrapCheck(settings), - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings), - new FIPS140LicenseBootstrapCheck(XPackSettings.FIPS_MODE_ENABLED.get(settings)))); + new FIPS140JKSKeystoreBootstrapCheck(), + new FIPS140PasswordHashingAlgorithmBootstrapCheck(), + new FIPS140LicenseBootstrapCheck())); checks.addAll(InternalRealms.getBootstrapChecks(settings, env)); this.bootstrapChecks = Collections.unmodifiableList(checks); Automatons.updateMaxDeterminizedStates(settings); @@ -672,7 +674,8 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw * This impl. disables the query cache if field level security is used for a particular request. If we did not * forcefully overwrite the query cache implementation then we would leave the system vulnerable to leakage of data to unauthorized users.
*/ - module.forceQueryCacheProvider((settings, cache) -> new OptOutQueryCache(settings, cache, threadContext.get())); + module.forceQueryCacheProvider( + (settings, cache) -> new OptOutQueryCache(settings, cache, threadContext.get(), getLicenseState())); } // in order to prevent scroll ids from being maliciously crafted and/or guessed, a listener is added that diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java index d2507d51d0e..9dd18be510f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java @@ -61,7 +61,7 @@ public final class TransportSamlAuthenticateAction extends HandledTransportActio final TimeValue expiresIn = tokenService.getExpirationDelay(); listener.onResponse( new SamlAuthenticateResponse(authentication.getUser().principal(), tokenString, tuple.v2(), expiresIn)); - }, listener::onFailure), tokenMeta); + }, listener::onFailure), tokenMeta, true); }, e -> { logger.debug(() -> new ParameterizedMessage("SamlToken [{}] could not be authenticated", saml), e); listener.onFailure(e); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java index 3e489c69d1d..63931d119e0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutRequest; import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Realm; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; import org.elasticsearch.xpack.security.authc.saml.SamlNameId; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java index 358f6aee712..23aaa9e0d99 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authc.TokenService; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import java.io.IOException; import java.util.Collections; /** @@ -48,29 +49,52 @@ public final class TransportCreateTokenAction extends HandledTransportAction listener) { + CreateTokenRequest.GrantType type = CreateTokenRequest.GrantType.fromString(request.getGrantType()); + assert type 
!= null : "type should have been validated in the action"; + switch (type) { + case PASSWORD: + authenticateAndCreateToken(request, listener); + break; + case CLIENT_CREDENTIALS: + Authentication authentication = Authentication.getAuthentication(threadPool.getThreadContext()); + createToken(request, authentication, authentication, false, listener); + break; + default: + listener.onFailure(new IllegalStateException("grant_type [" + request.getGrantType() + + "] is not supported by the create token action")); + break; + } + } + + private void authenticateAndCreateToken(CreateTokenRequest request, ActionListener listener) { Authentication originatingAuthentication = Authentication.getAuthentication(threadPool.getThreadContext()); try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) { final UsernamePasswordToken authToken = new UsernamePasswordToken(request.getUsername(), request.getPassword()); authenticationService.authenticate(CreateTokenAction.NAME, request, authToken, - ActionListener.wrap(authentication -> { - request.getPassword().close(); - tokenService.createUserToken(authentication, originatingAuthentication, ActionListener.wrap(tuple -> { - final String tokenStr = tokenService.getUserTokenString(tuple.v1()); - final String scope = getResponseScopeValue(request.getScope()); + ActionListener.wrap(authentication -> { + request.getPassword().close(); + createToken(request, authentication, originatingAuthentication, true, listener); + }, e -> { + // clear the request password + request.getPassword().close(); + listener.onFailure(e); + })); + } + } - final CreateTokenResponse response = - new CreateTokenResponse(tokenStr, tokenService.getExpirationDelay(), scope, tuple.v2()); - listener.onResponse(response); - }, e -> { - // clear the request password - request.getPassword().close(); - listener.onFailure(e); - }), Collections.emptyMap()); - }, e -> { - // clear the request password - request.getPassword().close(); - listener.onFailure(e); - })); + private void createToken(CreateTokenRequest request, Authentication authentication, Authentication originatingAuth, + boolean includeRefreshToken, ActionListener listener) { + try { + tokenService.createUserToken(authentication, originatingAuth, ActionListener.wrap(tuple -> { + final String tokenStr = tokenService.getUserTokenString(tuple.v1()); + final String scope = getResponseScopeValue(request.getScope()); + + final CreateTokenResponse response = + new CreateTokenResponse(tokenStr, tokenService.getExpirationDelay(), scope, tuple.v2()); + listener.onResponse(response); + }, listener::onFailure), Collections.emptyMap(), includeRefreshToken); + } catch (IOException e) { + listener.onFailure(e); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java index af56ab8d4eb..57510ce116f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; import 
org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import java.util.function.Supplier; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java index f89745d23e3..7e17cda75f0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.xpack.core.security.action.user.GetUsersRequest; import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java index eefaaa72b1e..b49984b28da 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java @@ -30,7 +30,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.Privilege; import org.elasticsearch.xpack.core.security.support.Automatons; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authz.AuthorizationService; import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportPutUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportPutUserAction.java index a2715896da6..ed23b84fbf5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportPutUserAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportPutUserAction.java @@ -93,10 +93,6 @@ public class TransportPutUserAction extends HandledTransportAction userConsumer) { - final List realmsList = realms.asList(); - final BiConsumer> realmLookupConsumer = (realm, lookupUserListener) -> - realm.lookupUser(runAsUsername, ActionListener.wrap((lookedupUser) -> { - if (lookedupUser != null) { - lookedupBy = new RealmRef(realm.name(), realm.type(), nodeName); - lookupUserListener.onResponse(lookedupUser); - } else { - lookupUserListener.onResponse(null); - } - }, lookupUserListener::onFailure)); - - final 
IteratingActionListener userLookupListener = - new IteratingActionListener<>(ActionListener.wrap((lookupUser) -> { - if (lookupUser == null) { - // the user does not exist, but we still create a User object, which will later be rejected by authz - userConsumer.accept(new User(runAsUsername, null, user)); - } else { - userConsumer.accept(new User(lookupUser, user)); - } - }, - (e) -> listener.onFailure(request.exceptionProcessingRequest(e, authenticationToken))), - realmLookupConsumer, realmsList, threadContext); - try { - userLookupListener.run(); - } catch (Exception e) { - listener.onFailure(request.exceptionProcessingRequest(e, authenticationToken)); - } + final RealmUserLookup lookup = new RealmUserLookup(realms.asList(), threadContext); + lookup.lookup(runAsUsername, ActionListener.wrap(tuple -> { + if (tuple == null) { + // the user does not exist, but we still create a User object, which will later be rejected by authz + userConsumer.accept(new User(runAsUsername, null, user)); + } else { + User foundUser = Objects.requireNonNull(tuple.v1()); + Realm realm = Objects.requireNonNull(tuple.v2()); + lookedupBy = new RealmRef(realm.name(), realm.type(), nodeName); + userConsumer.accept(new User(foundUser, user)); + } + }, exception -> listener.onFailure(request.exceptionProcessingRequest(exception, authenticationToken)))); } /** diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java index b8ae5c94441..78670dd99f6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java @@ -9,7 +9,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; @@ -50,15 +49,14 @@ final class ExpiredTokenRemover extends AbstractRunnable { @Override public void doRun() { - SearchRequest searchRequest = new SearchRequest(SecurityIndexManager.SECURITY_INDEX_NAME); - DeleteByQueryRequest expiredDbq = new DeleteByQueryRequest(searchRequest); + DeleteByQueryRequest expiredDbq = new DeleteByQueryRequest(SecurityIndexManager.SECURITY_INDEX_NAME); if (timeout != TimeValue.MINUS_ONE) { expiredDbq.setTimeout(timeout); - searchRequest.source().timeout(timeout); + expiredDbq.getSearchRequest().source().timeout(timeout); } final Instant now = Instant.now(); - searchRequest.source() - .query(QueryBuilders.boolQuery() + expiredDbq + .setQuery(QueryBuilders.boolQuery() .filter(QueryBuilders.termsQuery("doc_type", TokenService.INVALIDATED_TOKEN_DOC_TYPE, "token")) .filter(QueryBuilders.boolQuery() .should(QueryBuilders.rangeQuery("expiration_time").lte(now.toEpochMilli())) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java index 8b80c1f1d1c..d2573b9343d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java @@ -93,6 +93,7 @@ public class Realms extends AbstractComponent implements Iterable { this.standardRealmsOnly = Collections.unmodifiableList(standardRealms); this.nativeRealmsOnly = Collections.unmodifiableList(nativeRealms); + realms.forEach(r -> r.initialize(this, licenseState)); } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 8b6dd8295d3..937bd22d982 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -212,7 +212,8 @@ public final class TokenService extends AbstractComponent { * The created token will be stored in the security index. */ public void createUserToken(Authentication authentication, Authentication originatingClientAuth, - ActionListener> listener, Map metadata) throws IOException { + ActionListener> listener, Map metadata, + boolean includeRefreshToken) throws IOException { ensureEnabled(); if (authentication == null) { listener.onFailure(new IllegalArgumentException("authentication must be provided")); @@ -226,13 +227,14 @@ public final class TokenService extends AbstractComponent { new Authentication(authentication.getUser(), authentication.getAuthenticatedBy(), authentication.getLookedUpBy(), version); final UserToken userToken = new UserToken(version, matchingVersionAuth, expiration, metadata); - final String refreshToken = UUIDs.randomBase64UUID(); + final String refreshToken = includeRefreshToken ? UUIDs.randomBase64UUID() : null; try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.startObject(); builder.field("doc_type", "token"); builder.field("creation_time", created.toEpochMilli()); - builder.startObject("refresh_token") + if (includeRefreshToken) { + builder.startObject("refresh_token") .field("token", refreshToken) .field("invalidated", false) .field("refreshed", false) @@ -242,6 +244,7 @@ public final class TokenService extends AbstractComponent { .field("realm", originatingClientAuth.getAuthenticatedBy().getName()) .endObject() .endObject(); + } builder.startObject("access_token") .field("invalidated", false) .field("user_token", userToken) @@ -734,7 +737,7 @@ public final class TokenService extends AbstractComponent { .request(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, updateRequest, ActionListener.wrap( - updateResponse -> createUserToken(authentication, userAuth, listener, metadata), + updateResponse -> createUserToken(authentication, userAuth, listener, metadata, true), e -> { Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof VersionConflictEngineException || diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java index ffd9c3f73bc..a84b76beab8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java @@ -11,7 +11,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import 
org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.support.SecurityIndexManager; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java index 507ed4684a1..d923a029804 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -48,8 +48,8 @@ import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; -import org.elasticsearch.protocol.xpack.security.User.Fields; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.User.Fields; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.support.SecurityIndexManager; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java index 99c138bbb12..33726671911 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java @@ -24,12 +24,13 @@ import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -149,6 +150,8 @@ public class ReservedRealm extends CachingUsernamePasswordRealm { return new LogstashSystemUser(userInfo.enabled); case BeatsSystemUser.NAME: return new BeatsSystemUser(userInfo.enabled); + case APMSystemUser.NAME: + return new APMSystemUser(userInfo.enabled); default: if (anonymousEnabled && anonymousUser.principal().equals(username)) { 
return anonymousUser; @@ -177,6 +180,9 @@ public class ReservedRealm extends CachingUsernamePasswordRealm { userInfo = reservedUserInfos.get(BeatsSystemUser.NAME); users.add(new BeatsSystemUser(userInfo == null || userInfo.enabled)); + userInfo = reservedUserInfos.get(APMSystemUser.NAME); + users.add(new APMSystemUser(userInfo == null || userInfo.enabled)); + if (anonymousEnabled) { users.add(anonymousUser); } @@ -226,12 +232,12 @@ public class ReservedRealm extends CachingUsernamePasswordRealm { private Version getDefinedVersion(String username) { switch (username) { - case LogstashSystemUser.NAME: - return LogstashSystemUser.DEFINED_SINCE; case BeatsSystemUser.NAME: return BeatsSystemUser.DEFINED_SINCE; + case APMSystemUser.NAME: + return APMSystemUser.DEFINED_SINCE; default: - return Version.V_5_0_0; + return Version.V_6_0_0; } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java index d9971ab2388..3f636312f0f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.security.authc.esnative; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.xpack.core.security.authc.support.Hasher; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; /** * Like User, but includes the hashed password diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java index 336acbdb181..fad10c821c8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.support.Validation; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; @@ -63,7 +64,8 @@ import static java.util.Arrays.asList; public class SetupPasswordTool extends LoggingAwareMultiCommand { private static final char[] CHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789").toCharArray(); - public static final List USERS = asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + public static final List USERS = asList(ElasticUser.NAME, APMSystemUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, + BeatsSystemUser.NAME); private final BiFunction clientFunction; private final CheckedFunction keyStoreFunction; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java index 8d529897534..e2586ea836d 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java @@ -12,7 +12,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import java.util.Map; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java index 220108b5637..15a6c2c41da 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java @@ -24,7 +24,7 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.support.NoOpLogger; import org.elasticsearch.xpack.core.security.support.Validation; import org.elasticsearch.xpack.core.security.support.Validation.Users; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.support.SecurityFiles; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java index 53146203ee2..9c531d3159f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java @@ -13,14 +13,16 @@ import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.ietf.jgss.GSSException; @@ -63,6 +65,7 @@ public final class KerberosRealm extends Realm implements CachingRealm { private final Path keytabPath; private final boolean enableKerberosDebug; private final boolean removeRealmName; + private 
DelegatedAuthorizationSupport delegatedRealms; public KerberosRealm(final RealmConfig config, final NativeRoleMappingStore nativeRoleMappingStore, final ThreadPool threadPool) { this(config, nativeRoleMappingStore, new KerberosTicketValidator(), threadPool, null); @@ -100,6 +103,15 @@ public final class KerberosRealm extends Realm implements CachingRealm { } this.enableKerberosDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings()); this.removeRealmName = KerberosRealmSettings.SETTING_REMOVE_REALM_NAME.get(config.settings()); + this.delegatedRealms = null; + } + + @Override + public void initialize(Iterable realms, XPackLicenseState licenseState) { + if (delegatedRealms != null) { + throw new IllegalStateException("Realm has already been initialized"); + } + delegatedRealms = new DelegatedAuthorizationSupport(realms, config, licenseState); } @Override @@ -133,13 +145,14 @@ public final class KerberosRealm extends Realm implements CachingRealm { @Override public void authenticate(final AuthenticationToken token, final ActionListener listener) { + assert delegatedRealms != null : "Realm has not been initialized correctly"; assert token instanceof KerberosAuthenticationToken; final KerberosAuthenticationToken kerbAuthnToken = (KerberosAuthenticationToken) token; kerberosTicketValidator.validateTicket((byte[]) kerbAuthnToken.credentials(), keytabPath, enableKerberosDebug, ActionListener.wrap(userPrincipalNameOutToken -> { if (userPrincipalNameOutToken.v1() != null) { final String username = maybeRemoveRealmName(userPrincipalNameOutToken.v1()); - buildUser(username, userPrincipalNameOutToken.v2(), listener); + resolveUser(username, userPrincipalNameOutToken.v2(), listener); } else { /** * This is when security context could not be established may be due to ongoing @@ -192,35 +205,36 @@ public final class KerberosRealm extends Realm implements CachingRealm { } } - private void buildUser(final String username, final String outToken, final ActionListener listener) { + private void resolveUser(final String username, final String outToken, final ActionListener listener) { // if outToken is present then it needs to be communicated with peer, add it to // response header in thread context. if (Strings.hasText(outToken)) { threadPool.getThreadContext().addResponseHeader(WWW_AUTHENTICATE, NEGOTIATE_AUTH_HEADER_PREFIX + outToken); } - final User user = (userPrincipalNameToUserCache != null) ? userPrincipalNameToUserCache.get(username) : null; - if (user != null) { - /** - * TODO: bizybot If authorizing realms configured, resolve user from those - * realms and then return. - */ - listener.onResponse(AuthenticationResult.success(user)); + + if (delegatedRealms.hasDelegation()) { + delegatedRealms.resolve(username, listener); } else { - /** - * TODO: bizybot If authorizing realms configured, resolve user from those - * realms, cache it and then return. - */ - final UserRoleMapper.UserData userData = new UserRoleMapper.UserData(username, null, Collections.emptySet(), null, this.config); - userRoleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { - final User computedUser = new User(username, roles.toArray(new String[roles.size()]), null, null, null, true); - if (userPrincipalNameToUserCache != null) { - userPrincipalNameToUserCache.put(username, computedUser); - } - listener.onResponse(AuthenticationResult.success(computedUser)); - }, listener::onFailure)); + final User user = (userPrincipalNameToUserCache != null) ? 
userPrincipalNameToUserCache.get(username) : null; + if (user != null) { + listener.onResponse(AuthenticationResult.success(user)); + } else { + buildUser(username, listener); + } } } + private void buildUser(final String username, final ActionListener listener) { + final UserRoleMapper.UserData userData = new UserRoleMapper.UserData(username, null, Collections.emptySet(), null, this.config); + userRoleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { + final User computedUser = new User(username, roles.toArray(new String[roles.size()]), null, null, null, true); + if (userPrincipalNameToUserCache != null) { + userPrincipalNameToUserCache.put(username, computedUser); + } + listener.onResponse(AuthenticationResult.success(computedUser)); + }, listener::onFailure)); + } + @Override public void lookupUser(final String username, final ActionListener listener) { listener.onResponse(null); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySIDUtil.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySIDUtil.java index e8f418d891b..d93ba9e017e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySIDUtil.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySIDUtil.java @@ -18,7 +18,8 @@ */ /* - * This code sourced from:http://svn.apache.org/repos/asf/directory/studio/tags/2.0.0.v20170904-M13/plugins/valueeditors/src/main/java/org/apache/directory/studio/valueeditors/msad/InPlaceMsAdObjectSidValueEditor.java + * This code sourced from: + * http://svn.apache.org/repos/asf/directory/studio/tags/2.0.0.v20170904-M13/plugins/valueeditors/src/main/java/org/apache/directory/studio/valueeditors/msad/InPlaceMsAdObjectSidValueEditor.java */ package org.elasticsearch.xpack.security.authc.ldap; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java index f689bc28789..193b33b7d8f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.security.authc.ldap; import com.unboundid.ldap.sdk.LDAPException; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; @@ -16,21 +15,25 @@ import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import 
org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.ldap.support.LdapLoadBalancing; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; +import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData; import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper; @@ -53,7 +56,7 @@ public final class LdapRealm extends CachingUsernamePasswordRealm { private final UserRoleMapper roleMapper; private final ThreadPool threadPool; private final TimeValue executionTimeout; - + private DelegatedAuthorizationSupport delegatedRealms; public LdapRealm(String type, RealmConfig config, SSLService sslService, ResourceWatcherService watcherService, @@ -118,6 +121,7 @@ public final class LdapRealm extends CachingUsernamePasswordRealm { */ @Override protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + assert delegatedRealms != null : "Realm has not been initialized correctly"; // we submit to the threadpool because authentication using LDAP will execute blocking I/O for a bind request and we don't want // network threads stuck waiting for a socket to connect. 
After the bind, then all interaction with LDAP should be async final CancellableLdapRunnable cancellableLdapRunnable = new CancellableLdapRunnable<>(listener, @@ -159,6 +163,14 @@ public final class LdapRealm extends CachingUsernamePasswordRealm { sessionListener); } + @Override + public void initialize(Iterable realms, XPackLicenseState licenseState) { + if (delegatedRealms != null) { + throw new IllegalStateException("Realm has already been initialized"); + } + delegatedRealms = new DelegatedAuthorizationSupport(realms, config, licenseState); + } + @Override public void usageStats(ActionListener> listener) { super.usageStats(ActionListener.wrap(usage -> { @@ -171,39 +183,56 @@ public final class LdapRealm extends CachingUsernamePasswordRealm { } private static void buildUser(LdapSession session, String username, ActionListener listener, - UserRoleMapper roleMapper) { + UserRoleMapper roleMapper, DelegatedAuthorizationSupport delegatedAuthz) { + assert delegatedAuthz != null : "DelegatedAuthorizationSupport is null"; if (session == null) { listener.onResponse(AuthenticationResult.notHandled()); + } else if (delegatedAuthz.hasDelegation()) { + delegatedAuthz.resolve(username, listener); } else { - boolean loadingGroups = false; - try { - final Consumer onFailure = e -> { - IOUtils.closeWhileHandlingException(session); - listener.onFailure(e); - }; - session.resolve(ActionListener.wrap((ldapData) -> { - final Map metadata = MapBuilder.newMapBuilder() - .put("ldap_dn", session.userDn()) - .put("ldap_groups", ldapData.groups) - .putAll(ldapData.metaData) - .map(); - final UserData user = new UserData(username, session.userDn(), ldapData.groups, - metadata, session.realm()); - roleMapper.resolveRoles(user, ActionListener.wrap( - roles -> { - IOUtils.close(session); - String[] rolesArray = roles.toArray(new String[roles.size()]); - listener.onResponse(AuthenticationResult.success( - new User(username, rolesArray, null, null, metadata, true)) - ); - }, onFailure - )); - }, onFailure)); - loadingGroups = true; - } finally { - if (loadingGroups == false) { - session.close(); - } + lookupUserFromSession(username, session, roleMapper, listener); + } + } + + @Override + protected void handleCachedAuthentication(User user, ActionListener listener) { + if (delegatedRealms.hasDelegation()) { + delegatedRealms.resolve(user.principal(), listener); + } else { + super.handleCachedAuthentication(user, listener); + } + } + + private static void lookupUserFromSession(String username, LdapSession session, UserRoleMapper roleMapper, + ActionListener listener) { + boolean loadingGroups = false; + try { + final Consumer onFailure = e -> { + IOUtils.closeWhileHandlingException(session); + listener.onFailure(e); + }; + session.resolve(ActionListener.wrap((ldapData) -> { + final Map metadata = MapBuilder.newMapBuilder() + .put("ldap_dn", session.userDn()) + .put("ldap_groups", ldapData.groups) + .putAll(ldapData.metaData) + .map(); + final UserData user = new UserData(username, session.userDn(), ldapData.groups, + metadata, session.realm()); + roleMapper.resolveRoles(user, ActionListener.wrap( + roles -> { + IOUtils.close(session); + String[] rolesArray = roles.toArray(new String[roles.size()]); + listener.onResponse(AuthenticationResult.success( + new User(username, rolesArray, null, null, metadata, true)) + ); + }, onFailure + )); + }, onFailure)); + loadingGroups = true; + } finally { + if (loadingGroups == false) { + session.close(); } } } @@ -233,7 +262,7 @@ public final class LdapRealm extends 
CachingUsernamePasswordRealm { resultListener.onResponse(AuthenticationResult.notHandled()); } else { ldapSessionAtomicReference.set(session); - buildUser(session, username, resultListener, roleMapper); + buildUser(session, username, resultListener, roleMapper, delegatedRealms); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java index 58e10a54755..4d13f332ffe 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; @@ -26,17 +27,17 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.security.authc.BytesKey; import org.elasticsearch.xpack.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import javax.net.ssl.X509TrustManager; - import java.security.MessageDigest; import java.security.cert.Certificate; import java.security.cert.CertificateEncodingException; @@ -75,6 +76,7 @@ public class PkiRealm extends Realm implements CachingRealm { private final Pattern principalPattern; private final UserRoleMapper roleMapper; private final Cache cache; + private DelegatedAuthorizationSupport delegatedRealms; public PkiRealm(RealmConfig config, ResourceWatcherService watcherService, NativeRoleMappingStore nativeRoleMappingStore) { this(config, new CompositeRoleMapper(PkiRealmSettings.TYPE, config, watcherService, nativeRoleMappingStore)); @@ -91,6 +93,15 @@ public class PkiRealm extends Realm implements CachingRealm { .setExpireAfterWrite(PkiRealmSettings.CACHE_TTL_SETTING.get(config.settings())) .setMaximumWeight(PkiRealmSettings.CACHE_MAX_USERS_SETTING.get(config.settings())) .build(); + this.delegatedRealms = null; + } + + @Override + public void initialize(Iterable realms, XPackLicenseState licenseState) { + if (delegatedRealms != null) { + throw new IllegalStateException("Realm has already been initialized"); + } + delegatedRealms = new DelegatedAuthorizationSupport(realms, config, licenseState); } @Override @@ -105,32 +116,50 @@ public class PkiRealm extends Realm implements CachingRealm { @Override public void 
authenticate(AuthenticationToken authToken, ActionListener listener) { + assert delegatedRealms != null : "Realm has not been initialized correctly"; X509AuthenticationToken token = (X509AuthenticationToken)authToken; try { final BytesKey fingerprint = computeFingerprint(token.credentials()[0]); User user = cache.get(fingerprint); if (user != null) { - listener.onResponse(AuthenticationResult.success(user)); + if (delegatedRealms.hasDelegation()) { + delegatedRealms.resolve(token.principal(), listener); + } else { + listener.onResponse(AuthenticationResult.success(user)); + } } else if (isCertificateChainTrusted(trustManager, token, logger) == false) { listener.onResponse(AuthenticationResult.unsuccessful("Certificate for " + token.dn() + " is not trusted", null)); } else { - final Map metadata = Collections.singletonMap("pki_dn", token.dn()); - final UserRoleMapper.UserData userData = new UserRoleMapper.UserData(token.principal(), - token.dn(), Collections.emptySet(), metadata, this.config); - roleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { - final User computedUser = - new User(token.principal(), roles.toArray(new String[roles.size()]), null, null, metadata, true); - try (ReleasableLock ignored = readLock.acquire()) { - cache.put(fingerprint, computedUser); + final ActionListener cachingListener = ActionListener.wrap(result -> { + if (result.isAuthenticated()) { + try (ReleasableLock ignored = readLock.acquire()) { + cache.put(fingerprint, result.getUser()); + } } - listener.onResponse(AuthenticationResult.success(computedUser)); - }, listener::onFailure)); + listener.onResponse(result); + }, listener::onFailure); + if (delegatedRealms.hasDelegation()) { + delegatedRealms.resolve(token.principal(), cachingListener); + } else { + this.buildUser(token, cachingListener); + } } } catch (CertificateEncodingException e) { listener.onResponse(AuthenticationResult.unsuccessful("Certificate for " + token.dn() + " has encoding issues", e)); } } + private void buildUser(X509AuthenticationToken token, ActionListener listener) { + final Map metadata = Collections.singletonMap("pki_dn", token.dn()); + final UserRoleMapper.UserData userData = new UserRoleMapper.UserData(token.principal(), + token.dn(), Collections.emptySet(), metadata, this.config); + roleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { + final User computedUser = + new User(token.principal(), roles.toArray(new String[roles.size()]), null, null, metadata, true); + listener.onResponse(AuthenticationResult.success(computedUser)); + }, listener::onFailure)); + } + @Override public void lookupUser(String username, ActionListener listener) { listener.onResponse(null); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java index a8f50d975e8..4a9db7c5d61 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.watcher.FileChangesListener; import org.elasticsearch.watcher.FileWatcher; import 
org.elasticsearch.watcher.ResourceWatcherService; @@ -43,13 +44,15 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; +import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.X509KeyPairSettings; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; +import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.opensaml.core.criterion.EntityIdCriterion; import org.opensaml.saml.common.xml.SAMLConstants; @@ -117,6 +120,7 @@ import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.NAME_ATTRIBUTE; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.POPULATE_USER_METADATA; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.PRINCIPAL_ATTRIBUTE; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.REQUESTED_AUTHN_CONTEXT_CLASS_REF; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_KEY_ALIAS; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_MESSAGE_TYPES; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_SETTINGS; @@ -124,7 +128,6 @@ import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SP_ENTITY_ID; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SP_LOGOUT; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.TYPE; -import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.REQUESTED_AUTHN_CONTEXT_CLASS_REF; /** * This class is {@link Releasable} because it uses a library that thinks timers and timer tasks @@ -166,6 +169,7 @@ public final class SamlRealm extends Realm implements Releasable { private final AttributeParser nameAttribute; private final AttributeParser mailAttribute; + private DelegatedAuthorizationSupport delegatedRealms; /** * Factory for SAML realm. 
@@ -231,6 +235,14 @@ public final class SamlRealm extends Realm implements Releasable { this.releasables = new ArrayList<>(); } + @Override + public void initialize(Iterable realms, XPackLicenseState licenseState) { + if (delegatedRealms != null) { + throw new IllegalStateException("Realm has already been initialized"); + } + delegatedRealms = new DelegatedAuthorizationSupport(realms, config, licenseState); + } + static String require(RealmConfig config, Setting setting) { final String value = setting.get(config.settings()); if (value.isEmpty()) { @@ -402,14 +414,27 @@ public final class SamlRealm extends Realm implements Releasable { } } - private void buildUser(SamlAttributes attributes, ActionListener listener) { + private void buildUser(SamlAttributes attributes, ActionListener baseListener) { final String principal = resolveSingleValueAttribute(attributes, principalAttribute, PRINCIPAL_ATTRIBUTE.name()); if (Strings.isNullOrEmpty(principal)) { - listener.onResponse(AuthenticationResult.unsuccessful( + baseListener.onResponse(AuthenticationResult.unsuccessful( principalAttribute + " not found in " + attributes.attributes(), null)); return; } + final Map tokenMetadata = createTokenMetadata(attributes.name(), attributes.session()); + ActionListener wrappedListener = ActionListener.wrap(auth -> { + if (auth.isAuthenticated()) { + config.threadContext().putTransient(CONTEXT_TOKEN_DATA, tokenMetadata); + } + baseListener.onResponse(auth); + }, baseListener::onFailure); + + if (delegatedRealms.hasDelegation()) { + delegatedRealms.resolve(principal, wrappedListener); + return; + } + final Map userMeta = new HashMap<>(); if (populateUserMetadata) { for (SamlAttributes.SamlAttribute a : attributes.attributes()) { @@ -424,7 +449,6 @@ public final class SamlRealm extends Realm implements Releasable { userMeta.put(USER_METADATA_NAMEID_FORMAT, attributes.name().format); } - final Map tokenMetadata = createTokenMetadata(attributes.name(), attributes.session()); final List groups = groupsAttribute.getAttribute(attributes); final String dn = resolveSingleValueAttribute(attributes, dnAttribute, DN_ATTRIBUTE.name()); @@ -433,9 +457,8 @@ public final class SamlRealm extends Realm implements Releasable { UserRoleMapper.UserData userData = new UserRoleMapper.UserData(principal, dn, groups, userMeta, config); roleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { final User user = new User(principal, roles.toArray(new String[roles.size()]), name, mail, userMeta, true); - config.threadContext().putTransient(CONTEXT_TOKEN_DATA, tokenMetadata); - listener.onResponse(AuthenticationResult.success(user)); - }, listener::onFailure)); + wrappedListener.onResponse(AuthenticationResult.success(user)); + }, wrappedListener::onFailure)); } public Map createTokenMetadata(SamlNameId nameId, String session) { @@ -745,10 +768,10 @@ public final class SamlRealm extends Realm implements Releasable { attributes -> attributes.getAttributeValues(attributeName)); } } else if (required) { - throw new SettingsException("Setting" + RealmSettings.getFullSettingKey(realmConfig, setting.getAttribute()) + throw new SettingsException("Setting " + RealmSettings.getFullSettingKey(realmConfig, setting.getAttribute()) + " is required"); } else if (setting.getPattern().exists(settings)) { - throw new SettingsException("Setting" + RealmSettings.getFullSettingKey(realmConfig, setting.getPattern()) + throw new SettingsException("Setting " + RealmSettings.getFullSettingKey(realmConfig, setting.getPattern()) + " cannot be set unless " + 
RealmSettings.getFullSettingKey(realmConfig, setting.getAttribute()) + " is also set"); } else { return new AttributeParser("No SAML attribute for [" + setting.name() + "]", attributes -> Collections.emptyList()); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java index bcdbc1e1dd3..af93a180072 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java @@ -5,11 +5,9 @@ */ package org.elasticsearch.xpack.security.authc.support; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ListenableFuture; @@ -20,7 +18,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.util.Collections; import java.util.Map; @@ -30,7 +28,7 @@ import java.util.concurrent.atomic.AtomicBoolean; public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm implements CachingRealm { - private final Cache>> cache; + private final Cache> cache; private final ThreadPool threadPool; final Hasher cacheHasher; @@ -38,17 +36,18 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm super(type, config); cacheHasher = Hasher.resolve(CachingUsernamePasswordRealmSettings.CACHE_HASH_ALGO_SETTING.get(config.settings())); this.threadPool = threadPool; - TimeValue ttl = CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.get(config.settings()); + final TimeValue ttl = CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.get(config.settings()); if (ttl.getNanos() > 0) { - cache = CacheBuilder.>>builder() - .setExpireAfterWrite(ttl) - .setMaximumWeight(CachingUsernamePasswordRealmSettings.CACHE_MAX_USERS_SETTING.get(config.settings())) - .build(); + cache = CacheBuilder.>builder() + .setExpireAfterWrite(ttl) + .setMaximumWeight(CachingUsernamePasswordRealmSettings.CACHE_MAX_USERS_SETTING.get(config.settings())) + .build(); } else { cache = null; } } + @Override public final void expire(String username) { if (cache != null) { logger.trace("invalidating cache for user [{}] in realm [{}]", username, name()); @@ -56,6 +55,7 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm } } + @Override public final void expireAll() { if (cache != null) { logger.trace("invalidating cache for all users in realm [{}]", name()); @@ -72,109 +72,101 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm */ @Override public final void authenticate(AuthenticationToken authToken, ActionListener listener) { - UsernamePasswordToken token = 
(UsernamePasswordToken) authToken; + final UsernamePasswordToken token = (UsernamePasswordToken) authToken; try { if (cache == null) { doAuthenticate(token, listener); } else { authenticateWithCache(token, listener); } - } catch (Exception e) { + } catch (final Exception e) { // each realm should handle exceptions, if we get one here it should be considered fatal listener.onFailure(e); } } + /** + * This validates the {@code token} while making sure there is only one inflight + * request to the authentication source. Only successful responses are cached + * and any subsequent requests, bearing the same password, will succeed + * without reaching the authentication source. A different password in a + * subsequent request, however, will clear the cache and try to reach + * the authentication source. + * + * @param token The authentication token + * @param listener to be called at completion + */ private void authenticateWithCache(UsernamePasswordToken token, ActionListener listener) { try { - final SetOnce authenticatedUser = new SetOnce<>(); - final AtomicBoolean createdAndStartedFuture = new AtomicBoolean(false); - final ListenableFuture> future = cache.computeIfAbsent(token.principal(), k -> { - final ListenableFuture> created = new ListenableFuture<>(); - if (createdAndStartedFuture.compareAndSet(false, true) == false) { - throw new IllegalStateException("something else already started this. how?"); - } - return created; + final AtomicBoolean authenticationInCache = new AtomicBoolean(true); + final ListenableFuture listenableCacheEntry = cache.computeIfAbsent(token.principal(), k -> { + authenticationInCache.set(false); + return new ListenableFuture<>(); }); - - if (createdAndStartedFuture.get()) { - doAuthenticate(token, ActionListener.wrap(result -> { - if (result.isAuthenticated()) { - final User user = result.getUser(); - authenticatedUser.set(user); - final UserWithHash userWithHash = new UserWithHash(user, token.credentials(), cacheHasher); - future.onResponse(new Tuple<>(result, userWithHash)); + if (authenticationInCache.get()) { + // there is a cached or an inflight authenticate request + listenableCacheEntry.addListener(ActionListener.wrap(authenticatedUserWithHash -> { + if (authenticatedUserWithHash != null && authenticatedUserWithHash.verify(token.credentials())) { + // cached credential hash matches the credential hash for this forestalled request + handleCachedAuthentication(authenticatedUserWithHash.user, ActionListener.wrap(cacheResult -> { + if (cacheResult.isAuthenticated()) { + logger.debug("realm [{}] authenticated user [{}], with roles [{}]", + name(), token.principal(), cacheResult.getUser().roles()); + } else { + logger.debug("realm [{}] authenticated user [{}] from cache, but then failed [{}]", + name(), token.principal(), cacheResult.getMessage()); + } + listener.onResponse(cacheResult); + }, listener::onFailure)); } else { - future.onResponse(new Tuple<>(result, null)); - } - }, future::onFailure)); - } - - future.addListener(ActionListener.wrap(tuple -> { - if (tuple != null) { - final UserWithHash userWithHash = tuple.v2(); - final boolean performedAuthentication = createdAndStartedFuture.get() && userWithHash != null && - tuple.v2().user == authenticatedUser.get(); - handleResult(future, createdAndStartedFuture.get(), performedAuthentication, token, tuple, listener); - } else { - handleFailure(future, createdAndStartedFuture.get(), token, new IllegalStateException("unknown error authenticating"), - listener); - } - }, e -> handleFailure(future,
createdAndStartedFuture.get(), token, e, listener)), - threadPool.executor(ThreadPool.Names.GENERIC)); - } catch (ExecutionException e) { - listener.onResponse(AuthenticationResult.unsuccessful("", e)); - } - } - - private void handleResult(ListenableFuture> future, boolean createdAndStartedFuture, - boolean performedAuthentication, UsernamePasswordToken token, - Tuple result, ActionListener listener) { - final AuthenticationResult authResult = result.v1(); - if (authResult == null) { - // this was from a lookup; clear and redo - cache.invalidate(token.principal(), future); - authenticateWithCache(token, listener); - } else if (authResult.isAuthenticated()) { - if (performedAuthentication) { - listener.onResponse(authResult); - } else { - UserWithHash userWithHash = result.v2(); - if (userWithHash.verify(token.credentials())) { - if (userWithHash.user.enabled()) { - User user = userWithHash.user; - logger.debug("realm [{}] authenticated user [{}], with roles [{}]", - name(), token.principal(), user.roles()); - listener.onResponse(AuthenticationResult.success(user)); - } else { - // re-auth to see if user has been enabled - cache.invalidate(token.principal(), future); + // The inflight request has failed or its credential hash does not match the + // hash of the credential for this forestalled request. + // clear cache and try to reach the authentication source again because password + // might have changed there and the local cached hash got stale + cache.invalidate(token.principal(), listenableCacheEntry); authenticateWithCache(token, listener); } - } else { - // could be a password change? - cache.invalidate(token.principal(), future); + }, e -> { + // the inflight request failed, so try again, but first (always) make sure cache + // is cleared of the failed authentication + cache.invalidate(token.principal(), listenableCacheEntry); authenticateWithCache(token, listener); - } - } - } else { - cache.invalidate(token.principal(), future); - if (createdAndStartedFuture) { - listener.onResponse(authResult); + }), threadPool.executor(ThreadPool.Names.GENERIC)); } else { - authenticateWithCache(token, listener); + // attempt authentication against the authentication source + doAuthenticate(token, ActionListener.wrap(authResult -> { + if (authResult.isAuthenticated() && authResult.getUser().enabled()) { + // compute the credential hash of this successful authentication request + final UserWithHash userWithHash = new UserWithHash(authResult.getUser(), token.credentials(), cacheHasher); + // notify any forestalled request listeners; they will not reach the + // authentication source and instead will use this hash for comparison + listenableCacheEntry.onResponse(userWithHash); + } else { + // notify any forestalled request listeners; they will retry the request + listenableCacheEntry.onResponse(null); + } + // notify the listener of the inflight authentication request; this request is not retried + listener.onResponse(authResult); + }, e -> { + // notify any forestalled request listeners; they will retry the request + listenableCacheEntry.onFailure(e); + // notify the listener of the inflight authentication request; this request is not retried + listener.onFailure(e); + })); } + } catch (final ExecutionException e) { + listener.onFailure(e); } } - private void handleFailure(ListenableFuture> future, boolean createdAndStarted, - UsernamePasswordToken token, Exception e, ActionListener listener) { - cache.invalidate(token.principal(), future); - if (createdAndStarted) { - listener.onFailure(e); - } else
{ - authenticateWithCache(token, listener); - } + /** + * {@code handleCachedAuthentication} is called when a {@link User} is retrieved from the cache. + * The {@code user} parameter is the user object that was found in the cache. + * The default implementation returns a {@link AuthenticationResult#success(User) success result} with the + * provided user, but subclasses can return a different {@code User} object, or an unsuccessful result. + */ + protected void handleCachedAuthentication(User user, ActionListener listener) { + listener.onResponse(AuthenticationResult.success(user)); } @Override @@ -193,38 +185,57 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm @Override public final void lookupUser(String username, ActionListener listener) { - if (cache != null) { - try { - ListenableFuture> future = cache.computeIfAbsent(username, key -> { - ListenableFuture> created = new ListenableFuture<>(); - doLookupUser(username, ActionListener.wrap(user -> { - if (user != null) { - UserWithHash userWithHash = new UserWithHash(user, null, null); - created.onResponse(new Tuple<>(null, userWithHash)); - } else { - created.onResponse(new Tuple<>(null, null)); - } - }, created::onFailure)); - return created; - }); - - future.addListener(ActionListener.wrap(tuple -> { - if (tuple != null) { - if (tuple.v2() == null) { - cache.invalidate(username, future); - listener.onResponse(null); - } else { - listener.onResponse(tuple.v2().user); - } - } else { - listener.onResponse(null); - } - }, listener::onFailure), threadPool.executor(ThreadPool.Names.GENERIC)); - } catch (ExecutionException e) { - listener.onFailure(e); + try { + if (cache == null) { + doLookupUser(username, listener); + } else { + lookupWithCache(username, listener); } - } else { - doLookupUser(username, listener); + } catch (final Exception e) { + // each realm should handle exceptions, if we get one here it should be + // considered fatal + listener.onFailure(e); + } + } + + private void lookupWithCache(String username, ActionListener listener) { + try { + final AtomicBoolean lookupInCache = new AtomicBoolean(true); + final ListenableFuture listenableCacheEntry = cache.computeIfAbsent(username, key -> { + lookupInCache.set(false); + return new ListenableFuture<>(); }); + if (false == lookupInCache.get()) { + // attempt lookup against the user directory + doLookupUser(username, ActionListener.wrap(user -> { + if (user != null) { + // user found + final UserWithHash userWithHash = new UserWithHash(user, null, null); + // notify forestalled request listeners + listenableCacheEntry.onResponse(userWithHash); + } else { + // user not found, invalidate cache so that subsequent requests are forwarded to + // the user directory + cache.invalidate(username, listenableCacheEntry); + // notify forestalled request listeners + listenableCacheEntry.onResponse(null); + } + }, e -> { + // the next request should be forwarded, not halted by a failed lookup attempt + cache.invalidate(username, listenableCacheEntry); + // notify forestalled listeners + listenableCacheEntry.onFailure(e); + })); + } + listenableCacheEntry.addListener(ActionListener.wrap(userWithHash -> { + if (userWithHash != null) { + listener.onResponse(userWithHash.user); + } else { + listener.onResponse(null); + } + }, listener::onFailure), threadPool.executor(ThreadPool.Names.GENERIC)); + } catch (final ExecutionException e) { + listener.onFailure(e); } }
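The new javadoc above captures the heart of this rewrite: the cache now stores a single future per principal, so at most one authentication request per principal is in flight and every concurrent caller subscribes to the same entry. A minimal sketch of that single-flight shape, using plain `java.util.concurrent` types as stand-ins for Elasticsearch's `Cache` and `ListenableFuture` (the `SingleFlight` class below is illustrative scaffolding, not part of this change):

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

// Illustration only: collapse concurrent computations for the same key into
// one in-flight future, the way authenticateWithCache and lookupWithCache do.
final class SingleFlight<K, V> {
    private final ConcurrentMap<K, CompletableFuture<V>> cache = new ConcurrentHashMap<>();

    CompletableFuture<V> get(K key, Function<K, CompletableFuture<V>> loader) {
        final boolean[] owner = new boolean[1];
        final CompletableFuture<V> future = cache.computeIfAbsent(key, k -> {
            owner[0] = true;               // this caller starts the real work
            return new CompletableFuture<>();
        });
        if (owner[0]) {
            loader.apply(key).whenComplete((value, e) -> {
                if (e != null) {
                    cache.remove(key, future);  // failures are never cached
                    future.completeExceptionally(e);
                } else {
                    future.complete(value);     // forestalled callers see this value
                }
            });
        }
        return future;
    }
}
```

Failed entries are evicted so the next caller retries, mirroring the `cache.invalidate(token.principal(), listenableCacheEntry)` calls above; the realm additionally caches only a salted hash of the password (`UserWithHash`) and verifies it before trusting a cached entry.

diff --git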
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupport.java new file mode 100644 index 00000000000..ff6fc6042e7 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupport.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.security.authc.support; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.Strings.collectionToDelimitedString; + +/** + * Utility class for supporting "delegated authorization" (aka "authorization_realms", aka "lookup realms"). + * A {@link Realm} may support delegating authorization to another realm. It does this by registering a + * setting for {@link DelegatedAuthorizationSettings#AUTHZ_REALMS}, and constructing an instance of this + * class. Then, after the realm has performed any authentication steps, if {@link #hasDelegation()} is + * {@code true}, it delegates the construction of the {@link User} object and {@link AuthenticationResult} + * to {@link #resolve(String, ActionListener)}. + */ +public class DelegatedAuthorizationSupport { + + private final RealmUserLookup lookup; + private final Logger logger; + private final XPackLicenseState licenseState; + + /** + * Resolves the {@link DelegatedAuthorizationSettings#AUTHZ_REALMS} setting from {@code config} and calls + * {@link #DelegatedAuthorizationSupport(Iterable, List, Settings, ThreadContext, XPackLicenseState)} + */ + public DelegatedAuthorizationSupport(Iterable allRealms, RealmConfig config, XPackLicenseState licenseState) { + this(allRealms, DelegatedAuthorizationSettings.AUTHZ_REALMS.get(config.settings()), config.globalSettings(), config.threadContext(), + licenseState); + } + + /** + * Constructs a new object that delegates to the named realms ({@code lookupRealms}), which must exist within + * {@code allRealms}. 
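An aside on the call pattern the class javadoc above describes: a realm first verifies the credentials itself, then either hands user construction to the configured authorization realms or builds the user locally. In the sketch below, `DelegatingRealmSketch`, `verifyCredentials` and `buildLocalUser` are hypothetical scaffolding; only the `DelegatedAuthorizationSupport` calls come from this change:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport;

// Hypothetical realm fragment showing the intended use of delegated authorization.
abstract class DelegatingRealmSketch {
    private DelegatedAuthorizationSupport delegatedRealms; // created in initialize(), as in PkiRealm and SamlRealm

    void doAuthenticate(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener) {
        assert delegatedRealms != null : "realm has not been initialized";
        if (verifyCredentials(token) == false) {           // hypothetical credential check
            listener.onResponse(AuthenticationResult.unsuccessful(
                    "bad credentials for [" + token.principal() + "]", null));
        } else if (delegatedRealms.hasDelegation()) {
            // authorization_realms are configured: a lookup realm builds the User
            delegatedRealms.resolve(token.principal(), listener);
        } else {
            // no delegation: this realm maps roles and builds the User itself
            buildLocalUser(token, listener);               // hypothetical
        }
    }

    abstract boolean verifyCredentials(UsernamePasswordToken token);

    abstract void buildLocalUser(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener);
}
```

This is the same branch that `PkiRealm#authenticate` and `SamlRealm#buildUser` gain elsewhere in this diff.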
+ * @throws IllegalArgumentException if one of the specified realms does not exist + */ + protected DelegatedAuthorizationSupport(Iterable allRealms, List lookupRealms, Settings settings, + ThreadContext threadContext, XPackLicenseState licenseState) { + final List resolvedLookupRealms = resolveRealms(allRealms, lookupRealms); + checkForRealmChains(resolvedLookupRealms, settings); + this.lookup = new RealmUserLookup(resolvedLookupRealms, threadContext); + this.logger = Loggers.getLogger(getClass()); + this.licenseState = licenseState; + } + + /** + * Are there any realms configured for delegated lookup? + */ + public boolean hasDelegation() { + return this.lookup.hasRealms(); + } + + /** + * Attempts to find the user specified by {@code username} in one of the delegated realms. + * The realms are searched in the order specified during construction. + * Returns a {@link AuthenticationResult#success(User) successful result} if a {@link User} + * was found, otherwise returns an + * {@link AuthenticationResult#unsuccessful(String, Exception) unsuccessful result} + * with a meaningful diagnostic message. + */ + public void resolve(String username, ActionListener resultListener) { + if (licenseState.isAuthorizationRealmAllowed() == false) { + resultListener.onResponse(AuthenticationResult.unsuccessful( + DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey() + " are not permitted", + LicenseUtils.newComplianceException(DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey()) + )); + return; + } + if (hasDelegation() == false) { + resultListener.onResponse(AuthenticationResult.unsuccessful( + "No [" + DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey() + "] have been configured", null)); + return; + } + ActionListener> userListener = ActionListener.wrap(tuple -> { + if (tuple != null) { + logger.trace("Found user " + tuple.v1() + " in realm " + tuple.v2()); + resultListener.onResponse(AuthenticationResult.success(tuple.v1())); + } else { + resultListener.onResponse(AuthenticationResult.unsuccessful("the principal [" + username + + "] was authenticated, but no user could be found in realms [" + collectionToDelimitedString(lookup.getRealms(), ",") + + "]", null)); + } + }, resultListener::onFailure); + lookup.lookup(username, userListener); + } + + private List resolveRealms(Iterable allRealms, List lookupRealms) { + final List result = new ArrayList<>(lookupRealms.size()); + for (String name : lookupRealms) { + result.add(findRealm(name, allRealms)); + } + assert result.size() == lookupRealms.size(); + return result; + } + + /** + * Checks for (and rejects) chains of delegation in the provided realms. + * A chain occurs when "realmA" delegates authorization to "realmB", and realmB also delegates authorization (to any realm). + * Since "realmB" does not handle its own authorization, it is not a valid target for delegated authorization. + * @param delegatedRealms The list of realms that are going to be used for authorization. It is an error if any of these realms are + * also configured to delegate their authorization.
+ * @throws IllegalArgumentException if a chain is detected + */ + private void checkForRealmChains(Iterable delegatedRealms, Settings globalSettings) { + final Map settingsByRealm = RealmSettings.getRealmSettings(globalSettings); + for (Realm realm : delegatedRealms) { + final Settings realmSettings = settingsByRealm.get(realm.name()); + if (realmSettings != null && DelegatedAuthorizationSettings.AUTHZ_REALMS.exists(realmSettings)) { + throw new IllegalArgumentException("cannot use realm [" + realm + + "] as an authorization realm - it is already delegating authorization to [" + + DelegatedAuthorizationSettings.AUTHZ_REALMS.get(realmSettings) + "]"); + } + } + } + + private Realm findRealm(String name, Iterable allRealms) { + for (Realm realm : allRealms) { + if (name.equals(realm.name())) { + return realm; + } + } + throw new IllegalArgumentException("configured authorization realm [" + name + "] does not exist (or is not enabled)"); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookup.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookup.java new file mode 100644 index 00000000000..428b7c1e4a1 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookup.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.common.IteratingActionListener; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.Collections; +import java.util.List; + +public class RealmUserLookup { + + private final List realms; + private final ThreadContext threadContext; + + public RealmUserLookup(List realms, ThreadContext threadContext) { + this.realms = realms; + this.threadContext = threadContext; + } + + public List getRealms() { + return Collections.unmodifiableList(realms); + } + + public boolean hasRealms() { + return realms.isEmpty() == false; + } + + /** + * Look up the {@code principal} in the list of {@link #realms}. + * The realms are consulted in order. When one realm responds with a non-null {@link User}, this + * is returned with the matching realm, through the {@code listener}. + * If no user is found (including the case where the {@link #realms} list is empty), then + * {@link ActionListener#onResponse(Object)} is called with a {@code null} {@link Tuple}. + */ + public void lookup(String principal, ActionListener> listener) { + final IteratingActionListener, ?
extends Realm> userLookupListener = + new IteratingActionListener<>(listener, + (realm, lookupUserListener) -> realm.lookupUser(principal, + ActionListener.wrap(foundUser -> { + if (foundUser != null) { + lookupUserListener.onResponse(new Tuple<>(foundUser, realm)); + } else { + lookupUserListener.onResponse(null); + } + }, + lookupUserListener::onFailure)), + realms, threadContext); + try { + userLookupListener.run(); + } catch (Exception e) { + listener.onFailure(e); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index 2c9bb8ce3c0..642bc167f7d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -57,7 +57,7 @@ import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.support.Automatons; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.audit.AuditTrailService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java index 07845a131b7..3068a3993d3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java @@ -8,7 +8,7 @@ package org.elasticsearch.xpack.security.authz; import org.elasticsearch.cluster.metadata.AliasOrIndex; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.xpack.core.security.authz.permission.Role; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.ArrayList; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java index c388fd5627c..34aed55bb29 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java @@ -25,9 +25,9 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; import org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField; import java.util.ArrayList; @@ -418,7 
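`RealmUserLookup` drives this iteration through `IteratingActionListener`; a simplified recursive equivalent may make the order-dependent semantics easier to follow. The helper below is a hedged stand-in for illustration, not the `IteratingActionListener` implementation:

```java
import java.util.List;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.xpack.core.security.authc.Realm;
import org.elasticsearch.xpack.core.security.user.User;

// Stand-in for what RealmUserLookup#lookup achieves: consult each realm in
// order, stop at the first non-null User, otherwise answer null.
final class OrderedLookupSketch {
    static void lookup(List<Realm> realms, int index, String principal,
                       ActionListener<Tuple<User, Realm>> listener) {
        if (index >= realms.size()) {
            listener.onResponse(null);                          // no realm knows the principal
            return;
        }
        final Realm realm = realms.get(index);
        realm.lookupUser(principal, ActionListener.wrap(user -> {
            if (user != null) {
                listener.onResponse(new Tuple<>(user, realm));  // first match wins
            } else {
                lookup(realms, index + 1, principal, listener); // try the next realm
            }
        }, listener::onFailure));
    }
}
```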
+418,7 @@ class IndicesAndAliasesResolver { private RemoteClusterResolver(Settings settings, ClusterSettings clusterSettings) { super(settings); - clusters = new CopyOnWriteArraySet<>(buildRemoteClustersSeeds(settings).keySet()); + clusters = new CopyOnWriteArraySet<>(buildRemoteClustersDynamicConfig(settings).keySet()); listenForUpdates(clusterSettings); } @@ -428,7 +428,7 @@ class IndicesAndAliasesResolver { } @Override - protected void updateRemoteCluster(String clusterAlias, List addresses) { + protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress) { if (addresses.isEmpty()) { clusters.remove(clusterAlias); } else { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java index e15ff2f4d0c..a49bfdfbe16 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java @@ -3,6 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.security.authz.accesscontrol; import org.apache.lucene.search.QueryCachingPolicy; @@ -13,6 +14,7 @@ import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.indices.IndicesQueryCache; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; @@ -29,12 +31,19 @@ public final class OptOutQueryCache extends AbstractIndexComponent implements Qu private final IndicesQueryCache indicesQueryCache; private final ThreadContext context; private final String indexName; + private final XPackLicenseState licenseState; - public OptOutQueryCache(IndexSettings indexSettings, IndicesQueryCache indicesQueryCache, ThreadContext context) { + public OptOutQueryCache( + final IndexSettings indexSettings, + final IndicesQueryCache indicesQueryCache, + final ThreadContext context, + final XPackLicenseState licenseState) { super(indexSettings); this.indicesQueryCache = indicesQueryCache; this.context = Objects.requireNonNull(context, "threadContext must not be null"); this.indexName = indexSettings.getIndex().getName(); + this.licenseState = Objects.requireNonNull(licenseState, "licenseState"); + licenseState.addListener(() -> this.clear("license state changed")); } @Override @@ -50,6 +59,12 @@ public final class OptOutQueryCache extends AbstractIndexComponent implements Qu @Override public Weight doCache(Weight weight, QueryCachingPolicy policy) { + // TODO: this is not concurrently safe since the license state can change between reads + if (licenseState.isSecurityEnabled() == false || licenseState.isAuthAllowed() == false) { + logger.debug("not opting out of the query cache; authorization is not allowed"); + return indicesQueryCache.doCache(weight, policy); + } + IndicesAccessControl indicesAccessControl = context.getTransient( AuthorizationServiceField.INDICES_PERMISSIONS_KEY); if (indicesAccessControl == null) { @@ -96,4 +111,5 @@ public final class OptOutQueryCache extends 
AbstractIndexComponent implements Qu // we can cache, all fields are ok return true; } + } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessor.java index 6db19d8edeb..0c30af1879c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessor.java @@ -10,7 +10,7 @@ import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; import org.elasticsearch.xpack.core.security.authc.Authentication; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.util.Arrays; import java.util.EnumSet; @@ -43,7 +43,7 @@ public final class SetSecurityUserProcessor extends AbstractProcessor { } @Override - public void execute(IngestDocument ingestDocument) throws Exception { + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { Authentication authentication = Authentication.getAuthentication(threadContext); if (authentication == null) { throw new IllegalStateException("No user authenticated, only use this processor via authenticated user"); @@ -86,6 +86,7 @@ public final class SetSecurityUserProcessor extends AbstractProcessor { } } ingestDocument.setFieldValue(field, userObject); + return ingestDocument; } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java index 10b8e65c168..b280b3a89a2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java index 7e33844e99b..1b64b3ce2ba 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.rest.RestRequestFilter; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import 
org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java index 3e8cf26ad7d..1ab80954e9b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java index 78229adfee9..d6fc6aae381 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java @@ -58,10 +58,13 @@ public class RestPutUserAction extends SecurityBaseRestHandler implements RestRe return channel -> requestBuilder.execute(new RestBuilderListener(channel) { @Override public RestResponse buildResponse(PutUserResponse putUserResponse, XContentBuilder builder) throws Exception { - return new BytesRestResponse(RestStatus.OK, - builder.startObject() - .field("user", putUserResponse) - .endObject()); + builder.startObject() + .startObject("user"); // TODO in 7.0 remove wrapping of response in the user object and just return the object + putUserResponse.toXContent(builder, request); + builder.endObject(); + + putUserResponse.toXContent(builder, request); + return new BytesRestResponse(RestStatus.OK, builder.endObject()); } }); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java index 761af81b08e..fcbae00ba09 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java @@ -25,9 +25,8 @@ import org.elasticsearch.transport.netty4.Netty4TcpChannel; import org.elasticsearch.transport.nio.NioTcpChannel; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.authc.Authentication; -import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.action.SecurityActionMapper; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authz.AuthorizationService; @@ -116,50 
+115,28 @@ public interface ServerTransportFilter { } } - final Version version = transportChannel.getVersion().equals(Version.V_5_4_0) ? Version.CURRENT : transportChannel.getVersion(); + final Version version = transportChannel.getVersion(); authcService.authenticate(securityAction, request, (User)null, ActionListener.wrap((authentication) -> { - if (reservedRealmEnabled && authentication.getVersion().before(Version.V_5_2_0) && - KibanaUser.NAME.equals(authentication.getUser().authenticatedUser().principal())) { - executeAsCurrentVersionKibanaUser(securityAction, request, transportChannel, listener, authentication); - } else if (securityAction.equals(TransportService.HANDSHAKE_ACTION_NAME) && - SystemUser.is(authentication.getUser()) == false) { - securityContext.executeAsUser(SystemUser.INSTANCE, (ctx) -> { - final Authentication replaced = Authentication.getAuthentication(threadContext); - final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = - new AuthorizationUtils.AsyncAuthorizer(replaced, listener, (userRoles, runAsRoles) -> { - authzService.authorize(replaced, securityAction, request, userRoles, runAsRoles); - listener.onResponse(null); - }); - asyncAuthorizer.authorize(authzService); - }, version); - } else { + if (securityAction.equals(TransportService.HANDSHAKE_ACTION_NAME) && + SystemUser.is(authentication.getUser()) == false) { + securityContext.executeAsUser(SystemUser.INSTANCE, (ctx) -> { + final Authentication replaced = Authentication.getAuthentication(threadContext); final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = - new AuthorizationUtils.AsyncAuthorizer(authentication, listener, (userRoles, runAsRoles) -> { - authzService.authorize(authentication, securityAction, request, userRoles, runAsRoles); - listener.onResponse(null); - }); + new AuthorizationUtils.AsyncAuthorizer(replaced, listener, (userRoles, runAsRoles) -> { + authzService.authorize(replaced, securityAction, request, userRoles, runAsRoles); + listener.onResponse(null); + }); asyncAuthorizer.authorize(authzService); - } - }, listener::onFailure)); - } - - private void executeAsCurrentVersionKibanaUser(String securityAction, TransportRequest request, TransportChannel transportChannel, - ActionListener listener, Authentication authentication) { - // the authentication came from an older node - so let's replace the user with our version - final User kibanaUser = new KibanaUser(authentication.getUser().enabled()); - if (kibanaUser.enabled()) { - securityContext.executeAsUser(kibanaUser, (original) -> { - final Authentication replacedUserAuth = securityContext.getAuthentication(); + }, version); + } else { final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = - new AuthorizationUtils.AsyncAuthorizer(replacedUserAuth, listener, (userRoles, runAsRoles) -> { - authzService.authorize(replacedUserAuth, securityAction, request, userRoles, runAsRoles); + new AuthorizationUtils.AsyncAuthorizer(authentication, listener, (userRoles, runAsRoles) -> { + authzService.authorize(authentication, securityAction, request, userRoles, runAsRoles); listener.onResponse(null); }); asyncAuthorizer.authorize(authzService); - }, transportChannel.getVersion()); - } else { - throw new IllegalStateException("a disabled user should never be sent. 
" + kibanaUser); - } + } + }, listener::onFailure)); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java index 246b584a83b..9317e9f8dcb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java @@ -7,10 +7,9 @@ package org.elasticsearch.integration; import org.apache.http.HttpEntity; import org.apache.http.StatusLine; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.SecureString; @@ -18,9 +17,7 @@ import org.elasticsearch.test.SecuritySingleNodeTestCase; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import java.io.IOException; -import java.util.HashMap; import java.util.Locale; -import java.util.Map; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -28,64 +25,59 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; /** - * a helper class that contains a couple of HTTP helper methods + * A helper class that contains a couple of HTTP helper methods. */ public abstract class AbstractPrivilegeTestCase extends SecuritySingleNodeTestCase { - protected void assertAccessIsAllowed(String user, String method, String uri, String body, - Map params) throws IOException { - Response response = getRestClient().performRequest(method, uri, params, entityOrNull(body), - new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, - UsernamePasswordToken.basicAuthHeaderValue(user, new SecureString("passwd".toCharArray())))); + protected void assertAccessIsAllowed(String user, Request request) throws IOException { + setUser(request, user); + Response response = getRestClient().performRequest(request); StatusLine statusLine = response.getStatusLine(); - String message = String.format(Locale.ROOT, "%s %s: Expected no error got %s %s with body %s", method, uri, - statusLine.getStatusCode(), statusLine.getReasonPhrase(), EntityUtils.toString(response.getEntity())); + String message = String.format(Locale.ROOT, "%s %s: Expected no error got %s %s with body %s", + request.getMethod(), request.getEndpoint(), statusLine.getStatusCode(), + statusLine.getReasonPhrase(), EntityUtils.toString(response.getEntity())); assertThat(message, statusLine.getStatusCode(), is(not(greaterThanOrEqualTo(400)))); } protected void assertAccessIsAllowed(String user, String method, String uri, String body) throws IOException { - assertAccessIsAllowed(user, method, uri, body, new HashMap<>()); + Request request = new Request(method, uri); + request.setJsonEntity(body); + assertAccessIsAllowed(user, request); } protected void assertAccessIsAllowed(String user, String method, String uri) throws IOException { - assertAccessIsAllowed(user, method, uri, null, new HashMap<>()); + assertAccessIsAllowed(user, new Request(method, uri)); } - protected void assertAccessIsDenied(String user, String method, String uri, String body) throws IOException { - 
assertAccessIsDenied(user, method, uri, body, new HashMap<>()); - } - - protected void assertAccessIsDenied(String user, String method, String uri) throws IOException { - assertAccessIsDenied(user, method, uri, null, new HashMap<>()); - } - - protected void assertAccessIsDenied(String user, String method, String uri, String body, - Map params) throws IOException { - ResponseException responseException = expectThrows(ResponseException.class, - () -> getRestClient().performRequest(method, uri, params, entityOrNull(body), - new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, - UsernamePasswordToken.basicAuthHeaderValue(user, new SecureString("passwd".toCharArray()))))); + protected void assertAccessIsDenied(String user, Request request) throws IOException { + setUser(request, user); + ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); StatusLine statusLine = responseException.getResponse().getStatusLine(); - String message = String.format(Locale.ROOT, "%s %s body %s: Expected 403, got %s %s with body %s", method, uri, body, + String requestBody = request.getEntity() == null ? "" : "with body " + EntityUtils.toString(request.getEntity()); + String message = String.format(Locale.ROOT, "%s %s body %s: Expected 403, got %s %s with body %s", + request.getMethod(), request.getEndpoint(), requestBody, statusLine.getStatusCode(), statusLine.getReasonPhrase(), EntityUtils.toString(responseException.getResponse().getEntity())); assertThat(message, statusLine.getStatusCode(), is(403)); } + protected void assertAccessIsDenied(String user, String method, String uri, String body) throws IOException { + Request request = new Request(method, uri); + request.setJsonEntity(body); + assertAccessIsDenied(user, request); + } - protected void assertBodyHasAccessIsDenied(String user, String method, String uri, String body) throws IOException { - assertBodyHasAccessIsDenied(user, method, uri, body, new HashMap<>()); + protected void assertAccessIsDenied(String user, String method, String uri) throws IOException { + assertAccessIsDenied(user, new Request(method, uri)); } /** * Like {@code assertAcessIsDenied}, but for _bulk requests since the entire * request will not be failed, just the individual ones */ - protected void assertBodyHasAccessIsDenied(String user, String method, String uri, String body, - Map params) throws IOException { - Response resp = getRestClient().performRequest(method, uri, params, entityOrNull(body), - new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, - UsernamePasswordToken.basicAuthHeaderValue(user, new SecureString("passwd".toCharArray())))); + protected void assertBodyHasAccessIsDenied(String user, Request request) throws IOException { + setUser(request, user); + Response resp = getRestClient().performRequest(request); StatusLine statusLine = resp.getStatusLine(); assertThat(statusLine.getStatusCode(), is(200)); HttpEntity bodyEntity = resp.getEntity(); @@ -93,11 +85,15 @@ public abstract class AbstractPrivilegeTestCase extends SecuritySingleNodeTestCa assertThat(bodyStr, containsString("unauthorized for user [" + user + "]")); } - private static HttpEntity entityOrNull(String body) { - HttpEntity entity = null; - if (body != null) { - entity = new StringEntity(body, ContentType.APPLICATION_JSON); - } - return entity; + protected void assertBodyHasAccessIsDenied(String user, String method, String uri, String body) throws IOException { + Request request = new Request(method, uri); + 
request.setJsonEntity(body); + assertBodyHasAccessIsDenied(user, request); + } + + private void setUser(Request request, String user) { + RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); + options.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(user, new SecureString("passwd".toCharArray()))); + request.setOptions(options); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java index 0b6321e5960..fc02a5c4d62 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.Realms; import org.junit.BeforeClass; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java index 03d0310a136..bf81fd77dc5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.integration; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.client.Request; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; @@ -15,9 +16,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import java.nio.file.Path; -import java.util.Map; -import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.is; @@ -132,10 +131,12 @@ public class ClusterPrivilegeTests extends AbstractPrivilegeTestCase { assertAccessIsDenied("user_c", "PUT", "/_snapshot/my-repo", repoJson); assertAccessIsAllowed("user_a", "PUT", "/_snapshot/my-repo", repoJson); - Map params = singletonMap("refresh", "true"); - assertAccessIsDenied("user_a", "PUT", "/someindex/bar/1", "{ \"name\" : \"elasticsearch\" }", params); - assertAccessIsDenied("user_b", "PUT", "/someindex/bar/1", "{ \"name\" : \"elasticsearch\" }", params); - assertAccessIsAllowed("user_c", "PUT", "/someindex/bar/1", "{ \"name\" : \"elasticsearch\" }", params); + Request createBar = new Request("PUT", "/someindex/bar/1"); + createBar.setJsonEntity("{ \"name\" : \"elasticsearch\" }"); + createBar.addParameter("refresh", "true"); + assertAccessIsDenied("user_a", createBar); + assertAccessIsDenied("user_b", createBar); + assertAccessIsAllowed("user_c", createBar); assertAccessIsDenied("user_b", "PUT", "/_snapshot/my-repo/my-snapshot", "{ \"indices\": \"someindex\" }"); assertAccessIsDenied("user_c", "PUT", "/_snapshot/my-repo/my-snapshot", "{ \"indices\": \"someindex\" }"); @@ -152,10 +153,11 @@ public class 
         assertAccessIsDenied("user_b", "DELETE", "/someindex");
         assertAccessIsAllowed("user_c", "DELETE", "/someindex");

-        params = singletonMap("wait_for_completion", "true");
-        assertAccessIsDenied("user_b", "POST", "/_snapshot/my-repo/my-snapshot/_restore", null, params);
-        assertAccessIsDenied("user_c", "POST", "/_snapshot/my-repo/my-snapshot/_restore", null, params);
-        assertAccessIsAllowed("user_a", "POST", "/_snapshot/my-repo/my-snapshot/_restore", null, params);
+        Request restoreSnapshotRequest = new Request("POST", "/_snapshot/my-repo/my-snapshot/_restore");
+        restoreSnapshotRequest.addParameter("wait_for_completion", "true");
+        assertAccessIsDenied("user_b", restoreSnapshotRequest);
+        assertAccessIsDenied("user_c", restoreSnapshotRequest);
+        assertAccessIsAllowed("user_a", restoreSnapshotRequest);

         assertAccessIsDenied("user_a", "GET", "/someindex/bar/1");
         assertAccessIsDenied("user_b", "GET", "/someindex/bar/1");
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java
index 57262822982..ed82808af76 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java
@@ -13,11 +13,8 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
 import org.junit.Before;

-import java.util.Collections;
 import java.util.Locale;
-import java.util.Map;

-import static java.util.Collections.singletonMap;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
 import static org.hamcrest.Matchers.is;

@@ -143,11 +140,12 @@ public class IndexPrivilegeTests extends AbstractPrivilegeTestCase {
     @Before
     public void insertBaseDocumentsAsAdmin() throws Exception {
         // indices: a,b,c,abc
-        Map<String, String> params = singletonMap("refresh", "true");
-        assertAccessIsAllowed("admin", "PUT", "/a/foo/1", jsonDoc, params);
-        assertAccessIsAllowed("admin", "PUT", "/b/foo/1", jsonDoc, params);
-        assertAccessIsAllowed("admin", "PUT", "/c/foo/1", jsonDoc, params);
-        assertAccessIsAllowed("admin", "PUT", "/abc/foo/1", jsonDoc, params);
+        for (String index : new String[] {"a", "b", "c", "abc"}) {
+            Request request = new Request("PUT", "/" + index + "/foo/1");
+            request.setJsonEntity(jsonDoc);
+            request.addParameter("refresh", "true");
+            assertAccessIsAllowed("admin", request);
+        }
     }

     private static String randomIndex() {
@@ -402,8 +400,6 @@ public class IndexPrivilegeTests extends AbstractPrivilegeTestCase {
     }

     private void assertUserExecutes(String user, String action, String index, boolean userIsAllowed) throws Exception {
-        Map<String, String> refreshParams = Collections.emptyMap();//singletonMap("refresh", "true");
-
         switch (action) {
             case "all" :
                 if (userIsAllowed) {
@@ -438,7 +434,7 @@ public class IndexPrivilegeTests extends AbstractPrivilegeTestCase {
                     assertAccessIsAllowed(user, "POST", "/" + index + "/_open");
                     assertAccessIsAllowed(user, "POST", "/" + index + "/_cache/clear");
                     // indexing a document to have the mapping available, and wait for green state to make sure index is created
-                    assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/1", jsonDoc, refreshParams);
+                    assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/1", jsonDoc);
                     assertNoTimeout(client().admin().cluster().prepareHealth(index).setWaitForGreenStatus().get());
                     assertAccessIsAllowed(user, "GET", "/" + index + "/_mapping/foo/field/name");
                     assertAccessIsAllowed(user, "GET", "/" + index + "/_settings");
@@ -535,8 +531,8 @@ public class IndexPrivilegeTests extends AbstractPrivilegeTestCase {
             case "delete" :
                 String jsonDoc = "{ \"name\" : \"docToDelete\"}";
-                assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/docToDelete", jsonDoc, refreshParams);
-                assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/docToDelete2", jsonDoc, refreshParams);
+                assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/docToDelete", jsonDoc);
+                assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/docToDelete2", jsonDoc);
                 if (userIsAllowed) {
                     assertAccessIsAllowed(user, "DELETE", "/" + index + "/foo/docToDelete");
                 } else {
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java
index af5b73d889d..63a38b12a9e 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java
@@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
 import org.elasticsearch.xpack.core.security.client.SecurityClient;
+import org.elasticsearch.xpack.core.security.user.APMSystemUser;
 import org.elasticsearch.xpack.core.security.user.BeatsSystemUser;
 import org.elasticsearch.xpack.core.security.user.ElasticUser;
 import org.elasticsearch.xpack.core.security.user.KibanaUser;
@@ -88,7 +89,7 @@ public abstract class NativeRealmIntegTestCase extends SecurityIntegTestCase {
         RequestOptions.Builder optionsBuilder = RequestOptions.DEFAULT.toBuilder();
         optionsBuilder.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(ElasticUser.NAME, reservedPassword));
         RequestOptions options = optionsBuilder.build();
-        for (String username : Arrays.asList(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME)) {
+        for (String username : Arrays.asList(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME)) {
             Request request = new Request("PUT", "/_xpack/security/user/" + username + "/_password");
             request.setJsonEntity("{\"password\": \"" + new String(reservedPassword.getChars()) + "\"}");
             request.setOptions(options);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java
index 9bb0e44eb66..7143182c162 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.test;

 import io.netty.util.ThreadDeathWatcher;
 import io.netty.util.concurrent.GlobalEventExecutor;
+
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
@@ -44,7 +45,6 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
 import org.elasticsearch.xpack.core.security.client.SecurityClient;
 import org.elasticsearch.xpack.security.LocalStateSecurity;
-
 import org.elasticsearch.xpack.security.support.SecurityIndexManager;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -420,14 +420,18 @@ public abstract class SecurityIntegTestCase extends ESIntegTestCase {
         createIndex(indices);

         if (frequently()) {
+            boolean aliasAdded = false;
             IndicesAliasesRequestBuilder builder = client().admin().indices().prepareAliases();
             for (String index : indices) {
                 if (frequently()) {
                     //one alias per index with prefix "alias-"
                     builder.addAlias(index, "alias-" + index);
+                    aliasAdded = true;
                 }
             }
-            if (randomBoolean()) {
+            // If we get to this point and we haven't added an alias to the request we need to add one
+            // or the request will fail so use noAliasAdded to force adding the alias in this case
+            if (aliasAdded == false || randomBoolean()) {
                 //one alias pointing to all indices
                 for (String index : indices) {
                     builder.addAlias(index, "alias");
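The aliasAdded guard added above works around request validation: an alias request carrying zero actions is rejected before it reaches the cluster. A hypothetical sketch of the failure mode the new comment describes (the exception type is an assumption based on general request validation behavior, not something shown in this patch):

    // Hypothetical: calling get() on an empty alias request would throw an
    // ActionRequestValidationException, which is why the test forces at least
    // one addAlias(...) call when every frequently() branch was skipped.
    IndicesAliasesRequestBuilder builder = client().admin().indices().prepareAliases();
    builder.addAlias("someindex", "alias-someindex"); // at least one action makes it valid
    builder.get();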
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java
index 1886dd4249b..3bf3bb4dc86 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java
@@ -20,7 +20,9 @@ import org.hamcrest.Matcher;

 import javax.net.ssl.KeyManagerFactory;
 import javax.net.ssl.TrustManagerFactory;
+
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -121,7 +123,7 @@ public class SettingsFilterTests extends ESTestCase {
         List<String> settingsFilterList = new ArrayList<>();
         settingsFilterList.addAll(securityPlugin.getSettingsFilter());
         // custom settings, potentially added by a plugin
-        SettingsModule settingsModule = new SettingsModule(settings, settingList, settingsFilterList);
+        SettingsModule settingsModule = new SettingsModule(settings, settingList, settingsFilterList, Collections.emptySet());

         Injector injector = Guice.createInjector(settingsModule);
         SettingsFilter settingsFilter = injector.getInstance(SettingsFilter.class);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java
index 1d4da71e11b..b659adf22cf 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java
@@ -14,7 +14,7 @@ public class FIPS140JKSKeystoreBootstrapCheckTests extends ESTestCase {
     public void testNoKeystoreIsAllowed() {
         final Settings.Builder settings = Settings.builder()
             .put("xpack.security.fips_mode.enabled", "true");
-        assertFalse(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure());
+        assertFalse(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure());
     }

     public void testSSLKeystoreTypeIsNotAllowed() {
@@ -22,7 +22,7 @@ public class FIPS140JKSKeystoreBootstrapCheckTests extends ESTestCase {
             .put("xpack.security.fips_mode.enabled", "true")
             .put("xpack.ssl.keystore.path", "/this/is/the/path")
             .put("xpack.ssl.keystore.type", "JKS");
-        assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure());
+        assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure());
     }

     public void testSSLImplicitKeystoreTypeIsNotAllowed() {
@@ -30,7 +30,7 @@ public class FIPS140JKSKeystoreBootstrapCheckTests extends ESTestCase {
             .put("xpack.security.fips_mode.enabled", "true")
             .put("xpack.ssl.keystore.path", "/this/is/the/path")
             .put("xpack.ssl.keystore.type", "JKS");
-        assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure());
+        assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure());
     }

     public void testTransportSSLKeystoreTypeIsNotAllowed() {
@@ -38,7 +38,7 @@ public class FIPS140JKSKeystoreBootstrapCheckTests extends ESTestCase {
             .put("xpack.security.fips_mode.enabled", "true")
             .put("xpack.security.transport.ssl.keystore.path", "/this/is/the/path")
             .put("xpack.security.transport.ssl.keystore.type", "JKS");
-        assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure());
+        assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure());
     }

     public void testHttpSSLKeystoreTypeIsNotAllowed() {
@@ -46,7 +46,7 @@ public class FIPS140JKSKeystoreBootstrapCheckTests extends ESTestCase {
             .put("xpack.security.fips_mode.enabled", "true")
             .put("xpack.security.http.ssl.keystore.path", "/this/is/the/path")
             .put("xpack.security.http.ssl.keystore.type", "JKS");
-        assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure());
+        assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure());
     }

     public void testRealmKeystoreTypeIsNotAllowed() {
@@ -54,13 +54,13 @@ public class FIPS140JKSKeystoreBootstrapCheckTests extends ESTestCase {
             .put("xpack.security.fips_mode.enabled", "true")
             .put("xpack.security.authc.realms.ldap.ssl.keystore.path", "/this/is/the/path")
             .put("xpack.security.authc.realms.ldap.ssl.keystore.type", "JKS");
-        assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure());
+        assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure());
     }

     public void testImplicitRealmKeystoreTypeIsNotAllowed() {
         final Settings.Builder settings = Settings.builder()
             .put("xpack.security.fips_mode.enabled", "true")
             .put("xpack.security.authc.realms.ldap.ssl.keystore.path", "/this/is/the/path");
-        assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure());
+        assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure());
     }
 }
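A pattern worth noting across these FIPS test changes: the bootstrap checks no longer take Settings in their constructors and instead read everything from the BootstrapContext handed to check(...). A minimal sketch of the new calling convention, composed entirely from calls visible in the hunks above:

    // The check is now stateless: settings travel in the BootstrapContext.
    Settings settings = Settings.builder()
        .put("xpack.security.fips_mode.enabled", "true")
        .put("xpack.ssl.keystore.path", "/this/is/the/path")
        .put("xpack.ssl.keystore.type", "JKS")
        .build();
    BootstrapCheck.BootstrapCheckResult result =
        new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings, null));
    assertTrue(result.isFailure()); // JKS keystores are rejected in FIPS mode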
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java
index a2ec8f9fb20..fb4c9e21a25 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java
@@ -17,27 +17,29 @@ import org.elasticsearch.test.ESTestCase;

 public class FIPS140LicenseBootstrapCheckTests extends ESTestCase {

     public void testBootstrapCheck() throws Exception {
-        assertTrue(new FIPS140LicenseBootstrapCheck(false)
-            .check(new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA)).isSuccess());
-        assertTrue(new FIPS140LicenseBootstrapCheck(randomBoolean())
+        assertTrue(new FIPS140LicenseBootstrapCheck()
             .check(new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA)).isSuccess());
+        assertTrue(new FIPS140LicenseBootstrapCheck()
+            .check(new BootstrapContext(Settings.builder().put("xpack.security.fips_mode.enabled", randomBoolean()).build(), MetaData
+                .EMPTY_META_DATA)).isSuccess());

-        License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24));
         MetaData.Builder builder = MetaData.builder();
+        License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24));
         TestUtils.putLicense(builder, license);
         MetaData metaData = builder.build();
+
         if (FIPS140LicenseBootstrapCheck.ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode())) {
-            assertTrue(new FIPS140LicenseBootstrapCheck(true).check(new BootstrapContext(
+            assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext(
                 Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).isSuccess());
-            assertTrue(new FIPS140LicenseBootstrapCheck(false).check(new BootstrapContext(
+            assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext(
                 Settings.builder().put("xpack.security.fips_mode.enabled", false).build(), metaData)).isSuccess());
         } else {
-            assertTrue(new FIPS140LicenseBootstrapCheck(false).check(new BootstrapContext(
+            assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext(
                 Settings.builder().put("xpack.security.fips_mode.enabled", false).build(), metaData)).isSuccess());
-            assertTrue(new FIPS140LicenseBootstrapCheck(true).check(new BootstrapContext(
+            assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext(
                 Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).isFailure());
             assertEquals("FIPS mode is only allowed with a Platinum or Trial license",
-                new FIPS140LicenseBootstrapCheck(true).check(new BootstrapContext(
+                new FIPS140LicenseBootstrapCheck().check(new BootstrapContext(
                     Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).getMessage());
         }
     }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java
index 8632400866a..6376ca211dc 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java
@@ -25,7 +25,7 @@ public class FIPS140PasswordHashingAlgorithmBootstrapCheckTests extends ESTestCa
             .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), "PBKDF2_10000")
             .build();
         final BootstrapCheck.BootstrapCheckResult result =
-            new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings).check(new BootstrapContext(settings, null));
+            new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(new BootstrapContext(settings, null));
         assertFalse(result.isFailure());
     }

@@ -35,7 +35,7 @@ public class FIPS140PasswordHashingAlgorithmBootstrapCheckTests extends ESTestCa
             .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), "PBKDF2")
             .build();
         final BootstrapCheck.BootstrapCheckResult result =
-            new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings).check(new BootstrapContext(settings, null));
+            new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(new BootstrapContext(settings, null));
         assertFalse(result.isFailure());
         }
     }
@@ -55,7 +55,7 @@ public class FIPS140PasswordHashingAlgorithmBootstrapCheckTests extends ESTestCa
         }
         final Settings settings = builder.build();
         final BootstrapCheck.BootstrapCheckResult result =
-            new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings).check(new BootstrapContext(settings, null));
+            new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(new BootstrapContext(settings, null));
         assertThat(result.isFailure(), equalTo(fipsModeEnabled));
     }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java
index 2b9e540f3bf..e3b1cd31246 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java
@@ -15,7 +15,7 @@ import org.elasticsearch.xpack.core.security.SecurityContext;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.junit.Before;

 import java.io.IOException;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java
index 1ac5490dc0c..577c7ddb249 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java
@@ -33,7 +33,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationField;
 import org.elasticsearch.xpack.core.security.authz.permission.Role;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.authc.AuthenticationService;
 import org.elasticsearch.xpack.security.authz.AuthorizationService;
 import org.junit.Before;
@@ -79,7 +79,7 @@ public class SecurityActionFilterTests extends ESTestCase {
         ClusterState state = mock(ClusterState.class);
         DiscoveryNodes nodes = DiscoveryNodes.builder()
                 .add(new DiscoveryNode("id1", buildNewFakeTransportAddress(), Version.CURRENT))
-                .add(new DiscoveryNode("id2", buildNewFakeTransportAddress(), Version.V_5_4_0))
+                .add(new DiscoveryNode("id2", buildNewFakeTransportAddress(), Version.V_6_0_0))
                 .build();
         when(state.nodes()).thenReturn(nodes);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java
index 0809276932d..7c951c0014e 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java
@@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition;
 import org.elasticsearch.xpack.core.security.authz.permission.Role;
 import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.audit.AuditTrailService;

 import java.util.Collections;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java
index f939b175e48..f1363214b07 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java
@@ -24,7 +24,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition;
 import org.elasticsearch.xpack.core.security.authz.permission.Role;
 import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.audit.AuditTrailService;

 import java.util.Collections;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java
index 3371b901647..17a45f23893 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java
@@ -56,7 +56,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.authc.Realms;
 import org.elasticsearch.xpack.security.authc.TokenService;
 import org.elasticsearch.xpack.security.authc.UserToken;
@@ -316,7 +316,7 @@ public class TransportSamlInvalidateSessionActionTests extends SamlTestCase {
             new RealmRef("native", NativeRealmSettings.TYPE, "node01"), null);
         final Map<String, Object> metadata = samlRealm.createTokenMetadata(nameId, session);
         final PlainActionFuture<Tuple<UserToken, String>> future = new PlainActionFuture<>();
-        tokenService.createUserToken(authentication, authentication, future, metadata);
+        tokenService.createUserToken(authentication, authentication, future, metadata, true);
         return future.actionGet();
     }

diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java
index 1ce8b1aff13..291c102f396 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java
@@ -46,7 +46,7 @@ import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutResponse;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.core.ssl.SSLService;
 import org.elasticsearch.xpack.security.authc.Realms;
 import org.elasticsearch.xpack.security.authc.TokenService;
@@ -222,7 +222,7 @@ public class TransportSamlLogoutActionTests extends SamlTestCase {
             new SamlNameId(NameID.TRANSIENT, nameId, null, null, null), session);

         final PlainActionFuture<Tuple<UserToken, String>> future = new PlainActionFuture<>();
-        tokenService.createUserToken(authentication, authentication, future, tokenMetaData);
+        tokenService.createUserToken(authentication, authentication, future, tokenMetaData, true);
         final UserToken userToken = future.actionGet().v1();
         mockGetTokenFromId(userToken, client);
         final String tokenString = tokenService.getUserTokenString(userToken);
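Across these call sites, TokenService.createUserToken gains a trailing boolean argument; judging from the new tests below it controls whether a refresh token is created alongside the access token, though the parameter's name is not visible in this diff. A minimal sketch of the updated signature as these tests use it:

    // Sketch of the new call; the final boolean is assumed to mean
    // "also create a refresh token" based on the tests in this patch.
    PlainActionFuture<Tuple<UserToken, String>> future = new PlainActionFuture<>();
    tokenService.createUserToken(authentication, authentication, future, Collections.emptyMap(), true);
    UserToken accessToken = future.actionGet().v1();  // the access token
    String refreshToken = future.actionGet().v2();    // appears to carry the refresh token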
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java
new file mode 100644
index 00000000000..a6b92d79f15
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java
@@ -0,0 +1,195 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.security.action.token;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.get.GetAction;
+import org.elasticsearch.action.get.GetRequestBuilder;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.get.MultiGetAction;
+import org.elasticsearch.action.get.MultiGetItemResponse;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.get.MultiGetRequestBuilder;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.action.index.IndexAction;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.action.update.UpdateAction;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.ClusterServiceUtils;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.TestThreadPool;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.XPackSettings;
+import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction;
+import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequest;
+import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse;
+import org.elasticsearch.xpack.core.security.authc.Authentication;
+import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
+import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.xpack.security.authc.AuthenticationService;
+import org.elasticsearch.xpack.security.authc.TokenService;
+import org.elasticsearch.xpack.security.support.SecurityIndexManager;
+import org.junit.After;
+import org.junit.Before;
+
+import java.time.Clock;
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TransportCreateTokenActionTests extends ESTestCase {
+
+    private static final Settings SETTINGS = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "TokenServiceTests")
+        .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).build();
+
+    private ThreadPool threadPool;
+    private Client client;
+    private SecurityIndexManager securityIndex;
+    private ClusterService clusterService;
+    private AtomicReference<IndexRequest> idxReqReference;
+    private AuthenticationService authenticationService;
+
+    @Before
+    public void setupClient() {
+        threadPool = new TestThreadPool(getTestName());
+        client = mock(Client.class);
+        idxReqReference = new AtomicReference<>();
+        authenticationService = mock(AuthenticationService.class);
+        when(client.threadPool()).thenReturn(threadPool);
+        when(client.settings()).thenReturn(SETTINGS);
+        doAnswer(invocationOnMock -> {
+            GetRequestBuilder builder = new GetRequestBuilder(client, GetAction.INSTANCE);
+            builder.setIndex((String) invocationOnMock.getArguments()[0])
+                .setType((String) invocationOnMock.getArguments()[1])
+                .setId((String) invocationOnMock.getArguments()[2]);
+            return builder;
+        }).when(client).prepareGet(anyString(), anyString(), anyString());
+        when(client.prepareMultiGet()).thenReturn(new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE));
+        doAnswer(invocationOnMock -> {
+            ActionListener<MultiGetResponse> listener = (ActionListener<MultiGetResponse>) invocationOnMock.getArguments()[1];
+            MultiGetResponse response = mock(MultiGetResponse.class);
+            MultiGetItemResponse[] responses = new MultiGetItemResponse[2];
+            when(response.getResponses()).thenReturn(responses);
+
+            GetResponse oldGetResponse = mock(GetResponse.class);
+            when(oldGetResponse.isExists()).thenReturn(false);
+            responses[0] = new MultiGetItemResponse(oldGetResponse, null);
+
+            GetResponse getResponse = mock(GetResponse.class);
+            responses[1] = new MultiGetItemResponse(getResponse, null);
+            when(getResponse.isExists()).thenReturn(false);
+            listener.onResponse(response);
+            return Void.TYPE;
+        }).when(client).multiGet(any(MultiGetRequest.class), any(ActionListener.class));
+        when(client.prepareIndex(any(String.class), any(String.class), any(String.class)))
+            .thenReturn(new IndexRequestBuilder(client, IndexAction.INSTANCE));
+        when(client.prepareUpdate(any(String.class), any(String.class), any(String.class)))
+            .thenReturn(new UpdateRequestBuilder(client, UpdateAction.INSTANCE));
+        doAnswer(invocationOnMock -> {
+            idxReqReference.set((IndexRequest) invocationOnMock.getArguments()[1]);
+            ActionListener<IndexResponse> responseActionListener = (ActionListener<IndexResponse>) invocationOnMock.getArguments()[2];
+            responseActionListener.onResponse(new IndexResponse());
+            return null;
+        }).when(client).execute(eq(IndexAction.INSTANCE), any(IndexRequest.class), any(ActionListener.class));
+
+        // setup lifecycle service
+        securityIndex = mock(SecurityIndexManager.class);
+        doAnswer(invocationOnMock -> {
+            Runnable runnable = (Runnable) invocationOnMock.getArguments()[1];
+            runnable.run();
+            return null;
+        }).when(securityIndex).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class));
+
+        doAnswer(invocationOnMock -> {
+            UsernamePasswordToken token = (UsernamePasswordToken) invocationOnMock.getArguments()[2];
+            User user = new User(token.principal());
+            Authentication authentication = new Authentication(user, new Authentication.RealmRef("fake", "mock", "n1"), null);
+            authentication.writeToContext(threadPool.getThreadContext());
+            ActionListener<Authentication> authListener = (ActionListener<Authentication>) invocationOnMock.getArguments()[3];
+            authListener.onResponse(authentication);
+            return Void.TYPE;
+        }).when(authenticationService).authenticate(eq(CreateTokenAction.NAME), any(CreateTokenRequest.class),
+            any(UsernamePasswordToken.class), any(ActionListener.class));
+
+        this.clusterService = ClusterServiceUtils.createClusterService(threadPool);
+    }
+
+    @After
+    public void stopThreadPool() throws Exception {
+        if (threadPool != null) {
+            terminate(threadPool);
+        }
+    }
+
+    public void testClientCredentialsCreatesWithoutRefreshToken() throws Exception {
+        final TokenService tokenService = new TokenService(SETTINGS, Clock.systemUTC(), client, securityIndex, clusterService);
+        Authentication authentication = new Authentication(new User("joe"), new Authentication.RealmRef("realm", "type", "node"), null);
+        authentication.writeToContext(threadPool.getThreadContext());
+
+        final TransportCreateTokenAction action = new TransportCreateTokenAction(SETTINGS, threadPool,
+            mock(TransportService.class), new ActionFilters(Collections.emptySet()), tokenService,
+            authenticationService);
+        final CreateTokenRequest createTokenRequest = new CreateTokenRequest();
+        createTokenRequest.setGrantType("client_credentials");
+
+        PlainActionFuture<CreateTokenResponse> tokenResponseFuture = new PlainActionFuture<>();
+        action.doExecute(null, createTokenRequest, tokenResponseFuture);
+        CreateTokenResponse createTokenResponse = tokenResponseFuture.get();
+        assertNull(createTokenResponse.getRefreshToken());
+        assertNotNull(createTokenResponse.getTokenString());
+
+        assertNotNull(idxReqReference.get());
+        Map<String, Object> sourceMap = idxReqReference.get().sourceAsMap();
+        assertNotNull(sourceMap);
+        assertNotNull(sourceMap.get("access_token"));
+        assertNull(sourceMap.get("refresh_token"));
+    }
+
+    public void testPasswordGrantTypeCreatesWithRefreshToken() throws Exception {
+        final TokenService tokenService = new TokenService(SETTINGS, Clock.systemUTC(), client, securityIndex, clusterService);
+        Authentication authentication = new Authentication(new User("joe"), new Authentication.RealmRef("realm", "type", "node"), null);
+        authentication.writeToContext(threadPool.getThreadContext());
+
+        final TransportCreateTokenAction action = new TransportCreateTokenAction(SETTINGS, threadPool,
+            mock(TransportService.class), new ActionFilters(Collections.emptySet()), tokenService,
+            authenticationService);
+        final CreateTokenRequest createTokenRequest = new CreateTokenRequest();
+        createTokenRequest.setGrantType("password");
+        createTokenRequest.setUsername("user");
+        createTokenRequest.setPassword(new SecureString("password".toCharArray()));
+
+        PlainActionFuture<CreateTokenResponse> tokenResponseFuture = new PlainActionFuture<>();
+        action.doExecute(null, createTokenRequest, tokenResponseFuture);
+        CreateTokenResponse createTokenResponse = tokenResponseFuture.get();
+        assertNotNull(createTokenResponse.getRefreshToken());
+        assertNotNull(createTokenResponse.getTokenString());
+
+        assertNotNull(idxReqReference.get());
+        Map<String, Object> sourceMap = idxReqReference.get().sourceAsMap();
+        assertNotNull(sourceMap);
+        assertNotNull(sourceMap.get("access_token"));
+        assertNotNull(sourceMap.get("refresh_token"));
+    }
+}
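The two tests in this new file pin down the behavioral difference between grant types: client_credentials persists only an access_token in the tokens index, while password also persists a refresh_token. A condensed sketch of the request setup for each, using only the setters shown above (the credential values are the test fixtures, not real credentials):

    // client_credentials: no refresh token is created or stored
    CreateTokenRequest clientCredentials = new CreateTokenRequest();
    clientCredentials.setGrantType("client_credentials");

    // password: requires user credentials and yields a refresh token as well
    CreateTokenRequest passwordGrant = new CreateTokenRequest();
    passwordGrant.setGrantType("password");
    passwordGrant.setUsername("user");
    passwordGrant.setPassword(new SecureString("password".toCharArray()));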
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/PutUserRequestTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/PutUserRequestTests.java
index 952448db486..e4ebe3da93a 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/PutUserRequestTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/PutUserRequestTests.java
@@ -3,6 +3,7 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
  */
+
 package org.elasticsearch.xpack.security.action.user;

 import org.elasticsearch.action.ActionRequestValidationException;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
index 7862097d000..a8e24648058 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
@@ -19,7 +19,7 @@ import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse;
 import org.elasticsearch.xpack.core.security.user.ElasticUser;
 import org.elasticsearch.xpack.core.security.user.KibanaUser;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.core.security.user.XPackUser;

 import java.util.Collections;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java
index 410c164ffe7..aabaa40381f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java
@@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.security.user.AnonymousUser;
 import org.elasticsearch.xpack.core.security.user.ElasticUser;
 import org.elasticsearch.xpack.core.security.user.KibanaUser;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.core.security.user.XPackUser;
 import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore;
 import org.mockito.invocation.InvocationOnMock;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java
index 0c1ddbd9ba7..4e6e0b3551b 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java
@@ -19,7 +19,7 @@ import org.elasticsearch.xpack.core.security.user.AnonymousUser;
 import org.elasticsearch.xpack.core.security.user.ElasticUser;
 import org.elasticsearch.xpack.core.security.user.KibanaUser;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.core.security.user.XPackUser;
 import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore;
 import org.mockito.invocation.InvocationOnMock;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java
index ebdb1455591..33cec720608 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java
@@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.security.action.user.GetUsersRequest;
 import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse;
 import org.elasticsearch.xpack.core.security.user.AnonymousUser;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.core.security.user.XPackUser;
 import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore;
 import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
@@ -169,8 +169,8 @@ public class TransportGetUsersActionTests extends ESTestCase {
         final int size = randomIntBetween(1, allReservedUsers.size());
         final List<User> reservedUsers = randomSubsetOf(size, allReservedUsers);
         final List<String> names = reservedUsers.stream().map(User::principal).collect(Collectors.toList());
-        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService, reservedRealm);

@@ -209,8 +209,8 @@ public class TransportGetUsersActionTests extends ESTestCase {
         ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap());
         ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, new AnonymousUser(settings),
             securityIndex, threadPool);
-        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService, reservedRealm);

@@ -256,8 +256,8 @@ public class TransportGetUsersActionTests extends ESTestCase {
             randomFrom(Collections.singletonList(new User("joe")), Arrays.asList(new User("jane"), new User("fred")), randomUsers());
         final String[] storeUsernames = storeUsers.stream().map(User::principal).collect(Collectors.toList()).toArray(Strings.EMPTY_ARRAY);
         NativeUsersStore usersStore = mock(NativeUsersStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService, mock(ReservedRealm.class));

@@ -304,8 +304,8 @@ public class TransportGetUsersActionTests extends ESTestCase {
             randomFrom(Collections.singletonList(new User("joe")), Arrays.asList(new User("jane"), new User("fred")), randomUsers());
         final String[] storeUsernames = storeUsers.stream().map(User::principal).collect(Collectors.toList()).toArray(Strings.EMPTY_ARRAY);
         NativeUsersStore usersStore = mock(NativeUsersStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService, mock(ReservedRealm.class));

diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java
index d7795d3ab91..a2e283e1b36 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java
@@ -34,7 +34,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivileg
 import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor;
 import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege;
 import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.authz.AuthorizationService;
 import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore;
 import org.hamcrest.Matchers;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java
index 36ba3f46b5e..b6037932f8a 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java
@@ -25,7 +25,7 @@ import org.elasticsearch.xpack.core.security.action.user.PutUserResponse;
 import org.elasticsearch.xpack.core.security.authc.support.Hasher;
 import org.elasticsearch.xpack.core.security.user.AnonymousUser;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.core.security.user.XPackUser;
 import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore;
 import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java
index 4ca7ab97f73..d811b6359b1 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java
@@ -24,7 +24,7 @@ import org.elasticsearch.xpack.core.security.user.AnonymousUser;
 import org.elasticsearch.xpack.core.security.user.ElasticUser;
 import org.elasticsearch.xpack.core.security.user.KibanaUser;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.core.security.user.XPackUser;
 import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore;
 import org.mockito.invocation.InvocationOnMock;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java
index ba9a67eb48f..b346fc6857e 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java
@@ -13,7 +13,7 @@ import org.elasticsearch.transport.TransportMessage;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationToken;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.transport.filter.IPFilter;
 import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule;
 import org.junit.Before;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java
index 923c918f011..9bc5c989d1f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java
@@ -26,7 +26,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationToken;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.State;
 import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule;
 import org.junit.After;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java
index dcb8d8b7569..cb1b69708bd 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java
@@ -55,7 +55,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationToken;
 import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.LocalStateSecurity;
 import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.Message;
 import org.elasticsearch.xpack.security.support.SecurityIndexManager;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java
index a3a9d05704f..4c9df8fd9d3 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java
@@ -28,7 +28,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationToken;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail.AuditEventMetaInfo;
 import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrailTests.MockMessage;
 import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrailTests.RestContent;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java
index c8e14e668c9..1059e22abd6 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java
@@ -35,7 +35,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationToken;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.rest.RemoteHostHeader;
 import org.elasticsearch.xpack.security.transport.filter.IPFilter;
 import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java
index 4a40e0d543b..1640ab727fe 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java
@@ -64,7 +64,7 @@ import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken
 import org.elasticsearch.xpack.core.security.authz.permission.Role;
 import org.elasticsearch.xpack.core.security.user.AnonymousUser;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.audit.AuditTrailService;
 import org.elasticsearch.xpack.security.authc.AuthenticationService.Authenticator;
 import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
@@ -896,7 +896,7 @@ public class AuthenticationServiceTests extends ESTestCase {
         PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>();
         try (ThreadContext.StoredContext ctx = threadContext.stashContext()) {
             Authentication originatingAuth = new Authentication(new User("creator"), new RealmRef("test", "test", "test"), null);
-            tokenService.createUserToken(expected, originatingAuth, tokenFuture, Collections.emptyMap());
+            tokenService.createUserToken(expected, originatingAuth, tokenFuture, Collections.emptyMap(), true);
         }
         String token = tokenService.getUserTokenString(tokenFuture.get().v1());
         mockGetTokenFromId(tokenFuture.get().v1(), client);
@@ -975,7 +975,7 @@ public class AuthenticationServiceTests extends ESTestCase {
         PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>();
         try (ThreadContext.StoredContext ctx = threadContext.stashContext()) {
             Authentication originatingAuth = new Authentication(new User("creator"), new RealmRef("test", "test", "test"), null);
-            tokenService.createUserToken(expected, originatingAuth, tokenFuture, Collections.emptyMap());
+            tokenService.createUserToken(expected, originatingAuth, tokenFuture, Collections.emptyMap(), true);
         }
         String token = tokenService.getUserTokenString(tokenFuture.get().v1());
         mockGetTokenFromId(tokenFuture.get().v1(), client);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java
index 1da7d68c91c..9d795826298 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java
@@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
 import org.junit.Before;

diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java
index ec4a97b7f39..e6cc2dcccdf 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java
SecurityIntegTestCase { assertEquals(SecuritySettingsSource.TEST_USER_NAME, response.user().principal()); } + public void testClientCredentialsGrant() throws Exception { + Client client = client().filterWithHeader(Collections.singletonMap("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + SecurityClient securityClient = new SecurityClient(client); + CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() + .setGrantType("client_credentials") + .get(); + assertNull(createTokenResponse.getRefreshToken()); + + AuthenticateRequest request = new AuthenticateRequest(); + request.username(SecuritySettingsSource.TEST_SUPERUSER); + PlainActionFuture authFuture = new PlainActionFuture<>(); + client.filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + createTokenResponse.getTokenString())) + .execute(AuthenticateAction.INSTANCE, request, authFuture); + AuthenticateResponse response = authFuture.get(); + assertEquals(SecuritySettingsSource.TEST_SUPERUSER, response.user().principal()); + + // invalidate + PlainActionFuture invalidateResponseFuture = new PlainActionFuture<>(); + InvalidateTokenRequest invalidateTokenRequest = + new InvalidateTokenRequest(createTokenResponse.getTokenString(), InvalidateTokenRequest.Type.ACCESS_TOKEN); + securityClient.invalidateToken(invalidateTokenRequest, invalidateResponseFuture); + assertTrue(invalidateResponseFuture.get().isCreated()); + + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { + PlainActionFuture responseFuture = new PlainActionFuture<>(); + client.filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + createTokenResponse.getTokenString())) + .execute(AuthenticateAction.INSTANCE, request, responseFuture); + responseFuture.actionGet(); + }); + } + @Before public void waitForSecurityIndexWritable() throws Exception { assertSecurityIndexActive(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index c529ea8747b..b92b4cad39a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -48,7 +48,7 @@ import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.AfterClass; @@ -157,7 +157,7 @@ public class TokenServiceTests extends ESTestCase { TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + 
tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); mockGetTokenFromId(token); @@ -203,7 +203,7 @@ public class TokenServiceTests extends ESTestCase { TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); mockGetTokenFromId(token); @@ -227,7 +227,7 @@ public class TokenServiceTests extends ESTestCase { } PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, newTokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, newTokenFuture, Collections.emptyMap(), true); final UserToken newToken = newTokenFuture.get().v1(); assertNotNull(newToken); assertNotEquals(tokenService.getUserTokenString(newToken), tokenService.getUserTokenString(token)); @@ -262,7 +262,7 @@ public class TokenServiceTests extends ESTestCase { otherTokenService.refreshMetaData(tokenService.getTokenMetaData()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); mockGetTokenFromId(token); @@ -292,7 +292,7 @@ public class TokenServiceTests extends ESTestCase { TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); mockGetTokenFromId(token); @@ -322,7 +322,7 @@ public class TokenServiceTests extends ESTestCase { } PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, newTokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, newTokenFuture, Collections.emptyMap(), true); final UserToken newToken = newTokenFuture.get().v1(); assertNotNull(newToken); assertNotEquals(tokenService.getUserTokenString(newToken), tokenService.getUserTokenString(token)); @@ -353,7 +353,7 @@ public class TokenServiceTests extends ESTestCase { TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new 
User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); mockGetTokenFromId(token); @@ -383,7 +383,7 @@ public class TokenServiceTests extends ESTestCase { Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); UserToken token = tokenFuture.get().v1(); assertThat(tokenService.getUserTokenString(token), notNullValue()); @@ -397,7 +397,7 @@ public class TokenServiceTests extends ESTestCase { new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); doAnswer(invocationOnMock -> { @@ -451,7 +451,7 @@ public class TokenServiceTests extends ESTestCase { TokenService tokenService = new TokenService(tokenServiceEnabledSettings, clock, client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); mockGetTokenFromId(token); @@ -501,7 +501,8 @@ public class TokenServiceTests extends ESTestCase { .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), false) .build(), Clock.systemUTC(), client, securityIndex, clusterService); - IllegalStateException e = expectThrows(IllegalStateException.class, () -> tokenService.createUserToken(null, null, null, null)); + IllegalStateException e = expectThrows(IllegalStateException.class, + () -> tokenService.createUserToken(null, null, null, null, true)); assertEquals("tokens are not enabled", e.getMessage()); PlainActionFuture future = new PlainActionFuture<>(); @@ -559,7 +560,7 @@ public class TokenServiceTests extends ESTestCase { new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = 
tokenFuture.get().v1(); assertNotNull(token); mockGetTokenFromId(token); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java index c79d77718ca..1a8f8dc3b5d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; import java.time.Clock; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index 6ebf6dca2cf..c2a7ea495a1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -44,7 +44,7 @@ import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; import org.junit.Before; import org.junit.BeforeClass; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java index c7a7c4f07bb..f280e85f4ab 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java @@ -26,11 +26,12 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.Before; @@ -81,7 +82,8 @@ public class NativeUsersStoreTests extends ESTestCase { public void testPasswordUpsertWhenSetEnabledOnReservedUser() throws Exception { final NativeUsersStore nativeUsersStore = startNativeUsersStore(); - final String user = randomFrom(ElasticUser.NAME, KibanaUser.NAME, 
LogstashSystemUser.NAME, BeatsSystemUser.NAME); + final String user = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, + BeatsSystemUser.NAME, APMSystemUser.NAME); final PlainActionFuture<Void> future = new PlainActionFuture<>(); nativeUsersStore.setEnabled(user, true, WriteRequest.RefreshPolicy.IMMEDIATE, future); @@ -99,7 +101,8 @@ public class NativeUsersStoreTests extends ESTestCase { public void testBlankPasswordInIndexImpliesDefaultPassword() throws Exception { final NativeUsersStore nativeUsersStore = startNativeUsersStore(); - final String user = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + final String user = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, + BeatsSystemUser.NAME, APMSystemUser.NAME); final Map<String, Object> values = new HashMap<>(); values.put(ENABLED_FIELD, Boolean.TRUE); values.put(PASSWORD_FIELD, BLANK_PASSWORD); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java index 1824597a6ad..8f7116dd971 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.test.NativeRealmIntegTestCase; import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; @@ -20,6 +21,7 @@ import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; import org.junit.BeforeClass; import java.util.Arrays; +import java.util.List; import static java.util.Collections.singletonMap; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; @@ -49,7 +51,9 @@ public class ReservedRealmIntegTests extends NativeRealmIntegTestCase { } public void testAuthenticate() { - for (String username : Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME)) { + final List<String> usernames = Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, + BeatsSystemUser.NAME, APMSystemUser.NAME); + for (String username : usernames) { ClusterHealthResponse response = client() .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword()))) .admin() @@ -67,7 +71,9 @@ public class ReservedRealmIntegTests extends NativeRealmIntegTestCase { */ public void testAuthenticateAfterEnablingUser() { final SecurityClient c = securityClient(); - for (String username : Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME)) { + final List<String> usernames = Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, + BeatsSystemUser.NAME, APMSystemUser.NAME); + for (String username : usernames) { c.prepareSetEnabled(username, true).get(); ClusterHealthResponse response = client() .filterWithHeader(singletonMap("Authorization", 
basicAuthHeaderValue(username, getReservedPassword()))) @@ -81,7 +87,8 @@ public class ReservedRealmIntegTests extends NativeRealmIntegTestCase { } public void testChangingPassword() { - String username = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + String username = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, + BeatsSystemUser.NAME, APMSystemUser.NAME); final char[] newPassword = "supersecretvalue".toCharArray(); if (randomBoolean()) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java index 04e0afcf882..36d1690b8b2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java @@ -21,12 +21,13 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.UsernamesField; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -262,7 +263,8 @@ public class ReservedRealmTests extends ESTestCase { PlainActionFuture<Collection<User>> userFuture = new PlainActionFuture<>(); reservedRealm.users(userFuture); assertThat(userFuture.actionGet(), - containsInAnyOrder(new ElasticUser(true), new KibanaUser(true), new LogstashSystemUser(true), new BeatsSystemUser(true))); + containsInAnyOrder(new ElasticUser(true), new KibanaUser(true), new LogstashSystemUser(true), + new BeatsSystemUser(true), new APMSystemUser(true))); } public void testGetUsersDisabled() { @@ -394,7 +396,7 @@ public class ReservedRealmTests extends ESTestCase { new AnonymousUser(Settings.EMPTY), securityIndex, threadPool); PlainActionFuture<AuthenticationResult> listener = new PlainActionFuture<>(); - final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME); doAnswer((i) -> { ActionListener callback = (ActionListener) i.getArguments()[1]; callback.onResponse(null); @@ -416,14 +418,15 @@ public class ReservedRealmTests extends ESTestCase { new AnonymousUser(Settings.EMPTY), securityIndex, threadPool); PlainActionFuture<AuthenticationResult> listener = new PlainActionFuture<>(); - final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME); 
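
The ReservedRealmTests hunks above extend every reserved-user list with APMSystemUser, and the verifyVersionPredicate() changes just below pin the node version from which each reserved user is available (the APM system user from 6.5.0 but not 6.4.0). A minimal sketch of that kind of version gate follows; the Version stand-in and the predicate wiring are illustrative assumptions, not the actual ReservedRealm code:

import java.util.function.Predicate;

public class ReservedUserVersionGateSketch {
    // Stand-in for org.elasticsearch.Version; only the ordering matters here.
    static final class Version implements Comparable<Version> {
        final int major;
        final int minor;
        Version(int major, int minor) { this.major = major; this.minor = minor; }
        boolean onOrAfter(Version other) { return compareTo(other) >= 0; }
        @Override public int compareTo(Version o) {
            return major != o.major ? Integer.compare(major, o.major) : Integer.compare(minor, o.minor);
        }
    }

    public static void main(String[] args) {
        Version v640 = new Version(6, 4);
        Version v650 = new Version(6, 5);
        // Mirrors the assertions in verifyVersionPredicate(): the APM system
        // user only "exists" on nodes at or above the version that added it.
        Predicate<Version> apmUserAvailable = version -> version.onOrAfter(v650);
        System.out.println(apmUserAvailable.test(v640)); // false
        System.out.println(apmUserAvailable.test(v650)); // true
    }
}
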
reservedRealm.doAuthenticate(new UsernamePasswordToken(principal, mockSecureSettings.getString("bootstrap.password")), listener); final AuthenticationResult result = listener.get(); assertThat(result.getStatus(), is(AuthenticationResult.Status.TERMINATE)); } private User randomReservedUser(boolean enabled) { - return randomFrom(new ElasticUser(enabled), new KibanaUser(enabled), new LogstashSystemUser(enabled), new BeatsSystemUser(enabled)); + return randomFrom(new ElasticUser(enabled), new KibanaUser(enabled), new LogstashSystemUser(enabled), + new BeatsSystemUser(enabled), new APMSystemUser(enabled)); } /* @@ -444,23 +447,19 @@ } private void verifyVersionPredicate(String principal, Predicate<Version> versionPredicate) { - assertThat(versionPredicate.test(Version.V_5_0_0_rc1), is(false)); switch (principal) { case LogstashSystemUser.NAME: - assertThat(versionPredicate.test(Version.V_5_0_0), is(false)); - assertThat(versionPredicate.test(Version.V_5_1_1), is(false)); - assertThat(versionPredicate.test(Version.V_5_2_0), is(true)); assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); break; case BeatsSystemUser.NAME: - assertThat(versionPredicate.test(Version.V_5_6_9), is(false)); assertThat(versionPredicate.test(Version.V_6_2_3), is(false)); assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); break; + case APMSystemUser.NAME: + assertThat(versionPredicate.test(Version.V_6_4_0), is(false)); + assertThat(versionPredicate.test(Version.V_6_5_0), is(true)); + break; default: - assertThat(versionPredicate.test(Version.V_5_0_0), is(true)); - assertThat(versionPredicate.test(Version.V_5_1_1), is(true)); - assertThat(versionPredicate.test(Version.V_5_2_0), is(true)); assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); break; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java index aec2a4eb820..1310980fc5f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.junit.Before; import org.mockito.stubbing.Answer; @@ -86,7 +86,8 @@ public class FileRealmTests extends ESTestCase { public void testAuthenticateCaching() throws Exception { Settings settings = Settings.builder() - .put("cache.hash_algo", Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT)).build(); + .put("cache.hash_algo", Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT)) + .build(); RealmConfig config = new RealmConfig("file-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), threadContext); when(userPasswdStore.verifyPassword(eq("user1"), eq(new SecureString("test123")), any(Supplier.class))) diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java index ee89c2efe4f..739952af63e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.core.security.audit.logfile.CapturingLogger; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.Hasher; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.junit.After; import org.junit.Before; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java index 11aefb758fb..7c5904d048a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java @@ -11,13 +11,20 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; +import org.elasticsearch.xpack.core.security.user.User; import org.ietf.jgss.GSSException; +import java.nio.charset.StandardCharsets; import java.nio.file.Path; +import java.util.Collections; import java.util.List; import javax.security.auth.login.LoginException; @@ -29,7 +36,9 @@ import static org.hamcrest.Matchers.notNullValue; import static org.mockito.AdditionalMatchers.aryEq; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; public class KerberosRealmAuthenticateFailedTests extends KerberosRealmTestCase { @@ -105,4 +114,30 @@ public class KerberosRealmAuthenticateFailedTests extends KerberosRealmTestCase any(ActionListener.class)); } } + + public void testDelegatedAuthorizationFailedToResolve() throws Exception { + final String username = randomPrincipalName(); + final MockLookupRealm otherRealm = new MockLookupRealm(new RealmConfig("other_realm", Settings.EMPTY, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); + final User lookupUser = new 
User(randomAlphaOfLength(5)); + otherRealm.registerUser(lookupUser); + + settings = Settings.builder().put(settings).putList("authorization_realms", "other_realm").build(); + final KerberosRealm kerberosRealm = createKerberosRealm(Collections.singletonList(otherRealm), username); + final byte[] decodedTicket = "base64encodedticket".getBytes(StandardCharsets.UTF_8); + final Path keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings())); + final boolean krbDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings()); + mockKerberosTicketValidator(decodedTicket, keytabPath, krbDebug, new Tuple<>(username, "out-token"), null); + final KerberosAuthenticationToken kerberosAuthenticationToken = new KerberosAuthenticationToken(decodedTicket); + + final PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>(); + kerberosRealm.authenticate(kerberosAuthenticationToken, future); + + AuthenticationResult result = future.actionGet(); + assertThat(result.getStatus(), is(equalTo(AuthenticationResult.Status.CONTINUE))); + verify(mockKerberosTicketValidator, times(1)).validateTicket(aryEq(decodedTicket), eq(keytabPath), eq(krbDebug), + any(ActionListener.class)); + verify(mockNativeRoleMappingStore).refreshRealmOnChange(kerberosRealm); + verifyNoMoreInteractions(mockKerberosTicketValidator, mockNativeRoleMappingStore); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java index 7ce8ee39c03..69ebe15c5d7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData; import org.ietf.jgss.GSSException; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java index 69b246cd7ca..dd83da49a0b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java @@ -13,15 +13,17 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.Realm; 
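
The testDelegatedAuthorizationFailedToResolve case above exercises the new authorization_realms setting: the Kerberos realm still validates the ticket itself, but the User is resolved by a separate lookup realm, and the result is CONTINUE when no configured realm knows the principal. A simplified sketch of that resolution loop, using assumed stand-in types rather than the real Realm and User classes:

import java.util.Arrays;
import java.util.List;
import java.util.Optional;

public class DelegatedAuthorizationSketch {
    // Stand-in for a realm that can look up (but not authenticate) a principal.
    interface LookupRealm {
        Optional<String> lookupUser(String principal);
    }

    // Returns the resolved user name, or null to signal CONTINUE to the chain.
    static String resolve(String principal, List<LookupRealm> authorizationRealms) {
        for (LookupRealm realm : authorizationRealms) {
            Optional<String> user = realm.lookupUser(principal);
            if (user.isPresent()) {
                return user.get();
            }
        }
        return null;
    }

    public static void main(String[] args) {
        LookupRealm otherRealm = principal ->
                "jdoe".equals(principal) ? Optional.of("jdoe") : Optional.<String>empty();
        System.out.println(resolve("jdoe", Arrays.asList(otherRealm)));  // jdoe
        System.out.println(resolve("ghost", Arrays.asList(otherRealm))); // null -> CONTINUE
    }
}
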
import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.core.security.support.Exceptions; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -30,6 +32,7 @@ import org.junit.Before; import java.nio.file.Path; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; @@ -58,6 +61,7 @@ public abstract class KerberosRealmTestCase extends ESTestCase { protected KerberosTicketValidator mockKerberosTicketValidator; protected NativeRoleMappingStore mockNativeRoleMappingStore; + protected XPackLicenseState licenseState; protected static final Set<String> roles = Sets.newHashSet("admin", "kibana_user"); @@ -69,6 +73,8 @@ public abstract class KerberosRealmTestCase extends ESTestCase { globalSettings = Settings.builder().put("path.home", dir).build(); settings = KerberosTestCase.buildKerberosRealmSettings(KerberosTestCase.writeKeyTab(dir.resolve("key.keytab"), "asa").toString(), 100, "10m", true, randomBoolean()); + licenseState = mock(XPackLicenseState.class); + when(licenseState.isAuthorizationRealmAllowed()).thenReturn(true); } @After @@ -102,12 +108,18 @@ public abstract class KerberosRealmTestCase extends ESTestCase { } protected KerberosRealm createKerberosRealm(final String... userForRoleMapping) { + return createKerberosRealm(Collections.emptyList(), userForRoleMapping); + } + + protected KerberosRealm createKerberosRealm(final List<Realm> delegatedRealms, final String... 
userForRoleMapping) { config = new RealmConfig("test-kerb-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); mockNativeRoleMappingStore = roleMappingStore(Arrays.asList(userForRoleMapping)); mockKerberosTicketValidator = mock(KerberosTicketValidator.class); final KerberosRealm kerberosRealm = new KerberosRealm(config, mockNativeRoleMappingStore, mockKerberosTicketValidator, threadPool, null); + Collections.shuffle(delegatedRealms, random()); + kerberosRealm.initialize(delegatedRealms, licenseState); return kerberosRealm; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java index 80e61a5545f..d35068fd07a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java @@ -6,35 +6,38 @@ package org.elasticsearch.xpack.security.authc.kerberos; +import org.apache.lucene.util.Constants; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData; import org.ietf.jgss.GSSException; import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SeekableByteChannel; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.attribute.AclEntry; -import java.nio.file.attribute.AclEntryPermission; -import java.nio.file.attribute.AclEntryType; -import java.nio.file.attribute.AclFileAttributeView; -import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.FileAttribute; +import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; -import java.nio.file.attribute.UserPrincipal; import java.util.Arrays; -import java.util.List; +import java.util.Collections; +import java.util.EnumSet; import java.util.Locale; import java.util.Set; @@ -47,6 +50,7 @@ import static org.mockito.AdditionalMatchers.aryEq; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; @@ -110,52 +114,47 @@ public class 
KerberosRealmTests extends KerberosRealmTestCase { assertThat(future.actionGet(), is(nullValue())); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32768") - public void testKerberosRealmWithInvalidKeytabPathConfigurations() throws IOException { - final String keytabPathCase = randomFrom("keytabPathAsDirectory", "keytabFileDoesNotExist", "keytabPathWithNoReadPermissions"); - final String expectedErrorMessage; - final String keytabPath; - switch (keytabPathCase) { - case "keytabPathAsDirectory": - final String dirName = randomAlphaOfLength(5); - Files.createDirectory(dir.resolve(dirName)); - keytabPath = dir.resolve(dirName).toString(); - expectedErrorMessage = "configured service key tab file [" + keytabPath + "] is a directory"; - break; - case "keytabFileDoesNotExist": - keytabPath = dir.resolve(randomAlphaOfLength(5) + ".keytab").toString(); - expectedErrorMessage = "configured service key tab file [" + keytabPath + "] does not exist"; - break; - case "keytabPathWithNoReadPermissions": - final String fileName = randomAlphaOfLength(5); - final Path keytabFilePath = Files.createTempFile(dir, fileName, ".keytab"); - Files.write(keytabFilePath, randomAlphaOfLength(5).getBytes(StandardCharsets.UTF_8)); - final Set supportedAttributes = keytabFilePath.getFileSystem().supportedFileAttributeViews(); - if (supportedAttributes.contains("posix")) { - final PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(keytabFilePath, PosixFileAttributeView.class); - fileAttributeView.setPermissions(PosixFilePermissions.fromString("---------")); - } else if (supportedAttributes.contains("acl")) { - final UserPrincipal principal = Files.getOwner(keytabFilePath); - final AclFileAttributeView view = Files.getFileAttributeView(keytabFilePath, AclFileAttributeView.class); - final AclEntry entry = AclEntry.newBuilder() - .setType(AclEntryType.DENY) - .setPrincipal(principal) - .setPermissions(AclEntryPermission.READ_DATA, AclEntryPermission.READ_ATTRIBUTES).build(); - final List acl = view.getAcl(); - acl.add(0, entry); - view.setAcl(acl); - } else { - throw new UnsupportedOperationException( - String.format(Locale.ROOT, "Don't know how to make file [%s] non-readable on a file system with attributes [%s]", - keytabFilePath, supportedAttributes)); - } - keytabPath = keytabFilePath.toString(); - expectedErrorMessage = "configured service key tab file [" + keytabPath + "] must have read permission"; - break; - default: - throw new IllegalArgumentException("Unknown test case :" + keytabPathCase); - } + public void testKerberosRealmThrowsErrorWhenKeytabPathIsConfiguredAsDirectory() throws IOException { + final String dirName = randomAlphaOfLength(5); + Files.createDirectory(dir.resolve(dirName)); + final String keytabPath = dir.resolve(dirName).toString(); + final String expectedErrorMessage = "configured service key tab file [" + keytabPath + "] is a directory"; + assertKerberosRealmConstructorFails(keytabPath, expectedErrorMessage); + } + + public void testKerberosRealmThrowsErrorWhenKeytabFileDoesNotExist() throws IOException { + final String keytabPath = dir.resolve(randomAlphaOfLength(5) + ".keytab").toString(); + final String expectedErrorMessage = "configured service key tab file [" + keytabPath + "] does not exist"; + + assertKerberosRealmConstructorFails(keytabPath, expectedErrorMessage); + } + + public void testKerberosRealmThrowsErrorWhenKeytabFileHasNoReadPermissions() throws IOException { + assumeFalse("Not running this test on Windows, as it requires additional 
access permissions for test framework.", + Constants.WINDOWS); + final Set<String> supportedAttributes = dir.getFileSystem().supportedFileAttributeViews(); + final String keytabFileName = randomAlphaOfLength(5) + ".keytab"; + final Path keytabPath; + if (supportedAttributes.contains("posix")) { + final Set<PosixFilePermission> filePerms = PosixFilePermissions.fromString("---------"); + final FileAttribute<Set<PosixFilePermission>> fileAttributes = PosixFilePermissions.asFileAttribute(filePerms); + try (SeekableByteChannel byteChannel = Files.newByteChannel(dir.resolve(keytabFileName), + EnumSet.of(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), fileAttributes)) { + byteChannel.write(ByteBuffer.wrap(randomByteArrayOfLength(10))); + } + keytabPath = dir.resolve(keytabFileName); + } else { + throw new UnsupportedOperationException( + String.format(Locale.ROOT, "Don't know how to make file [%s] non-readable on a file system with attributes [%s]", + dir.resolve(keytabFileName), supportedAttributes)); + } + final String expectedErrorMessage = "configured service key tab file [" + keytabPath + "] must have read permission"; + + assertKerberosRealmConstructorFails(keytabPath.toString(), expectedErrorMessage); + } + + private void assertKerberosRealmConstructorFails(final String keytabPath, final String expectedErrorMessage) { settings = KerberosTestCase.buildKerberosRealmSettings(keytabPath, 100, "10m", true, randomBoolean()); config = new RealmConfig("test-kerb-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); @@ -165,4 +164,38 @@ public class KerberosRealmTests extends KerberosRealmTestCase { () -> new KerberosRealm(config, mockNativeRoleMappingStore, mockKerberosTicketValidator, threadPool, null)); assertThat(iae.getMessage(), is(equalTo(expectedErrorMessage))); } + + public void testDelegatedAuthorization() throws Exception { + final String username = randomPrincipalName(); + final String expectedUsername = maybeRemoveRealmName(username); + final MockLookupRealm otherRealm = spy(new MockLookupRealm(new RealmConfig("other_realm", Settings.EMPTY, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)))); + final User lookupUser = new User(expectedUsername, new String[] { "admin-role" }, expectedUsername, + expectedUsername + "@example.com", Collections.singletonMap("k1", "v1"), true); + otherRealm.registerUser(lookupUser); + + settings = Settings.builder().put(settings).putList("authorization_realms", "other_realm").build(); + final KerberosRealm kerberosRealm = createKerberosRealm(Collections.singletonList(otherRealm), username); + final User expectedUser = lookupUser; + final byte[] decodedTicket = "base64encodedticket".getBytes(StandardCharsets.UTF_8); + final Path keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings())); + final boolean krbDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings()); + mockKerberosTicketValidator(decodedTicket, keytabPath, krbDebug, new Tuple<>(username, "out-token"), null); + final KerberosAuthenticationToken kerberosAuthenticationToken = new KerberosAuthenticationToken(decodedTicket); + + PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>(); + kerberosRealm.authenticate(kerberosAuthenticationToken, future); + assertSuccessAuthenticationResult(expectedUser, "out-token", future.actionGet()); + + future = new PlainActionFuture<>(); + kerberosRealm.authenticate(kerberosAuthenticationToken, future); + 
assertSuccessAuthenticationResult(expectedUser, "out-token", future.actionGet()); + + verify(mockKerberosTicketValidator, times(2)).validateTicket(aryEq(decodedTicket), eq(keytabPath), eq(krbDebug), + any(ActionListener.class)); + verify(mockNativeRoleMappingStore).refreshRealmOnChange(kerberosRealm); + verifyNoMoreInteractions(mockKerberosTicketValidator, mockNativeRoleMappingStore); + verify(otherRealm, times(2)).lookupUser(eq(expectedUsername), any(ActionListener.class)); + } } + diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java index 0f88148a9a9..f97afc1d52c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java @@ -72,6 +72,15 @@ public abstract class KerberosTestCase extends ESTestCase { unsupportedLocaleLanguages.add("ks"); unsupportedLocaleLanguages.add("ckb"); unsupportedLocaleLanguages.add("ne"); + unsupportedLocaleLanguages.add("dz"); + unsupportedLocaleLanguages.add("mzn"); + unsupportedLocaleLanguages.add("mr"); + unsupportedLocaleLanguages.add("as"); + unsupportedLocaleLanguages.add("bn"); + unsupportedLocaleLanguages.add("lrc"); + unsupportedLocaleLanguages.add("my"); + unsupportedLocaleLanguages.add("ps"); + unsupportedLocaleLanguages.add("ur"); } @BeforeClass diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java index a8f555bc3a3..2f5147ca2b1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.TestUtils; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -35,7 +37,7 @@ import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySe import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.VerificationMode; import org.elasticsearch.xpack.security.authc.ldap.ActiveDirectorySessionFactory.DownLevelADAuthenticator; @@ -48,6 +50,7 @@ import org.junit.BeforeClass; import java.security.AccessController; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -91,6 +94,7 @@ public class ActiveDirectoryRealmTests extends ESTestCase { private ThreadPool 
threadPool; private Settings globalSettings; private SSLService sslService; + private XPackLicenseState licenseState; @BeforeClass public static void setNumberOfLdapServers() { @@ -125,6 +129,7 @@ public class ActiveDirectoryRealmTests extends ESTestCase { resourceWatcherService = new ResourceWatcherService(Settings.EMPTY, threadPool); globalSettings = Settings.builder().put("path.home", createTempDir()).build(); sslService = new SSLService(globalSettings, TestEnvironment.newEnvironment(globalSettings)); + licenseState = new TestUtils.UpdatableLicenseState(); } @After @@ -163,6 +168,7 @@ public class ActiveDirectoryRealmTests extends ESTestCase { ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>(); realm.authenticate(new UsernamePasswordToken("CN=ironman", new SecureString(PASSWORD)), future); @@ -179,6 +185,7 @@ public class ActiveDirectoryRealmTests extends ESTestCase { ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); // Thor does not have a UPN of form CN=Thor@ad.test.elasticsearch.com PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>(); @@ -203,6 +210,7 @@ public class ActiveDirectoryRealmTests extends ESTestCase { ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool)); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); int count = randomIntBetween(2, 10); for (int i = 0; i < count; i++) { @@ -221,6 +229,7 @@ public class ActiveDirectoryRealmTests extends ESTestCase { ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool)); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); int count = randomIntBetween(2, 10); for (int i = 0; i < count; i++) { @@ -239,6 +248,7 @@ public class ActiveDirectoryRealmTests extends ESTestCase { ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool)); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); int count = randomIntBetween(2, 10); for (int i = 0; i < count; i++) { @@ -287,6 +297,7 @@ public class ActiveDirectoryRealmTests extends ESTestCase { try (ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool)) { DnRoleMapper roleMapper = new DnRoleMapper(config, 
resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); PlainActionFuture<User> future = new PlainActionFuture<>(); realm.lookupUser("CN=Thor", future); @@ -304,6 +315,7 @@ public class ActiveDirectoryRealmTests extends ESTestCase { ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>(); realm.authenticate(new UsernamePasswordToken("CN=ironman", new SecureString(PASSWORD)), future); @@ -320,6 +332,7 @@ public class ActiveDirectoryRealmTests extends ESTestCase { ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>(); realm.authenticate(new UsernamePasswordToken("CN=Thor", new SecureString(PASSWORD)), future); @@ -338,6 +351,7 @@ public class ActiveDirectoryRealmTests extends ESTestCase { ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); PlainActionFuture<Map<String, Object>> future = new PlainActionFuture<>(); realm.usageStats(future); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java index 2807c501a5d..18b84df6d61 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.xpack.security.authc.ldap; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.ldap.LdapRealm.CancellableLdapRunnable; import java.util.concurrent.CountDownLatch; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java index 5c98e2347cf..fb20527575d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.settings.Settings; import 
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java
index 5c98e2347cf..fb20527575d 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java
@@ -14,6 +14,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.watcher.ResourceWatcherService;
@@ -25,18 +26,21 @@ import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings;
import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings;
import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope;
import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings;
+import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings;
import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings;
import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
import org.elasticsearch.xpack.core.ssl.SSLService;
import org.elasticsearch.xpack.core.ssl.VerificationMode;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase;
import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory;
import org.elasticsearch.xpack.security.authc.support.DnRoleMapper;
+import org.elasticsearch.xpack.security.authc.support.MockLookupRealm;
import org.junit.After;
import org.junit.Before;

import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import java.util.Map;

@@ -50,11 +54,14 @@ import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.sameInstance;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;

public class LdapRealmTests extends LdapTestCase {
@@ -68,6 +75,7 @@ public class LdapRealmTests extends LdapTestCase {
    private ResourceWatcherService resourceWatcherService;
    private Settings defaultGlobalSettings;
    private SSLService sslService;
+    private XPackLicenseState licenseState;

    @Before
    public void init() throws Exception {
@@ -75,6 +83,8 @@ public class LdapRealmTests extends LdapTestCase {
        resourceWatcherService = new ResourceWatcherService(Settings.EMPTY, threadPool);
        defaultGlobalSettings = Settings.builder().put("path.home", createTempDir()).build();
        sslService = new SSLService(defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings));
+        licenseState = mock(XPackLicenseState.class);
+        when(licenseState.isAuthorizationRealmAllowed()).thenReturn(true);
    }

    @After
@@ -87,10 +97,12 @@ public class LdapRealmTests extends LdapTestCase {
        String groupSearchBase = "o=sevenSeas";
        String userTemplate = VALID_USER_TEMPLATE;
        Settings settings = buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE);
-        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings,
+                TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
        LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
        LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService),
                threadPool);
+        ldap.initialize(Collections.singleton(ldap), licenseState);

        PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
        ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future);
@@ -111,11 +123,13 @@ public class LdapRealmTests extends LdapTestCase {
        Settings settings = Settings.builder()
                .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL))
                .build();
-        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings,
+                TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
        LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
        LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService),
                threadPool);
+        ldap.initialize(Collections.singleton(ldap), licenseState);

        PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
        ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future);
@@ -136,12 +150,14 @@ public class LdapRealmTests extends LdapTestCase {
        Settings settings = Settings.builder()
                .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE))
                .build();
-        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings,
+                TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
        LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
        ldapFactory = spy(ldapFactory);
        LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService),
                threadPool);
+        ldap.initialize(Collections.singleton(ldap), licenseState);

        PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
        ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future);
@@ -161,12 +177,15 @@ public class LdapRealmTests extends LdapTestCase {
        Settings settings = Settings.builder()
                .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE))
                .build();
-        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings,
+                TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
        LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
        DnRoleMapper roleMapper = buildGroupAsRoleMapper(resourceWatcherService);
        ldapFactory = spy(ldapFactory);
        LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, roleMapper, threadPool);
+        ldap.initialize(Collections.singleton(ldap), licenseState);
+
        PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
        ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future);
        future.actionGet();
@@ -194,12 +213,15 @@ public class LdapRealmTests extends LdapTestCase {
                .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE))
                .put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), -1)
                .build();
-        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings,
+                TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
        LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
        ldapFactory = spy(ldapFactory);
        LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService),
                threadPool);
+        ldap.initialize(Collections.singleton(ldap), licenseState);
+
        PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
        ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future);
        future.actionGet();
@@ -211,6 +233,48 @@ public class LdapRealmTests extends LdapTestCase {
        verify(ldapFactory, times(2)).session(anyString(), any(SecureString.class), any(ActionListener.class));
    }

+    public void testDelegatedAuthorization() throws Exception {
+        String groupSearchBase = "o=sevenSeas";
+        String userTemplate = VALID_USER_TEMPLATE;
+        final Settings.Builder builder = Settings.builder()
+                .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE))
+                .putList(DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey(), "mock_lookup");
+
+        if (randomBoolean()) {
+            // maybe disable caching
+            builder.put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), -1);
+        }
+
+        final Settings realmSettings = builder.build();
+        final Environment env = TestEnvironment.newEnvironment(defaultGlobalSettings);
+        RealmConfig config = new RealmConfig("test-ldap-realm", realmSettings, defaultGlobalSettings, env, threadPool.getThreadContext());
+
+        final LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
+        final DnRoleMapper roleMapper = buildGroupAsRoleMapper(resourceWatcherService);
+        final LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, roleMapper, threadPool);
+
+        final MockLookupRealm mockLookup = new MockLookupRealm(new RealmConfig("mock_lookup", Settings.EMPTY, defaultGlobalSettings, env,
+                threadPool.getThreadContext()));
+
+        ldap.initialize(Arrays.asList(ldap, mockLookup), licenseState);
+        mockLookup.initialize(Arrays.asList(ldap, mockLookup), licenseState);
+
+        PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
+        ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future);
+        final AuthenticationResult result1 = future.actionGet();
+        assertThat(result1.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE));
+        assertThat(result1.getMessage(),
+                equalTo("the principal [" + VALID_USERNAME + "] was authenticated, but no user could be found in realms [mock/mock_lookup]"));
+
+        future = new PlainActionFuture<>();
+        final User fakeUser = new User(VALID_USERNAME, "fake_role");
+        mockLookup.registerUser(fakeUser);
+        ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future);
+        final AuthenticationResult result2 = future.actionGet();
+        assertThat(result2.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS));
+        assertThat(result2.getUser(), sameInstance(fakeUser));
+    }
+
    public void testLdapRealmSelectsLdapSessionFactory() throws Exception {
        String groupSearchBase = "o=sevenSeas";
        String userTemplate = VALID_USER_TEMPLATE;
@@ -279,7 +343,8 @@ public class LdapRealmTests extends LdapTestCase {
                .put("group_search.scope", LdapSearchScope.SUB_TREE)
                .put("ssl.verification_mode", VerificationMode.CERTIFICATE)
                .build();
-        RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, defaultGlobalSettings,
+                TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                () -> LdapRealm.sessionFactory(config, null, threadPool, LdapRealmSettings.LDAP_TYPE));
        assertThat(e.getMessage(),
@@ -295,7 +360,8 @@ public class LdapRealmTests extends LdapTestCase {
                .put("group_search.scope", LdapSearchScope.SUB_TREE)
                .put("ssl.verification_mode", VerificationMode.CERTIFICATE)
                .build();
-        RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, defaultGlobalSettings,
+                TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                () -> LdapRealm.sessionFactory(config, null, threadPool, LdapRealmSettings.LDAP_TYPE));
        assertThat(e.getMessage(),
@@ -312,11 +378,13 @@ public class LdapRealmTests extends LdapTestCase {
                .put(DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.getKey(),
                        getDataPath("/org/elasticsearch/xpack/security/authc/support/role_mapping.yml"))
                .build();
-        RealmConfig config = new RealmConfig("test-ldap-realm-userdn", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = new RealmConfig("test-ldap-realm-userdn", settings, defaultGlobalSettings,
+                TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
        LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
        LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory,
                new DnRoleMapper(config, resourceWatcherService), threadPool);
+        ldap.initialize(Collections.singleton(ldap), licenseState);

        PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
        ldap.authenticate(new UsernamePasswordToken("Horatio Hornblower", new SecureString(PASSWORD)), future);
@@ -339,10 +407,12 @@ public class LdapRealmTests extends LdapTestCase {
        String groupSearchBase = "o=sevenSeas";
        String userTemplate = VALID_USER_TEMPLATE;
        Settings settings = buildLdapSettings(new String[] { url.toString() }, userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE);
-        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings,
+                TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
        LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
        LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService),
                threadPool);
+        ldap.initialize(Collections.singleton(ldap), licenseState);

        PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
        ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future);
@@ -386,6 +456,7 @@ public class LdapRealmTests extends LdapTestCase {
        LdapSessionFactory ldapFactory = new LdapSessionFactory(config, new SSLService(globalSettings, env), threadPool);
        LdapRealm realm = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory,
                new DnRoleMapper(config, resourceWatcherService), threadPool);
+        realm.initialize(Collections.singleton(realm), licenseState);

        PlainActionFuture<Map<String, Object>> future = new PlainActionFuture<>();
        realm.usageStats(future);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java
index f719546cade..a22cc9fba17 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java
@@ -63,7 +63,8 @@ public class LdapSessionFactoryTests extends LdapTestCase {
                .put("path.home", createTempDir())
                .build();

-        RealmConfig config = new RealmConfig("ldap_realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig("ldap_realm", settings, globalSettings,
+                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
        LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool);
        String user = "Horatio Hornblower";
        SecureString userPass = new SecureString("pass");
@@ -87,8 +88,9 @@ public class LdapSessionFactoryTests extends LdapTestCase {
                "wrongname={0},ou=people,o=sevenSeas",
                "cn={0},ou=people,o=sevenSeas", //this last one should work
        };
-        RealmConfig config = new RealmConfig("ldap_realm", buildLdapSettings(ldapUrls(), userTemplates, groupSearchBase,
-                LdapSearchScope.SUB_TREE), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig("ldap_realm",
+                buildLdapSettings(ldapUrls(), userTemplates, groupSearchBase, LdapSearchScope.SUB_TREE),
+                globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));

        LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool);

@@ -110,8 +112,9 @@ public class LdapSessionFactoryTests extends LdapTestCase {
                "wrongname={0},ou=people,o=sevenSeas",
                "asdf={0},ou=people,o=sevenSeas", //none of these should work
        };
-        RealmConfig config = new RealmConfig("ldap_realm", buildLdapSettings(ldapUrls(), userTemplates, groupSearchBase,
-                LdapSearchScope.SUB_TREE), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig("ldap_realm",
+                buildLdapSettings(ldapUrls(), userTemplates, groupSearchBase, LdapSearchScope.SUB_TREE),
+                globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));

        LdapSessionFactory ldapFac = new LdapSessionFactory(config, sslService, threadPool);

@@ -128,8 +131,9 @@ public class LdapSessionFactoryTests extends LdapTestCase {
    public void testGroupLookupSubtree() throws Exception {
        String groupSearchBase = "o=sevenSeas";
        String userTemplate = "cn={0},ou=people,o=sevenSeas";
-        RealmConfig config = new RealmConfig("ldap_realm", buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase,
-                LdapSearchScope.SUB_TREE), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig("ldap_realm",
+                buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE),
+                globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));

        LdapSessionFactory ldapFac = new LdapSessionFactory(config, sslService, threadPool);

@@ -147,8 +151,9 @@ public class LdapSessionFactoryTests extends LdapTestCase {
    public void testGroupLookupOneLevel() throws Exception {
        String groupSearchBase = "ou=crews,ou=groups,o=sevenSeas";
        String userTemplate = "cn={0},ou=people,o=sevenSeas";
-        RealmConfig config = new RealmConfig("ldap_realm", buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase,
-                LdapSearchScope.ONE_LEVEL), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig("ldap_realm",
+                buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL),
+                globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));

        LdapSessionFactory ldapFac = new LdapSessionFactory(config, sslService, threadPool);

@@ -165,8 +170,9 @@ public class LdapSessionFactoryTests extends LdapTestCase {
    public void testGroupLookupBase() throws Exception {
        String groupSearchBase = "cn=HMS Lydia,ou=crews,ou=groups,o=sevenSeas";
        String userTemplate = "cn={0},ou=people,o=sevenSeas";
-        RealmConfig config = new RealmConfig("ldap_realm", buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase,
-                LdapSearchScope.BASE), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig("ldap_realm",
+                buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.BASE),
+                globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));

        LdapSessionFactory ldapFac = new LdapSessionFactory(config, sslService, threadPool);
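The delegated-authorization tests above drive the new `authorization_realms` setting through `DelegatedAuthorizationSettings.AUTHZ_REALMS`. A hedged sketch of how an authenticating realm's settings name its authorizing realms in these tests; the realm name "mock_lookup" and the LDAP helper calls are taken from the test code, not from production configuration:

    // Illustrative only: point an authenticating realm at one or more lookup realms.
    Settings realmSettings = Settings.builder()
            .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE))
            .putList(DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey(), "mock_lookup") // the "authorization_realms" key
            .build();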
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java
index 2410d8c4649..45ccaf6a147 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java
@@ -12,17 +12,21 @@ import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
+import org.elasticsearch.xpack.core.security.authc.Realm;
import org.elasticsearch.xpack.core.security.authc.RealmConfig;
import org.elasticsearch.xpack.core.security.authc.RealmSettings;
import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings;
import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
import org.elasticsearch.xpack.core.security.support.NoOpLogger;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings;
+import org.elasticsearch.xpack.security.authc.support.MockLookupRealm;
import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
import org.junit.Before;
import org.mockito.Mockito;
@@ -43,9 +47,11 @@ import java.util.regex.Pattern;

import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.sameInstance;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
@@ -56,16 +62,20 @@ import static org.mockito.Mockito.when;
public class PkiRealmTests extends ESTestCase {

    private Settings globalSettings;
+    private XPackLicenseState licenseState;

    @Before
    public void setup() throws Exception {
        globalSettings = Settings.builder()
                .put("path.home", createTempDir())
                .build();
+        licenseState = mock(XPackLicenseState.class);
+        when(licenseState.isAuthorizationRealmAllowed()).thenReturn(true);
    }

    public void testTokenSupport() {
-        RealmConfig config = new RealmConfig("", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig("", Settings.EMPTY, globalSettings,
+                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
        PkiRealm realm = new PkiRealm(config, mock(UserRoleMapper.class));

        assertThat(realm.supports(null), is(false));
@@ -98,28 +108,14 @@ public class PkiRealmTests extends ESTestCase {
    }

    private void assertSuccessfulAuthentication(Set<String> roles) throws Exception {
-        String dn = "CN=Elasticsearch Test Node,";
-        final String expectedUsername = "Elasticsearch Test Node";
-        X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"));
-        X509AuthenticationToken token = new X509AuthenticationToken(new X509Certificate[] { certificate }, "Elasticsearch Test Node", dn);
-        UserRoleMapper roleMapper = mock(UserRoleMapper.class);
-        PkiRealm realm = new PkiRealm(new RealmConfig("", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-                new ThreadContext(globalSettings)), roleMapper);
+        X509AuthenticationToken token = buildToken();
+        UserRoleMapper roleMapper = buildRoleMapper(roles, token.dn());
+        PkiRealm realm = buildRealm(roleMapper, Settings.EMPTY);
        verify(roleMapper).refreshRealmOnChange(realm);
-        Mockito.doAnswer(invocation -> {
-            final UserRoleMapper.UserData userData = (UserRoleMapper.UserData) invocation.getArguments()[0];
-            final ActionListener<Set<String>> listener = (ActionListener<Set<String>>) invocation.getArguments()[1];
-            if (userData.getDn().equals(dn)) {
-                listener.onResponse(roles);
-            } else {
-                listener.onFailure(new IllegalArgumentException("Expected DN '" + dn + "' but was '" + userData + "'"));
-            }
-            return null;
-        }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class));

-        PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
-        realm.authenticate(token, future);
-        final AuthenticationResult result = future.actionGet();
+        final String expectedUsername = token.principal();
+        final AuthenticationResult result = authenticate(token, realm);
+        final PlainActionFuture<AuthenticationResult> future;
        assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS));
        User user = result.getUser();
        assertThat(user, is(notNullValue()));
@@ -149,17 +145,54 @@ public class PkiRealmTests extends ESTestCase {
        verifyNoMoreInteractions(roleMapper);
    }

+    private UserRoleMapper buildRoleMapper(Set<String> roles, String dn) {
+        UserRoleMapper roleMapper = mock(UserRoleMapper.class);
+        Mockito.doAnswer(invocation -> {
+            final UserRoleMapper.UserData userData = (UserRoleMapper.UserData) invocation.getArguments()[0];
+            final ActionListener<Set<String>> listener = (ActionListener<Set<String>>) invocation.getArguments()[1];
+            if (userData.getDn().equals(dn)) {
+                listener.onResponse(roles);
+            } else {
+                listener.onFailure(new IllegalArgumentException("Expected DN '" + dn + "' but was '" + userData + "'"));
+            }
+            return null;
+        }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class));
+        return roleMapper;
+    }
+
+    private PkiRealm buildRealm(UserRoleMapper roleMapper, Settings realmSettings, Realm... otherRealms) {
+        PkiRealm realm = new PkiRealm(new RealmConfig("", realmSettings, globalSettings, TestEnvironment.newEnvironment(globalSettings),
+                new ThreadContext(globalSettings)), roleMapper);
+        List<Realm> allRealms = CollectionUtils.arrayAsArrayList(otherRealms);
+        allRealms.add(realm);
+        Collections.shuffle(allRealms, random());
+        realm.initialize(allRealms, licenseState);
+        return realm;
+    }
+
+    private X509AuthenticationToken buildToken() throws Exception {
+        X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"));
+        return new X509AuthenticationToken(new X509Certificate[]{certificate}, "Elasticsearch Test Node", "CN=Elasticsearch Test Node,");
+    }
+
+    private AuthenticationResult authenticate(X509AuthenticationToken token, PkiRealm realm) {
+        PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
+        realm.authenticate(token, future);
+        return future.actionGet();
+    }
+
    public void testCustomUsernamePattern() throws Exception {
+        ThreadContext threadContext = new ThreadContext(globalSettings);
        X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"));
        UserRoleMapper roleMapper = mock(UserRoleMapper.class);
-        PkiRealm realm = new PkiRealm(new RealmConfig("", Settings.builder().put("username_pattern", "OU=(.*?),").build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)),
-                roleMapper);
+        PkiRealm realm = new PkiRealm(new RealmConfig("", Settings.builder().put("username_pattern", "OU=(.*?),").build(), globalSettings,
+                TestEnvironment.newEnvironment(globalSettings), threadContext), roleMapper);
+        realm.initialize(Collections.emptyList(), licenseState);
        Mockito.doAnswer(invocation -> {
            ActionListener<Set<String>> listener = (ActionListener<Set<String>>) invocation.getArguments()[1];
            listener.onResponse(Collections.emptySet());
            return null;
        }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class));
-        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate });

        X509AuthenticationToken token = realm.token(threadContext);
@@ -182,15 +215,16 @@ public class PkiRealmTests extends ESTestCase {
                .put("truststore.path", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks"))
                .setSecureSettings(secureSettings)
                .build();
+        ThreadContext threadContext = new ThreadContext(globalSettings);
        PkiRealm realm = new PkiRealm(new RealmConfig("", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-                new ThreadContext(globalSettings)), roleMapper);
+                threadContext), roleMapper);
+        realm.initialize(Collections.emptyList(), licenseState);
        Mockito.doAnswer(invocation -> {
            ActionListener<Set<String>> listener = (ActionListener<Set<String>>) invocation.getArguments()[1];
            listener.onResponse(Collections.emptySet());
            return null;
        }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class));
-        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate });

        X509AuthenticationToken token = realm.token(threadContext);
@@ -213,15 +247,16 @@ public class PkiRealmTests extends ESTestCase {
                        getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks"))
                .setSecureSettings(secureSettings)
                .build();
+        final ThreadContext threadContext = new ThreadContext(globalSettings);
        PkiRealm realm = new PkiRealm(new RealmConfig("", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-                new ThreadContext(globalSettings)), roleMapper);
+                threadContext), roleMapper);
+        realm.initialize(Collections.emptyList(), licenseState);
        Mockito.doAnswer(invocation -> {
            ActionListener<Set<String>> listener = (ActionListener<Set<String>>) invocation.getArguments()[1];
            listener.onResponse(Collections.emptySet());
            return null;
        }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class));
-        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate });

        X509AuthenticationToken token = realm.token(threadContext);
@@ -307,6 +342,33 @@ public class PkiRealmTests extends ESTestCase {
        assertSettingDeprecationsAndWarnings(new Setting[] { SSLConfigurationSettings.withoutPrefix().legacyTruststorePassword });
    }

+    public void testDelegatedAuthorization() throws Exception {
+        final X509AuthenticationToken token = buildToken();
+
+        final MockLookupRealm otherRealm = new MockLookupRealm(new RealmConfig("other_realm", Settings.EMPTY, globalSettings,
+                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)));
+        final User lookupUser = new User(token.principal());
+        otherRealm.registerUser(lookupUser);
+
+        final Settings realmSettings = Settings.builder()
+                .putList("authorization_realms", "other_realm")
+                .build();
+        final UserRoleMapper roleMapper = buildRoleMapper(Collections.emptySet(), token.dn());
+        final PkiRealm pkiRealm = buildRealm(roleMapper, realmSettings, otherRealm);
+
+        AuthenticationResult result = authenticate(token, pkiRealm);
+        assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS));
+        assertThat(result.getUser(), sameInstance(lookupUser));
+
+        // check that the authorizing realm is consulted even for cached principals
+        final User lookupUser2 = new User(token.principal());
+        otherRealm.registerUser(lookupUser2);
+
+        result = authenticate(token, pkiRealm);
+        assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS));
+        assertThat(result.getUser(), sameInstance(lookupUser2));
+    }
+
    static X509Certificate readCert(Path path) throws Exception {
        try (InputStream in = Files.newInputStream(path)) {
            CertificateFactory factory = CertificateFactory.getInstance("X.509");
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java
index 980abc46831..2ecfdb50230 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java
@@ -14,18 +14,24 @@ import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.test.http.MockResponse;
import org.elasticsearch.test.http.MockWebServer;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
+import org.elasticsearch.xpack.core.security.authc.Realm;
import org.elasticsearch.xpack.core.security.authc.RealmConfig;
import org.elasticsearch.xpack.core.security.authc.RealmSettings;
import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings;
+import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings;
+import org.elasticsearch.xpack.core.security.user.User;
import org.elasticsearch.xpack.core.ssl.CertParsingUtils;
import org.elasticsearch.xpack.core.ssl.PemUtils;
import org.elasticsearch.xpack.core.ssl.SSLService;
import org.elasticsearch.xpack.core.ssl.TestsSSLService;
+import org.elasticsearch.xpack.security.authc.support.MockLookupRealm;
import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
+import org.hamcrest.Matchers;
import org.junit.Before;
import org.mockito.Mockito;
import org.opensaml.saml.common.xml.SAMLConstants;
@@ -71,6 +77,7 @@ import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;

/**
 * Basic unit tests for the SAMLRealm
@@ -83,9 +90,16 @@ public class SamlRealmTests extends SamlTestCase {
    private static final String REALM_NAME = "my-saml";
    private static final String REALM_SETTINGS_PREFIX = "xpack.security.authc.realms." + REALM_NAME;

+    private Settings globalSettings;
+    private Environment env;
+    private ThreadContext threadContext;
+
    @Before
-    public void initRealm() throws PrivilegedActionException {
+    public void setupEnv() throws PrivilegedActionException {
        SamlUtils.initialize(logger);
+        globalSettings = Settings.builder().put("path.home", createTempDir()).build();
+        env = TestEnvironment.newEnvironment(globalSettings);
+        threadContext = new ThreadContext(globalSettings);
    }

    public void testReadIdpMetadataFromFile() throws Exception {
@@ -140,15 +154,70 @@ public class SamlRealmTests extends SamlTestCase {
    }

    public void testAuthenticateWithRoleMapping() throws Exception {
+        final UserRoleMapper roleMapper = mock(UserRoleMapper.class);
+        AtomicReference<UserRoleMapper.UserData> userData = new AtomicReference<>();
+        Mockito.doAnswer(invocation -> {
+            assert invocation.getArguments().length == 2;
+            userData.set((UserRoleMapper.UserData) invocation.getArguments()[0]);
+            ActionListener<Set<String>> listener = (ActionListener<Set<String>>) invocation.getArguments()[1];
+            listener.onResponse(Collections.singleton("superuser"));
+            return null;
+        }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class));
+
        final boolean useNameId = randomBoolean();
        final boolean principalIsEmailAddress = randomBoolean();
        final Boolean populateUserMetadata = randomFrom(Boolean.TRUE, Boolean.FALSE, null);
+
+        AuthenticationResult result = performAuthentication(roleMapper, useNameId, principalIsEmailAddress, populateUserMetadata, false);
+        assertThat(result.getUser().roles(), arrayContainingInAnyOrder("superuser"));
+        if (populateUserMetadata == Boolean.FALSE) {
+            // TODO : "saml_nameid" should be null too, but the logout code requires it for now.
+            assertThat(result.getUser().metadata().get("saml_uid"), nullValue());
+        } else {
+            final String nameIdValue = principalIsEmailAddress ? "clint.barton@shield.gov" : "clint.barton";
+            final String uidValue = principalIsEmailAddress ? "cbarton@shield.gov" : "cbarton";
+            assertThat(result.getUser().metadata().get("saml_nameid"), equalTo(nameIdValue));
+            assertThat(result.getUser().metadata().get("saml_uid"), instanceOf(Iterable.class));
+            assertThat((Iterable<String>) result.getUser().metadata().get("saml_uid"), contains(uidValue));
+        }
+
+        assertThat(userData.get().getUsername(), equalTo(useNameId ? "clint.barton" : "cbarton"));
+        assertThat(userData.get().getGroups(), containsInAnyOrder("avengers", "shield"));
+    }
+
+    public void testAuthenticateWithAuthorizingRealm() throws Exception {
        final UserRoleMapper roleMapper = mock(UserRoleMapper.class);
+        Mockito.doAnswer(invocation -> {
+            assert invocation.getArguments().length == 2;
+            ActionListener<Set<String>> listener = (ActionListener<Set<String>>) invocation.getArguments()[1];
+            listener.onFailure(new RuntimeException("Role mapping should not be called"));
+            return null;
+        }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class));
+
+        final boolean useNameId = randomBoolean();
+        final boolean principalIsEmailAddress = randomBoolean();
+
+        AuthenticationResult result = performAuthentication(roleMapper, useNameId, principalIsEmailAddress, null, true);
+        assertThat(result.getUser().roles(), arrayContainingInAnyOrder("lookup_user_role"));
+        assertThat(result.getUser().fullName(), equalTo("Clinton Barton"));
+        assertThat(result.getUser().metadata().entrySet(), Matchers.iterableWithSize(1));
+        assertThat(result.getUser().metadata().get("is_lookup"), Matchers.equalTo(true));
+    }
+
+    private AuthenticationResult performAuthentication(UserRoleMapper roleMapper, boolean useNameId, boolean principalIsEmailAddress,
+                                                       Boolean populateUserMetadata, boolean useAuthorizingRealm) throws Exception {
        final EntityDescriptor idp = mockIdp();
        final SpConfiguration sp = new SpConfiguration("", "https://saml/", null, null, null, Collections.emptyList());
        final SamlAuthenticator authenticator = mock(SamlAuthenticator.class);
        final SamlLogoutRequestHandler logoutHandler = mock(SamlLogoutRequestHandler.class);
+        final String userPrincipal = useNameId ? "clint.barton" : "cbarton";
+        final String nameIdValue = principalIsEmailAddress ? "clint.barton@shield.gov" : "clint.barton";
+        final String uidValue = principalIsEmailAddress ? "cbarton@shield.gov" : "cbarton";
+
+        final MockLookupRealm lookupRealm = new MockLookupRealm(
+                new RealmConfig("mock_lookup", Settings.EMPTY, globalSettings, env, threadContext));
+
        final Settings.Builder settingsBuilder = Settings.builder()
                .put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.name(), useNameId ? "nameid" : "uid")
                .put(SamlRealmSettings.GROUPS_ATTRIBUTE.name(), "groups")
@@ -161,15 +230,20 @@ public class SamlRealmTests extends SamlTestCase {
        if (populateUserMetadata != null) {
            settingsBuilder.put(SamlRealmSettings.POPULATE_USER_METADATA.getKey(), populateUserMetadata.booleanValue());
        }
+        if (useAuthorizingRealm) {
+            settingsBuilder.putList(DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey(), lookupRealm.name());
+            lookupRealm.registerUser(new User(userPrincipal, new String[]{ "lookup_user_role" }, "Clinton Barton", "cbarton@shield.gov",
+                    Collections.singletonMap("is_lookup", true), true));
+        }
+
        final Settings realmSettings = settingsBuilder.build();
-
        final RealmConfig config = realmConfigFromRealmSettings(realmSettings);
-
        final SamlRealm realm = new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp);
+
+        initializeRealms(realm, lookupRealm);
+
        final SamlToken token = new SamlToken(new byte[0], Collections.singletonList(""));
-        final String nameIdValue = principalIsEmailAddress ? "clint.barton@shield.gov" : "clint.barton";
-        final String uidValue = principalIsEmailAddress ? "cbarton@shield.gov" : "cbarton";
        final SamlAttributes attributes = new SamlAttributes(
                new SamlNameId(NameIDType.PERSISTENT, nameIdValue, idp.getEntityID(), sp.getEntityId(), null),
                randomAlphaOfLength(16),
@@ -178,36 +252,27 @@ public class SamlRealmTests extends SamlTestCase {
                new SamlAttributes.SamlAttribute("urn:oid:1.3.6.1.4.1.5923.1.5.1.1", "groups", Arrays.asList("avengers", "shield")),
                new SamlAttributes.SamlAttribute("urn:oid:0.9.2342.19200300.100.1.3", "mail", Arrays.asList("cbarton@shield.gov"))
        ));
-        Mockito.when(authenticator.authenticate(token)).thenReturn(attributes);
-
-        AtomicReference<UserRoleMapper.UserData> userData = new AtomicReference<>();
-        Mockito.doAnswer(invocation -> {
-            assert invocation.getArguments().length == 2;
-            userData.set((UserRoleMapper.UserData) invocation.getArguments()[0]);
-            ActionListener<Set<String>> listener = (ActionListener<Set<String>>) invocation.getArguments()[1];
-            listener.onResponse(Collections.singleton("superuser"));
-            return null;
-        }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class));
+        when(authenticator.authenticate(token)).thenReturn(attributes);

        final PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
        realm.authenticate(token, future);
        final AuthenticationResult result = future.get();
        assertThat(result, notNullValue());
        assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS));
-        assertThat(result.getUser().principal(), equalTo(useNameId ? "clint.barton" : "cbarton"));
+        assertThat(result.getUser().principal(), equalTo(userPrincipal));
        assertThat(result.getUser().email(), equalTo("cbarton@shield.gov"));
-        assertThat(result.getUser().roles(), arrayContainingInAnyOrder("superuser"));
-        if (populateUserMetadata == Boolean.FALSE) {
-            // TODO : "saml_nameid" should be null too, but the logout code requires it for now.
-            assertThat(result.getUser().metadata().get("saml_uid"), nullValue());
-        } else {
-            assertThat(result.getUser().metadata().get("saml_nameid"), equalTo(nameIdValue));
-            assertThat(result.getUser().metadata().get("saml_uid"), instanceOf(Iterable.class));
-            assertThat((Iterable<String>) result.getUser().metadata().get("saml_uid"), contains(uidValue));
-        }

-        assertThat(userData.get().getUsername(), equalTo(useNameId ? "clint.barton" : "cbarton"));
-        assertThat(userData.get().getGroups(), containsInAnyOrder("avengers", "shield"));
+        return result;
+    }
+
+    private void initializeRealms(Realm... realms) {
+        XPackLicenseState licenseState = mock(XPackLicenseState.class);
+        when(licenseState.isAuthorizationRealmAllowed()).thenReturn(true);
+
+        final List<Realm> realmList = Arrays.asList(realms);
+        for (Realm realm : realms) {
+            realm.initialize(realmList, licenseState);
+        }
    }

    public void testAttributeSelectionWithRegex() throws Exception {
@@ -291,7 +356,7 @@ public class SamlRealmTests extends SamlTestCase {
                Collections.singletonList(
                        new SamlAttributes.SamlAttribute("urn:oid:0.9.2342.19200300.100.1.3", "mail", Collections.singletonList(mail))
                ));
-        Mockito.when(authenticator.authenticate(token)).thenReturn(attributes);
+        when(authenticator.authenticate(token)).thenReturn(attributes);

        final PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
        realm.authenticate(token, future);
@@ -515,8 +580,8 @@ public class SamlRealmTests extends SamlTestCase {
        final EntityDescriptor idp = mockIdp();
        final IDPSSODescriptor role = mock(IDPSSODescriptor.class);
        final SingleLogoutService slo = SamlUtils.buildObject(SingleLogoutService.class, SingleLogoutService.DEFAULT_ELEMENT_NAME);
-        Mockito.when(idp.getRoleDescriptors(IDPSSODescriptor.DEFAULT_ELEMENT_NAME)).thenReturn(Collections.singletonList(role));
-        Mockito.when(role.getSingleLogoutServices()).thenReturn(Collections.singletonList(slo));
+        when(idp.getRoleDescriptors(IDPSSODescriptor.DEFAULT_ELEMENT_NAME)).thenReturn(Collections.singletonList(role));
+        when(role.getSingleLogoutServices()).thenReturn(Collections.singletonList(slo));
        slo.setBinding(SAMLConstants.SAML2_REDIRECT_BINDING_URI);
        slo.setLocation("https://logout.saml/");
@@ -553,7 +618,7 @@ public class SamlRealmTests extends SamlTestCase {

    private EntityDescriptor mockIdp() {
        final EntityDescriptor descriptor = mock(EntityDescriptor.class);
-        Mockito.when(descriptor.getEntityID()).thenReturn("https://idp.saml/");
+        when(descriptor.getEntityID()).thenReturn("https://idp.saml/");
        return descriptor;
    }

@@ -585,9 +650,7 @@ public class SamlRealmTests extends SamlTestCase {
    }

    private RealmConfig realmConfigFromRealmSettings(Settings realmSettings) {
-        final Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build();
-        final Environment env = TestEnvironment.newEnvironment(globalSettings);
-        return new RealmConfig(REALM_NAME, realmSettings, globalSettings, env, new ThreadContext(globalSettings));
+        return new RealmConfig(REALM_NAME, realmSettings, globalSettings, env, threadContext);
    }

    private RealmConfig realmConfigFromGlobalSettings(Settings globalSettings) {
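The caching-realm change that follows exercises a new extension point, `handleCachedAuthentication`, which lets a subclass intercept a cache hit and substitute its own result; this is what keeps delegated authorization authoritative even for cached principals. A sketch of an override, mirroring the anonymous subclass in the test below (`AlwaysAuthenticateCachingRealm` is a helper class local to that test):

    // Sketch: a caching realm subclass may replace the cached result on a cache hit.
    Realm realm = new AlwaysAuthenticateCachingRealm(globalSettings, threadPool) {
        @Override
        protected void handleCachedAuthentication(User user, ActionListener<AuthenticationResult> listener) {
            // consult another source instead of trusting the cached user object
            listener.onResponse(AuthenticationResult.success(new User(user.principal())));
        }
    };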
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java
index e6830be18c5..e9e8908c584 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java
@@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig;
import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings;
import org.elasticsearch.xpack.core.security.authc.support.Hasher;
import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
import org.junit.After;
import org.junit.Before;

@@ -31,6 +31,7 @@ import java.util.List;
import java.util.Locale;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;

import static java.util.Collections.emptyMap;
import static org.hamcrest.Matchers.arrayContaining;
@@ -39,6 +40,7 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.sameInstance;

@@ -341,6 +343,33 @@ public class CachingUsernamePasswordRealmTests extends ESTestCase {
        assertThat(e.getMessage(), containsString("lookup exception"));
    }

+    public void testReturnDifferentObjectFromCache() throws Exception {
+        final AtomicReference<User> userArg = new AtomicReference<>();
+        final AtomicReference<AuthenticationResult> result = new AtomicReference<>();
+        Realm realm = new AlwaysAuthenticateCachingRealm(globalSettings, threadPool) {
+            @Override
+            protected void handleCachedAuthentication(User user, ActionListener<AuthenticationResult> listener) {
+                userArg.set(user);
+                listener.onResponse(result.get());
+            }
+        };
+        PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
+        realm.authenticate(new UsernamePasswordToken("user", new SecureString("pass")), future);
+        final AuthenticationResult result1 = future.actionGet();
+        assertThat(result1, notNullValue());
+        assertThat(result1.getUser(), notNullValue());
+        assertThat(result1.getUser().principal(), equalTo("user"));
+
+        final AuthenticationResult result2 = AuthenticationResult.success(new User("user"));
+        result.set(result2);
+
+        future = new PlainActionFuture<>();
+        realm.authenticate(new UsernamePasswordToken("user", new SecureString("pass")), future);
+        final AuthenticationResult result3 = future.actionGet();
+        assertThat(result3, sameInstance(result2));
+        assertThat(userArg.get(), sameInstance(result1.getUser()));
+    }
+
    public void testSingleAuthPerUserLimit() throws Exception {
        final String username = "username";
        final SecureString password = SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING;
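The new test class that follows pins down the `DelegatedAuthorizationSupport` contract: `resolve(principal, listener)` answers SUCCESS with the first matching user from the configured authorizing realms, or CONTINUE (with an explanatory message) when nothing is configured, nothing matches, or the license disallows the feature. A condensed usage sketch under the same assumptions as the tests (the realm list, config, and license state are presumed already built):

    // Condensed from the tests below; not a definitive API walkthrough.
    DelegatedAuthorizationSupport das = new DelegatedAuthorizationSupport(realms, config, licenseState);
    if (das.hasDelegation()) {
        das.resolve("my_user", ActionListener.wrap(
                result -> {
                    // Status.SUCCESS carries the looked-up User; Status.CONTINUE means "not found here"
                },
                e -> { /* lookup failures propagate as exceptions */ }));
    }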
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupportTests.java
new file mode 100644
index 00000000000..8f0d360b759
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupportTests.java
@@ -0,0 +1,189 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.authc.support;
+
+import org.elasticsearch.ElasticsearchSecurityException;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
+import org.elasticsearch.xpack.core.security.authc.Realm;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.user.User;
+import org.junit.Before;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.common.Strings.collectionToDelimitedString;
+import static org.hamcrest.Matchers.arrayContaining;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.sameInstance;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class DelegatedAuthorizationSupportTests extends ESTestCase {
+
+    private List<MockLookupRealm> realms;
+    private Settings globalSettings;
+    private ThreadContext threadContext;
+    private Environment env;
+
+    @Before
+    public void setupRealms() {
+        globalSettings = Settings.builder()
+            .put("path.home", createTempDir())
+            .build();
+        env = TestEnvironment.newEnvironment(globalSettings);
+        threadContext = new ThreadContext(globalSettings);
+
+        final int realmCount = randomIntBetween(5, 9);
+        realms = new ArrayList<>(realmCount);
+        for (int i = 1; i <= realmCount; i++) {
+            realms.add(new MockLookupRealm(buildRealmConfig("lookup-" + i, Settings.EMPTY)));
+        }
+        shuffle(realms);
+    }
+
+    private List<MockLookupRealm> shuffle(List<MockLookupRealm> list) {
+        Collections.shuffle(list, random());
+        return list;
+    }
+
+    private RealmConfig buildRealmConfig(String name, Settings settings) {
+        return new RealmConfig(name, settings, globalSettings, env, threadContext);
+    }
+
+    public void testEmptyDelegationList() throws ExecutionException, InterruptedException {
+        final XPackLicenseState license = getLicenseState(true);
+        final DelegatedAuthorizationSupport das = new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", Settings.EMPTY), license);
+        assertThat(das.hasDelegation(), equalTo(false));
+        final PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
+        das.resolve("any", future);
+        final AuthenticationResult result = future.get();
+        assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE));
+        assertThat(result.getUser(), nullValue());
+        assertThat(result.getMessage(), equalTo("No [authorization_realms] have been configured"));
+    }
+
+    public void testMissingRealmInDelegationList() {
+        final XPackLicenseState license = getLicenseState(true);
+        final Settings settings = Settings.builder()
+            .putList("authorization_realms", "no-such-realm")
+            .build();
+        final IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () ->
+            new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", settings), license)
+        );
+        assertThat(ex.getMessage(), equalTo("configured authorization realm [no-such-realm] does not exist (or is not enabled)"));
+    }
+
+    public void testDelegationChainsAreRejected() {
+        final XPackLicenseState license = getLicenseState(true);
+        final Settings settings = Settings.builder()
+            .putList("authorization_realms", "lookup-1", "lookup-2", "lookup-3")
+            .build();
+        globalSettings = Settings.builder()
+            .put(globalSettings)
+            .putList("xpack.security.authc.realms.lookup-2.authorization_realms", "lookup-1")
+            .build();
+        final IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () ->
+            new DelegatedAuthorizationSupport(realms, buildRealmConfig("realm1", settings), license)
+        );
+        assertThat(ex.getMessage(),
+            equalTo("cannot use realm [mock/lookup-2] as an authorization realm - it is already delegating authorization to [[lookup-1]]"));
+    }
+
+    public void testMatchInDelegationList() throws Exception {
+        final XPackLicenseState license = getLicenseState(true);
+        final List<MockLookupRealm> useRealms = shuffle(randomSubsetOf(randomIntBetween(1, realms.size()), realms));
+        final Settings settings = Settings.builder()
+            .putList("authorization_realms", useRealms.stream().map(Realm::name).collect(Collectors.toList()))
+            .build();
+        final User user = new User("my_user");
+        randomFrom(useRealms).registerUser(user);
+        final DelegatedAuthorizationSupport das = new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", settings), license);
+        assertThat(das.hasDelegation(), equalTo(true));
+        final PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
+        das.resolve("my_user", future);
+        final AuthenticationResult result = future.get();
+        assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS));
+        assertThat(result.getUser(), sameInstance(user));
+    }
+
+    public void testRealmsAreOrdered() throws Exception {
+        final XPackLicenseState license = getLicenseState(true);
+        final List<MockLookupRealm> useRealms = shuffle(randomSubsetOf(randomIntBetween(3, realms.size()), realms));
+        final List<String> names = useRealms.stream().map(Realm::name).collect(Collectors.toList());
+        final Settings settings = Settings.builder()
+            .putList("authorization_realms", names)
+            .build();
+        final List<User> users = new ArrayList<>(names.size());
+        final String username = randomAlphaOfLength(8);
+        for (MockLookupRealm r : useRealms) {
+            final User user = new User(username, "role_" + r.name());
+            users.add(user);
+            r.registerUser(user);
+        }
+
+        final DelegatedAuthorizationSupport das = new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", settings), license);
+        assertThat(das.hasDelegation(), equalTo(true));
+        final PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
+        das.resolve(username, future);
+        final AuthenticationResult result = future.get();
+        assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS));
+        assertThat(result.getUser(), sameInstance(users.get(0)));
+        assertThat(result.getUser().roles(), arrayContaining("role_" + useRealms.get(0).name()));
+    }
+
+    public void testNoMatchInDelegationList() throws Exception {
+        final XPackLicenseState license = getLicenseState(true);
+        final List<MockLookupRealm> useRealms = shuffle(randomSubsetOf(randomIntBetween(1, realms.size()), realms));
+        final Settings settings = Settings.builder()
+            .putList("authorization_realms", useRealms.stream().map(Realm::name).collect(Collectors.toList()))
+            .build();
+        final DelegatedAuthorizationSupport das = new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", settings), license);
+        assertThat(das.hasDelegation(), equalTo(true));
+        final PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
+        das.resolve("my_user", future);
+        final AuthenticationResult result = future.get();
+        assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE));
+        assertThat(result.getUser(), nullValue());
+        assertThat(result.getMessage(), equalTo("the principal [my_user] was authenticated, but no user could be found in realms [" +
+            collectionToDelimitedString(useRealms.stream().map(Realm::toString).collect(Collectors.toList()), ",") + "]"));
+    }
+
+    public void testLicenseRejection() throws Exception {
+        final XPackLicenseState license = getLicenseState(false);
+        final Settings settings = Settings.builder()
+            .putList("authorization_realms", realms.get(0).name())
+            .build();
+        final DelegatedAuthorizationSupport das = new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", settings), license);
+        assertThat(das.hasDelegation(), equalTo(true));
+        final PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
+        das.resolve("my_user", future);
+        final AuthenticationResult result = future.get();
+        assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE));
+        assertThat(result.getUser(), nullValue());
+        assertThat(result.getMessage(), equalTo("authorization_realms are not permitted"));
+        assertThat(result.getException(), instanceOf(ElasticsearchSecurityException.class));
+        assertThat(result.getException().getMessage(), equalTo("current license is non-compliant for [authorization_realms]"));
+    }
+
+    private XPackLicenseState getLicenseState(boolean authzRealmsAllowed) {
+        final XPackLicenseState license = mock(XPackLicenseState.class);
+        when(license.isAuthorizationRealmAllowed()).thenReturn(authzRealmsAllowed);
+        return license;
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/MockLookupRealm.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/MockLookupRealm.java
new file mode 100644
index 00000000000..01700347f50
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/MockLookupRealm.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.security.authc.support;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
+import org.elasticsearch.xpack.core.security.authc.AuthenticationToken;
+import org.elasticsearch.xpack.core.security.authc.Realm;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.user.User;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class MockLookupRealm extends Realm {
+
+    private final Map<String, User> lookup;
+
+    public MockLookupRealm(RealmConfig config) {
+        super("mock", config);
+        lookup = new HashMap<>();
+    }
+
+    public void registerUser(User user) {
+        this.lookup.put(user.principal(), user);
+    }
+
+    @Override
+    public boolean supports(AuthenticationToken token) {
+        return false;
+    }
+
+    @Override
+    public AuthenticationToken token(ThreadContext context) {
+        return null;
+    }
+
+    @Override
+    public void authenticate(AuthenticationToken token, ActionListener<AuthenticationResult> listener) {
+        listener.onResponse(AuthenticationResult.notHandled());
+    }
+
+    @Override
+    public void lookupUser(String username, ActionListener<User> listener) {
+        listener.onResponse(lookup.get(username));
+    }
+}
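RealmUserLookupTests, next, fixes the semantics of `RealmUserLookup`: realms are consulted in list order, the listener receives a `Tuple` of the resolved user and the realm that supplied it, and a null tuple means no realm knew the principal. A brief usage sketch under those assumptions (the username is illustrative):

    // Sketch: look a principal up across an ordered list of realms.
    RealmUserLookup lookup = new RealmUserLookup(realms, threadContext);
    PlainActionFuture<Tuple<User, Realm>> future = new PlainActionFuture<>();
    lookup.lookup("jdoe", future);
    Tuple<User, Realm> hit = future.actionGet();
    if (hit != null) {
        User user = hit.v1();    // the resolved user
        Realm source = hit.v2(); // the realm that resolved it
    }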
+ */ + +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.user.User; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class RealmUserLookupTests extends ESTestCase { + + private Settings globalSettings; + private ThreadContext threadContext; + private Environment env; + + @Before + public void setup() { + globalSettings = Settings.builder() + .put("path.home", createTempDir()) + .build(); + env = TestEnvironment.newEnvironment(globalSettings); + threadContext = new ThreadContext(globalSettings); + } + + public void testNoRealms() throws Exception { + final RealmUserLookup lookup = new RealmUserLookup(Collections.emptyList(), threadContext); + final PlainActionFuture<Tuple<User, Realm>> listener = new PlainActionFuture<>(); + lookup.lookup(randomAlphaOfLengthBetween(3, 12), listener); + final Tuple<User, Realm> tuple = listener.get(); + assertThat(tuple, nullValue()); + } + + public void testUserFound() throws Exception { + final List<MockLookupRealm> realms = buildRealms(randomIntBetween(5, 9)); + final RealmUserLookup lookup = new RealmUserLookup(realms, threadContext); + + final MockLookupRealm matchRealm = randomFrom(realms); + final User user = new User(randomAlphaOfLength(5)); + matchRealm.registerUser(user); + + final PlainActionFuture<Tuple<User, Realm>> listener = new PlainActionFuture<>(); + lookup.lookup(user.principal(), listener); + final Tuple<User, Realm> tuple = listener.get(); + assertThat(tuple, notNullValue()); + assertThat(tuple.v1(), notNullValue()); + assertThat(tuple.v1(), sameInstance(user)); + assertThat(tuple.v2(), notNullValue()); + assertThat(tuple.v2(), sameInstance(matchRealm)); + } + + public void testUserNotFound() throws Exception { + final List<MockLookupRealm> realms = buildRealms(randomIntBetween(5, 9)); + final RealmUserLookup lookup = new RealmUserLookup(realms, threadContext); + + final String username = randomAlphaOfLength(5); + + final PlainActionFuture<Tuple<User, Realm>> listener = new PlainActionFuture<>(); + lookup.lookup(username, listener); + final Tuple<User, Realm> tuple = listener.get(); + assertThat(tuple, nullValue()); + } + + public void testRealmException() { + final Realm realm = new Realm("test", new RealmConfig("test", Settings.EMPTY, globalSettings, env, threadContext)) { + @Override + public boolean supports(AuthenticationToken token) { + return false; + } + + @Override + public AuthenticationToken token(ThreadContext context) { + return null; + } + + @Override + public void authenticate(AuthenticationToken token, ActionListener<AuthenticationResult> listener) { + listener.onResponse(AuthenticationResult.notHandled()); + } + + @Override + public void lookupUser(String username, ActionListener<User>
listener) { + listener.onFailure(new RuntimeException("FAILURE")); + } + }; + final RealmUserLookup lookup = new RealmUserLookup(Collections.singletonList(realm), threadContext); + final PlainActionFuture<Tuple<User, Realm>> listener = new PlainActionFuture<>(); + lookup.lookup("anyone", listener); + final RuntimeException e = expectThrows(RuntimeException.class, listener::actionGet); + assertThat(e.getMessage(), equalTo("FAILURE")); + } + + private List<MockLookupRealm> buildRealms(int realmCount) { + final List<MockLookupRealm> realms = new ArrayList<>(realmCount); + for (int i = 1; i <= realmCount; i++) { + final RealmConfig config = new RealmConfig("lookup-" + i, Settings.EMPTY, globalSettings, env, threadContext); + final MockLookupRealm realm = new MockLookupRealm(config); + for (int j = 0; j < 5; j++) { + realm.registerUser(new User(randomAlphaOfLengthBetween(6, 12))); + } + realms.add(realm); + } + Collections.shuffle(realms, random()); + return realms; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 2bee8fa09e3..052ba385510 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.support.SecurityIndexManager; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 65c558d3d81..8ccac83c86f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -122,7 +122,7 @@ import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.audit.AuditTrailService; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; @@ -183,7 +183,7 @@ public class AuthorizationServiceTests extends ESTestCase { rolesStore = mock(CompositeRolesStore.class); clusterService = mock(ClusterService.class); final Settings settings = Settings.builder() -
.put("search.remote.other_cluster.seeds", "localhost:9999") + .put("cluster.remote.other_cluster.seeds", "localhost:9999") .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java index a581d1abbb5..9c9f2b1b1a4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.junit.Before; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java index d31f9f37b91..c48ac456898 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCa import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; import org.elasticsearch.xpack.security.support.SecurityIndexManager; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index cf9c09759ea..39b70e0a879 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -46,12 +46,12 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; -import 
org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; import org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; @@ -60,7 +60,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCa import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.user.AnonymousUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.audit.AuditTrailService; @@ -110,8 +110,8 @@ public class IndicesAndAliasesResolverTests extends ESTestCase { .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 2)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 1)) - .put("search.remote.remote.seeds", "127.0.0.1:" + randomIntBetween(9301, 9350)) - .put("search.remote.other_remote.seeds", "127.0.0.1:" + randomIntBetween(9351, 9399)) + .put("cluster.remote.remote.seeds", "127.0.0.1:" + randomIntBetween(9301, 9350)) + .put("cluster.remote.other_remote.seeds", "127.0.0.1:" + randomIntBetween(9351, 9399)) .build(); indexNameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java index 07686838ad0..08e4b1123c7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.authz; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -18,10 +19,11 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; import org.elasticsearch.xpack.core.security.support.MetadataUtils; import org.hamcrest.Matchers; @@ -208,7 +210,10 @@ public class RoleDescriptorTests extends ESTestCase { } public void testSerialization() throws Exception { + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, null); + logger.info("Testing serialization with version {}", version); BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(version); RoleDescriptor.IndicesPrivileges[] groups = new 
RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("i1", "i2") @@ -235,6 +240,7 @@ public class RoleDescriptorTests extends ESTestCase { final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); StreamInput streamInput = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())), registry); + streamInput.setVersion(version); final RoleDescriptor serialized = RoleDescriptor.readFrom(streamInput); assertEquals(descriptor, serialized); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java index 087749da240..fac88e8af09 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.transport.TransportRequest.Empty; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.audit.AuditTrailService; import static org.elasticsearch.mock.orig.Mockito.verifyNoMoreInteractions; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java index 825ce4ee44c..34a0685c2fd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java @@ -195,7 +195,7 @@ public class IndicesPermissionTests extends ESTestCase { assertEquals(readIndicesPrivileges, indicesPrivileges.build()); out = new BytesStreamOutput(); - out.setVersion(Version.V_5_0_0); + out.setVersion(Version.V_6_0_0); indicesPrivileges = RoleDescriptor.IndicesPrivileges.builder(); indicesPrivileges.grantedFields(allowed); indicesPrivileges.deniedFields(denied); @@ -205,7 +205,7 @@ public class IndicesPermissionTests extends ESTestCase { indicesPrivileges.build().writeTo(out); out.close(); in = out.bytes().streamInput(); - in.setVersion(Version.V_5_0_0); + in.setVersion(Version.V_6_0_0); RoleDescriptor.IndicesPrivileges readIndicesPrivileges2 = RoleDescriptor.IndicesPrivileges.createFrom(in); assertEquals(readIndicesPrivileges, readIndicesPrivileges2); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java index fe180c9c5cc..efe154f8d78 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java @@ -11,10 +11,20 @@ 
import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.indices.IndicesQueryCache; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; @@ -24,6 +34,12 @@ import org.junit.Before; import java.io.IOException; import java.util.HashSet; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + /** Simple tests for opt out query cache*/ public class OptOutQueryCacheTests extends ESTestCase { IndexSearcher searcher; @@ -50,7 +66,7 @@ public class OptOutQueryCacheTests extends ESTestCase { BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.MUST); builder.add(new TermQuery(new Term("no", "baz")), BooleanClause.Occur.MUST_NOT); - Weight weight = builder.build().createWeight(searcher, false, 1f); + Weight weight = builder.build().createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, 1f); // whenever the allowed fields match the fields in the query and we do not deny access to any fields we allow caching. 
IndicesAccessControl.IndexAccessControl permissions = new IndicesAccessControl.IndexAccessControl(true, @@ -107,6 +123,88 @@ public class OptOutQueryCacheTests extends ESTestCase { assertFalse(OptOutQueryCache.cachingIsSafe(weight, permissions)); } + public void testOptOutQueryCacheSecurityIsNotEnabled() { + final Settings.Builder settings = Settings.builder() + .put("index.version.created", Version.CURRENT) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0); + final IndexMetaData indexMetaData = IndexMetaData.builder("index").settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY); + final IndicesQueryCache indicesQueryCache = mock(IndicesQueryCache.class); + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(false); + when(licenseState.isAuthAllowed()).thenReturn(randomBoolean()); + final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); + final Weight weight = mock(Weight.class); + final QueryCachingPolicy policy = mock(QueryCachingPolicy.class); + cache.doCache(weight, policy); + verify(indicesQueryCache).doCache(same(weight), same(policy)); + } + + public void testOptOutQueryCacheAuthIsNotAllowed() { + final Settings.Builder settings = Settings.builder() + .put("index.version.created", Version.CURRENT) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0); + final IndexMetaData indexMetaData = IndexMetaData.builder("index").settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY); + final IndicesQueryCache indicesQueryCache = mock(IndicesQueryCache.class); + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(randomBoolean()); + when(licenseState.isAuthAllowed()).thenReturn(false); + final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); + final Weight weight = mock(Weight.class); + final QueryCachingPolicy policy = mock(QueryCachingPolicy.class); + cache.doCache(weight, policy); + verify(indicesQueryCache).doCache(same(weight), same(policy)); + } + + public void testOptOutQueryCacheNoIndicesPermissions() { + final Settings.Builder settings = Settings.builder() + .put("index.version.created", Version.CURRENT) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0); + final IndexMetaData indexMetaData = IndexMetaData.builder("index").settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY); + final IndicesQueryCache indicesQueryCache = mock(IndicesQueryCache.class); + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.isAuthAllowed()).thenReturn(true); + final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); + final Weight weight = mock(Weight.class); + final QueryCachingPolicy policy = mock(QueryCachingPolicy.class); + final Weight w = cache.doCache(weight, policy); + assertSame(w, weight); + 
verifyNoMoreInteractions(indicesQueryCache); + } + + public void testOptOutQueryCacheIndexDoesNotHaveFieldLevelSecurity() { + final Settings.Builder settings = Settings.builder() + .put("index.version.created", Version.CURRENT) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0); + final IndexMetaData indexMetaData = IndexMetaData.builder("index").settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY); + final IndicesQueryCache indicesQueryCache = mock(IndicesQueryCache.class); + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final IndicesAccessControl.IndexAccessControl indexAccessControl = mock(IndicesAccessControl.IndexAccessControl.class); + when(indexAccessControl.getFieldPermissions()).thenReturn(new FieldPermissions()); + final IndicesAccessControl indicesAccessControl = mock(IndicesAccessControl.class); + when(indicesAccessControl.getIndexPermissions("index")).thenReturn(indexAccessControl); + threadContext.putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, indicesAccessControl); + final XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.isAuthAllowed()).thenReturn(true); + final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); + final Weight weight = mock(Weight.class); + final QueryCachingPolicy policy = mock(QueryCachingPolicy.class); + cache.doCache(weight, policy); + verify(indicesQueryCache).doCache(same(weight), same(policy)); + } + private static FieldPermissionsDefinition fieldPermissionDef(String[] granted, String[] denied) { return new FieldPermissionsDefinition(granted, denied); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java index a2c70db3b63..4e5271c520a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; @@ -55,7 +56,6 @@ import java.util.List; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; -import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.contains; @@ -259,7 +259,7 @@ public class NativeRolesStoreTests extends ESTestCase { .put(IndexMetaData.builder(securityIndexName).settings(settings)) .put(new IndexTemplateMetaData(SecurityIndexManager.SECURITY_TEMPLATE_NAME, 0, 0, Collections.singletonList(securityIndexName), Settings.EMPTY, ImmutableOpenMap.of(), - ImmutableOpenMap.of(), 
ImmutableOpenMap.of())) + ImmutableOpenMap.of())) .build(); if (withAlias) { @@ -267,8 +267,8 @@ public class NativeRolesStoreTests extends ESTestCase { } Index index = new Index(securityIndexName, UUID.randomUUID().toString()); - ShardRouting shardRouting = ShardRouting.newUnassigned(new ShardId(index, 0), true, EXISTING_STORE_INSTANCE, - new UnassignedInfo(Reason.INDEX_CREATED, "")); + ShardRouting shardRouting = ShardRouting.newUnassigned(new ShardId(index, 0), true, + RecoverySource.ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(Reason.INDEX_CREATED, "")); IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(new ShardId(index, 0)) .addShard(shardRouting.initialize(randomAlphaOfLength(8), null, shardRouting.getExpectedShardSize()).moveToStarted()) .build(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java index 05c2882f3ac..26c59a1ef54 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java @@ -11,7 +11,7 @@ import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor.Property; import java.util.Collections; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 7d10198c6ae..76e84f83137 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; @@ -50,7 +51,6 @@ import org.elasticsearch.xpack.core.template.TemplateUtils; import org.hamcrest.Matchers; import org.junit.Before; -import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_TEMPLATE_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.TEMPLATE_VERSION_PATTERN; @@ -106,8 +106,8 @@ public class SecurityIndexManagerTests extends ESTestCase { final ClusterState.Builder clusterStateBuilder = createClusterState(INDEX_NAME, TEMPLATE_NAME); Index index = new Index(INDEX_NAME, UUID.randomUUID().toString()); - ShardRouting 
shardRouting = ShardRouting.newUnassigned(new ShardId(index, 0), true, EXISTING_STORE_INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); + ShardRouting shardRouting = ShardRouting.newUnassigned(new ShardId(index, 0), true, + RecoverySource.ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); String nodeId = ESTestCase.randomAlphaOfLength(8); IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(new ShardId(index, 0)) .addShard(shardRouting.initialize(nodeId, null, shardRouting.getExpectedShardSize()) @@ -165,7 +165,8 @@ public class SecurityIndexManagerTests extends ESTestCase { clusterStateBuilder.routingTable(RoutingTable.builder() .add(IndexRoutingTable.builder(prevIndex) .addIndexShard(new IndexShardRoutingTable.Builder(new ShardId(prevIndex, 0)) - .addShard(ShardRouting.newUnassigned(new ShardId(prevIndex, 0), true, EXISTING_STORE_INSTANCE, + .addShard(ShardRouting.newUnassigned(new ShardId(prevIndex, 0), true, + RecoverySource.ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) .initialize(UUIDs.randomBase64UUID(random()), null, 0L) .moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, ""))) @@ -347,10 +348,10 @@ public class SecurityIndexManagerTests extends ESTestCase { assertTrue(SecurityIndexManager.checkTemplateExistsAndVersionMatches( SecurityIndexManager.SECURITY_TEMPLATE_NAME, clusterState, logger, - Version.V_5_0_0::before)); + Version.V_6_0_0::before)); assertFalse(SecurityIndexManager.checkTemplateExistsAndVersionMatches( SecurityIndexManager.SECURITY_TEMPLATE_NAME, clusterState, logger, - Version.V_5_0_0::after)); + Version.V_6_0_0::after)); } public void testUpToDateMappingsAreIdentifiedAsUpToDate() throws IOException { @@ -448,4 +449,4 @@ public class SecurityIndexManagerTests extends ESTestCase { } return templateBuilder; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityTestUtils.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityTestUtils.java index aa4982cce3f..12474b7a04d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityTestUtils.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityTestUtils.java @@ -5,21 +5,17 @@ */ package org.elasticsearch.xpack.security.test; -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.cluster.health.ClusterIndexHealth; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.RecoverySource.ExistingStoreRecoverySource; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import 
org.elasticsearch.test.ESTestCase; @@ -39,9 +35,7 @@ import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; import static java.nio.file.StandardOpenOption.CREATE; import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; import static java.nio.file.StandardOpenOption.WRITE; -import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; -import static org.junit.Assert.assertEquals; public class SecurityTestUtils { @@ -74,7 +68,7 @@ public class SecurityTestUtils { public static RoutingTable buildIndexRoutingTable(String indexName) { Index index = new Index(indexName, UUID.randomUUID().toString()); - ShardRouting shardRouting = ShardRouting.newUnassigned(new ShardId(index, 0), true, EXISTING_STORE_INSTANCE, + ShardRouting shardRouting = ShardRouting.newUnassigned(new ShardId(index, 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); String nodeId = ESTestCase.randomAlphaOfLength(8); IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(new ShardId(index, 0)) @@ -95,60 +89,4 @@ public class SecurityTestUtils { metaDataBuilder.put(IndexMetaData.builder(indexMetaData).putAlias(aliasMetaData)); return metaDataBuilder.build(); } - - public static ClusterIndexHealth getClusterIndexHealth(ClusterHealthStatus status) { - IndexMetaData metaData = IndexMetaData.builder("foo").settings(Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .build()) - .build(); - final IndexRoutingTable routingTable; - switch (status) { - case RED: - routingTable = IndexRoutingTable.builder(metaData.getIndex()) - .addIndexShard(new IndexShardRoutingTable.Builder(new ShardId(metaData.getIndex(), 0)) - .addShard(ShardRouting.newUnassigned(new ShardId(metaData.getIndex(), 0), true, EXISTING_STORE_INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) - .initialize(ESTestCase.randomAlphaOfLength(8), null, 0L)) - .addShard(ShardRouting.newUnassigned(new ShardId(metaData.getIndex(), 0), false, - RecoverySource.PeerRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) - .initialize(ESTestCase.randomAlphaOfLength(8), null, 0L)) - .build()) - .build(); - break; - case YELLOW: - routingTable = IndexRoutingTable.builder(metaData.getIndex()) - .addIndexShard(new IndexShardRoutingTable.Builder(new ShardId(metaData.getIndex(), 0)) - .addShard(ShardRouting.newUnassigned(new ShardId(metaData.getIndex(), 0), true, EXISTING_STORE_INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) - .initialize(ESTestCase.randomAlphaOfLength(8), null, 0L).moveToStarted()) - .addShard(ShardRouting.newUnassigned(new ShardId(metaData.getIndex(), 0), false, - RecoverySource.PeerRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) - .initialize(ESTestCase.randomAlphaOfLength(8), null, 0L)) - .build()) - .build(); - break; - case GREEN: - routingTable = IndexRoutingTable.builder(metaData.getIndex()) - .addIndexShard(new IndexShardRoutingTable.Builder(new ShardId(metaData.getIndex(), 0)) - .addShard(ShardRouting.newUnassigned(new ShardId(metaData.getIndex(), 0), true, EXISTING_STORE_INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) - 
.initialize(ESTestCase.randomAlphaOfLength(8), null, 0L).moveToStarted()) - .addShard(ShardRouting.newUnassigned(new ShardId(metaData.getIndex(), 0), false, - RecoverySource.PeerRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) - .initialize(ESTestCase.randomAlphaOfLength(8), null, 0L).moveToStarted()) - .build()) - .build(); - break; - default: - throw new IllegalStateException("unknown status: " + status); - } - ClusterIndexHealth health = new ClusterIndexHealth(metaData, routingTable); - assertEquals(status, health.getStatus()); - return health; - } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index 09072f99fc2..dd7dda48ae8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -33,7 +33,7 @@ import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authz.AuthorizationService; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java index 08a991eb3ec..17df337d291 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java @@ -27,9 +27,8 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; -import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authz.AuthorizationService; @@ -37,12 +36,10 @@ import org.junit.Before; import java.io.IOException; import java.util.Collections; -import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.mock.orig.Mockito.times; import static org.elasticsearch.xpack.core.security.support.Exceptions.authenticationError; import static org.elasticsearch.xpack.core.security.support.Exceptions.authorizationError; -import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Matchers.any; import static 
org.mockito.Matchers.eq; @@ -220,47 +217,6 @@ public class ServerTransportFilterTests extends ESTestCase { verifyNoMoreInteractions(authcService, authzService); } - public void testHandlesKibanaUserCompatibility() throws Exception { - TransportRequest request = mock(TransportRequest.class); - User user = new User("kibana", "kibana"); - Authentication authentication = mock(Authentication.class); - final Version version = Version.fromId(randomIntBetween(Version.V_5_0_0_ID, Version.V_5_2_0_ID - 100)); - when(authentication.getVersion()).thenReturn(version); - when(authentication.getUser()).thenReturn(user); - doAnswer((i) -> { - ActionListener<Authentication> callback = - (ActionListener<Authentication>) i.getArguments()[3]; - callback.onResponse(authentication); - return Void.TYPE; - }).when(authcService).authenticate(eq("_action"), eq(request), eq((User)null), any(ActionListener.class)); - AtomicReference<String[]> rolesRef = new AtomicReference<>(); - final Role empty = Role.EMPTY; - doAnswer((i) -> { - ActionListener<Role> callback = - (ActionListener<Role>) i.getArguments()[1]; - rolesRef.set(((User) i.getArguments()[0]).roles()); - callback.onResponse(empty); - return Void.TYPE; - }).when(authzService).roles(any(User.class), any(ActionListener.class)); - ServerTransportFilter filter = getClientOrNodeFilter(); - PlainActionFuture<Void> future = new PlainActionFuture<>(); - when(channel.getVersion()).thenReturn(version); - filter.inbound("_action", request, channel, future); - assertNotNull(rolesRef.get()); - assertThat(rolesRef.get(), arrayContaining("kibana_system")); - - // test with a version that doesn't need changing - filter = getClientOrNodeFilter(); - rolesRef.set(null); - user = new KibanaUser(true); - when(authentication.getUser()).thenReturn(user); - when(authentication.getVersion()).thenReturn(Version.V_5_2_0); - future = new PlainActionFuture<>(); - filter.inbound("_action", request, channel, future); - assertNotNull(rolesRef.get()); - assertThat(rolesRef.get(), arrayContaining("kibana_system")); - } - private ServerTransportFilter getClientOrNodeFilter() throws IOException { return randomBoolean() ? getNodeFilter(true) : getClientFilter(true); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4TransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4TransportTests.java new file mode 100644 index 00000000000..5181f3a747e --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4TransportTests.java @@ -0,0 +1,383 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.transport.netty4; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.ssl.SslHandler; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.AbstractSimpleTransportTestCase; +import org.elasticsearch.transport.BindTransportException; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.ConnectionProfile; +import org.elasticsearch.transport.TcpChannel; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; +import org.elasticsearch.xpack.core.ssl.SSLConfiguration; +import org.elasticsearch.xpack.core.ssl.SSLService; + +import javax.net.SocketFactory; +import javax.net.ssl.HandshakeCompletedListener; +import javax.net.ssl.SNIHostName; +import javax.net.ssl.SNIMatcher; +import javax.net.ssl.SNIServerName; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; +import javax.net.ssl.SSLSocket; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketTimeoutException; +import java.net.UnknownHostException; +import java.nio.file.Path; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.elasticsearch.xpack.core.security.SecurityField.setting; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; + +public class SimpleSecurityNetty4TransportTests extends AbstractSimpleTransportTestCase { + + private static final ConnectionProfile SINGLE_CHANNEL_PROFILE; + + static { + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); + builder.addConnections(1, + TransportRequestOptions.Type.BULK, + TransportRequestOptions.Type.PING, + TransportRequestOptions.Type.RECOVERY, + TransportRequestOptions.Type.REG, + TransportRequestOptions.Type.STATE); + 
SINGLE_CHANNEL_PROFILE = builder.build(); + } + + private SSLService createSSLService() { + Path testnodeCert = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"); + Path testnodeKey = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem"); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.ssl.secure_key_passphrase", "testnode"); + Settings settings = Settings.builder() + .put("xpack.security.transport.ssl.enabled", true) + .put("xpack.ssl.key", testnodeKey) + .put("xpack.ssl.certificate", testnodeCert) + .put("path.home", createTempDir()) + .setSecureSettings(secureSettings) + .build(); + try { + return new SSLService(settings, TestEnvironment.newEnvironment(settings)); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public MockTransportService nettyFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, + ClusterSettings clusterSettings, boolean doHandshake) { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + NetworkService networkService = new NetworkService(Collections.emptyList()); + Settings settings1 = Settings.builder() + .put(settings) + .put("xpack.security.transport.ssl.enabled", true).build(); + Transport transport = new SecurityNetty4Transport(settings1, threadPool, + networkService, BigArrays.NON_RECYCLING_INSTANCE, namedWriteableRegistry, + new NoneCircuitBreakerService(), createSSLService()) { + + @Override + protected Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) throws IOException, + InterruptedException { + if (doHandshake) { + return super.executeHandshake(node, channel, timeout); + } else { + return version.minimumCompatibilityVersion(); + } + } + + @Override + protected Version getCurrentVersion() { + return version; + } + + }; + MockTransportService mockTransportService = + MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, + Collections.emptySet()); + mockTransportService.start(); + return mockTransportService; + } + + @Override + protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) { + settings = Settings.builder().put(settings) + .put(TcpTransport.PORT.getKey(), "0") + .build(); + MockTransportService transportService = nettyFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake); + transportService.start(); + return transportService; + } + + public void testConnectException() throws UnknownHostException { + try { + serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), + emptyMap(), emptySet(), Version.CURRENT)); + fail("Expected ConnectTransportException"); + } catch (ConnectTransportException e) { + assertThat(e.getMessage(), containsString("connect_exception")); + assertThat(e.getMessage(), containsString("[127.0.0.1:9876]")); + Throwable cause = e.getCause(); + assertThat(cause, instanceOf(IOException.class)); + } + } + + public void testBindUnavailableAddress() { + // this is on a lower level since it needs access to the TransportService before it's started + int port = serviceA.boundAddress().publishAddress().getPort(); + Settings settings = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "foobar") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") + 
.put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") + .put("transport.tcp.port", port) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { + MockTransportService transportService = nettyFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings, true); + try { + transportService.start(); + } finally { + transportService.stop(); + transportService.close(); + } + }); + assertEquals("Failed to bind to [" + port + "]", bindTransportException.getMessage()); + } + + @SuppressForbidden(reason = "Need to open socket connection") + public void testRenegotiation() throws Exception { + SSLService sslService = createSSLService(); + final SSLConfiguration sslConfiguration = sslService.getSSLConfiguration("xpack.ssl"); + SocketFactory factory = sslService.sslSocketFactory(sslConfiguration); + try (SSLSocket socket = (SSLSocket) factory.createSocket()) { + SocketAccess.doPrivileged(() -> socket.connect(serviceA.boundAddress().publishAddress().address())); + + CountDownLatch handshakeLatch = new CountDownLatch(1); + HandshakeCompletedListener firstListener = event -> handshakeLatch.countDown(); + socket.addHandshakeCompletedListener(firstListener); + socket.startHandshake(); + handshakeLatch.await(); + socket.removeHandshakeCompletedListener(firstListener); + + OutputStreamStreamOutput stream = new OutputStreamStreamOutput(socket.getOutputStream()); + stream.writeByte((byte) 'E'); + stream.writeByte((byte) 'S'); + stream.writeInt(-1); + stream.flush(); + + socket.startHandshake(); + CountDownLatch renegotiationLatch = new CountDownLatch(1); + HandshakeCompletedListener secondListener = event -> renegotiationLatch.countDown(); + socket.addHandshakeCompletedListener(secondListener); + + AtomicReference error = new AtomicReference<>(); + CountDownLatch catchReadErrorsLatch = new CountDownLatch(1); + Thread renegotiationThread = new Thread(() -> { + try { + socket.setSoTimeout(50); + socket.getInputStream().read(); + } catch (SocketTimeoutException e) { + // Ignore. We expect a timeout. 
+ } catch (IOException e) { + error.set(e); + } finally { + catchReadErrorsLatch.countDown(); + } + }); + renegotiationThread.start(); + renegotiationLatch.await(); + socket.removeHandshakeCompletedListener(secondListener); + catchReadErrorsLatch.await(); + + assertNull(error.get()); + + stream.writeByte((byte) 'E'); + stream.writeByte((byte) 'S'); + stream.writeInt(-1); + stream.flush(); + } + } + + // TODO: These tests currently rely on plaintext transports + + @Override + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33285") + public void testTcpHandshake() { + } + + // TODO: These tests as configured do not currently work with the security transport + + @Override + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33285") + public void testTransportProfilesWithPortAndHost() { + } + + public void testSNIServerNameIsPropagated() throws Exception { + SSLService sslService = createSSLService(); + final ServerBootstrap serverBootstrap = new ServerBootstrap(); + boolean success = false; + try { + serverBootstrap.group(new NioEventLoopGroup(1)); + serverBootstrap.channel(NioServerSocketChannel.class); + + final String sniIp = "sni-hostname"; + final SNIHostName sniHostName = new SNIHostName(sniIp); + final CountDownLatch latch = new CountDownLatch(2); + serverBootstrap.childHandler(new ChannelInitializer<Channel>() { + + @Override + protected void initChannel(Channel ch) { + SSLEngine serverEngine = sslService.createSSLEngine(sslService.getSSLConfiguration(setting("transport.ssl.")), + null, -1); + serverEngine.setUseClientMode(false); + SSLParameters sslParameters = serverEngine.getSSLParameters(); + sslParameters.setSNIMatchers(Collections.singletonList(new SNIMatcher(0) { + @Override + public boolean matches(SNIServerName sniServerName) { + if (sniHostName.equals(sniServerName)) { + latch.countDown(); + return true; + } else { + return false; + } + } + })); + serverEngine.setSSLParameters(sslParameters); + final SslHandler sslHandler = new SslHandler(serverEngine); + sslHandler.handshakeFuture().addListener(future -> latch.countDown()); + ch.pipeline().addFirst("sslhandler", sslHandler); + } + }); + serverBootstrap.validate(); + ChannelFuture serverFuture = serverBootstrap.bind(getLocalEphemeral()); + serverFuture.await(); + InetSocketAddress serverAddress = (InetSocketAddress) serverFuture.channel().localAddress(); + + try (MockTransportService serviceC = build( + Settings.builder() + .put("name", "TS_TEST") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") + .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") + .build(), + version0, + null, true)) { + serviceC.acceptIncomingRequests(); + + HashMap<String, String> attributes = new HashMap<>(); + attributes.put("server_name", sniIp); + DiscoveryNode node = new DiscoveryNode("server_node_id", new TransportAddress(serverAddress), attributes, + EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT); + + new Thread(() -> { + try { + serviceC.connectToNode(node, SINGLE_CHANNEL_PROFILE); + } catch (ConnectTransportException ex) { + // Ignore. The other side is not set up to do the ES handshake. So this will fail.
+ } + }).start(); + + latch.await(); + serverBootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS); + success = true; + } + } finally { + if (success == false) { + serverBootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS); + } + } + } + + public void testInvalidSNIServerName() throws Exception { + SSLService sslService = createSSLService(); + final ServerBootstrap serverBootstrap = new ServerBootstrap(); + boolean success = false; + try { + serverBootstrap.group(new NioEventLoopGroup(1)); + serverBootstrap.channel(NioServerSocketChannel.class); + + final String sniIp = "invalid_hostname"; + serverBootstrap.childHandler(new ChannelInitializer<Channel>() { + + @Override + protected void initChannel(Channel ch) { + SSLEngine serverEngine = sslService.createSSLEngine(sslService.getSSLConfiguration(setting("transport.ssl.")), + null, -1); + serverEngine.setUseClientMode(false); + final SslHandler sslHandler = new SslHandler(serverEngine); + ch.pipeline().addFirst("sslhandler", sslHandler); + } + }); + serverBootstrap.validate(); + ChannelFuture serverFuture = serverBootstrap.bind(getLocalEphemeral()); + serverFuture.await(); + InetSocketAddress serverAddress = (InetSocketAddress) serverFuture.channel().localAddress(); + + try (MockTransportService serviceC = build( + Settings.builder() + .put("name", "TS_TEST") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") + .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") + .build(), + version0, + null, true)) { + serviceC.acceptIncomingRequests(); + + HashMap<String, String> attributes = new HashMap<>(); + attributes.put("server_name", sniIp); + DiscoveryNode node = new DiscoveryNode("server_node_id", new TransportAddress(serverAddress), attributes, + EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT); + + ConnectTransportException connectException = expectThrows(ConnectTransportException.class, + () -> serviceC.connectToNode(node, SINGLE_CHANNEL_PROFILE)); + + assertThat(connectException.getMessage(), containsString("invalid DiscoveryNode server_name [invalid_hostname]")); + + serverBootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS); + success = true; + } + } finally { + if (success == false) { + serverBootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS); + } + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java index 7397ebc8c7d..1b8e35651b6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java @@ -208,7 +208,14 @@ public class SimpleSecurityNioTransportTests extends AbstractSimpleTransportTest // TODO: These tests currently rely on plaintext transports @Override - @AwaitsFix(bugUrl = "") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33285") public void testTcpHandshake() throws IOException, InterruptedException { } + + // TODO: These tests as configured do not currently work with the security transport + + @Override + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33285") + public void testTransportProfilesWithPortAndHost() { + } } diff --git
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java index 32816e40e08..4c72afeb5ce 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.user.AnonymousUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.equalTo; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java index 6bea620982f..68b54198980 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java @@ -5,15 +5,13 @@ */ package org.elasticsearch.xpack.security.user; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.InternalUserSerializationHelper; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import java.util.Arrays; @@ -60,46 +58,6 @@ public class UserSerializationTests extends ESTestCase { assertThat(readFromAuthUser.authenticatedUser(), is(authUser)); } - public void testRunAsBackcompatRead() throws Exception { - User user = new User(randomAlphaOfLengthBetween(4, 30), - randomBoolean() ? generateRandomStringArray(20, 30, false) : null); - // store the runAs user as the "authenticationUser" here to mimic old format for writing - User authUser = new User(randomAlphaOfLengthBetween(4, 30), generateRandomStringArray(20, 30, false), user); - - BytesStreamOutput output = new BytesStreamOutput(); - User.writeTo(authUser, output); - StreamInput input = output.bytes().streamInput(); - input.setVersion(randomFrom(Version.V_5_0_0, Version.V_5_4_0)); - User readFrom = User.readFrom(input); - - assertThat(readFrom.principal(), is(user.principal())); - assertThat(Arrays.equals(readFrom.roles(), user.roles()), is(true)); - User readFromAuthUser = readFrom.authenticatedUser(); - assertThat(authUser, is(notNullValue())); - assertThat(readFromAuthUser.principal(), is(authUser.principal())); - assertThat(Arrays.equals(readFromAuthUser.roles(), authUser.roles()), is(true)); - } - - public void testRunAsBackcompatWrite() throws Exception { - User user = new User(randomAlphaOfLengthBetween(4, 30), - randomBoolean() ? 
generateRandomStringArray(20, 30, false) : null); - // store the runAs user as the "authenticationUser" here to mimic old format for writing - User authUser = new User(randomAlphaOfLengthBetween(4, 30), generateRandomStringArray(20, 30, false), user); - - BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(randomFrom(Version.V_5_0_0, Version.V_5_4_0)); - User.writeTo(authUser, output); - StreamInput input = output.bytes().streamInput(); - User readFrom = User.readFrom(input); - - assertThat(readFrom.principal(), is(user.principal())); - assertThat(Arrays.equals(readFrom.roles(), user.roles()), is(true)); - User readFromAuthUser = readFrom.authenticatedUser(); - assertThat(authUser, is(notNullValue())); - assertThat(readFromAuthUser.principal(), is(authUser.principal())); - assertThat(Arrays.equals(readFromAuthUser.roles(), authUser.roles()), is(true)); - } - public void testSystemUserReadAndWrite() throws Exception { BytesStreamOutput output = new BytesStreamOutput(); diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index 039e78c1495..62097e76b97 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -19,7 +19,8 @@ archivesBaseName = 'x-pack-sql' integTest.enabled = false dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') compileOnly(project(':modules:lang-painless')) { // exclude ASM to not affect featureAware task on Java 10+ exclude group: "org.ow2.asm" diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index a0d9b24c507..1a7d6115e15 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -8,7 +8,7 @@ archivesBaseName = "x-pack-sql-jdbc" forbiddenApisMain { // does not depend on core, so only jdk and http signatures should be checked - signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } dependencies { diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java index 201ae251ca0..ebdeaef15ca 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java @@ -133,72 +133,37 @@ class JdbcResultSet implements ResultSet, JdbcWrapper { @Override public boolean getBoolean(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? (Boolean) val : false; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a boolean", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Boolean.class) : false; } @Override public byte getByte(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).byteValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a byte", cce); - } + return column(columnIndex) != null ? 
getObject(columnIndex, Byte.class) : 0; } @Override public short getShort(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).shortValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a short", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Short.class) : 0; } @Override public int getInt(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).intValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to an int", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Integer.class) : 0; } @Override public long getLong(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).longValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a long", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Long.class) : 0; } @Override public float getFloat(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).floatValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a float", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Float.class) : 0; } @Override public double getDouble(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).doubleValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a double", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Double.class) : 0; } @Override @@ -272,15 +237,29 @@ class JdbcResultSet implements ResultSet, JdbcWrapper { @Override public Date getDate(String columnLabel) throws SQLException { + // TODO: the error message in case the value in the column cannot be converted to a Date refers to a column index + // (for example - "unable to convert column 4 to a long") and not to the column name, which is a bit confusing. + // Should we reconsider this? Maybe by catching the exception here and rethrowing it with the columnLabel instead. return getDate(column(columnLabel)); } private Long dateTime(int columnIndex) throws SQLException { Object val = column(columnIndex); + JDBCType type = cursor.columns().get(columnIndex - 1).type; try { + // TODO: the B6 appendix of the jdbc spec does mention CHAR, VARCHAR, LONGVARCHAR, DATE, TIMESTAMP as supported + // jdbc types that should be handled by getDate and getTime methods. From all of those we support VARCHAR and + // TIMESTAMP. Should we consider the VARCHAR conversion as a later enhancement? + if (JDBCType.TIMESTAMP.equals(type)) { + // the cursor can return an Integer if the date-since-epoch is small enough, XContentParser (Jackson) will + // return the "smallest" data type for numbers when parsing + // TODO: this should probably be handled server side + return val == null ? null : ((Number) val).longValue(); + }; return val == null ? 
null : (Long) val; } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a long", cce); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", val, type.getName()), cce); } } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java index 3b5180b71f7..7b638d8bd09 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.sql.type.DataType; import java.sql.Date; import java.sql.JDBCType; -import java.sql.SQLDataException; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.sql.Time; @@ -56,9 +55,10 @@ final class TypeConverter { } - private static final long DAY_IN_MILLIS = 60 * 60 * 24; + private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000; private static final Map, JDBCType> javaToJDBC; + static { Map, JDBCType> aMap = Arrays.stream(DataType.values()) .filter(dataType -> dataType.javaClass() != null @@ -120,6 +120,7 @@ final class TypeConverter { } } + static long convertFromCalendarToUTC(long value, Calendar cal) { if (cal == null) { return value; @@ -143,11 +144,15 @@ final class TypeConverter { return (T) convert(val, columnType); } - if (type.isInstance(val)) { + // converting a Long to a Timestamp shouldn't be possible according to the spec, + // it feels a little brittle to check this scenario here and I don't particularly like it + // TODO: can we do any better or should we go over the spec and allow getLong(date) to be valid? 
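+        // Concrete effect of the guard below (descriptive comment, added for clarity): for a TIMESTAMP
+        // column the cursor already holds a Long of millis-since-epoch, so an unguarded
+        // type.isInstance(val) check would let getObject(i, Long.class) return the raw millis via
+        // type.cast(val). Excluding the (Long.class, TIMESTAMP) pair instead routes such calls through
+        // the per-type conversion switch, where asLong() no longer accepts TIMESTAMP and throws the
+        // "Unable to convert ..." SQLException.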
+ if (!(type == Long.class && columnType == JDBCType.TIMESTAMP) && type.isInstance(val)) { try { return type.cast(val); } catch (ClassCastException cce) { - throw new SQLDataException("Unable to convert " + val.getClass().getName() + " to " + columnType, cce); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a %s", val, + columnType.getName(), type.getName()), cce); } } @@ -205,7 +210,8 @@ final class TypeConverter { if (type == OffsetDateTime.class) { return (T) asOffsetDateTime(val, columnType); } - throw new SQLException("Conversion from type [" + columnType + "] to [" + type.getName() + "] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a %s", val, + columnType.getName(), type.getName())); } /** @@ -336,8 +342,11 @@ final class TypeConverter { case FLOAT: case DOUBLE: return Boolean.valueOf(Integer.signum(((Number) val).intValue()) != 0); + case VARCHAR: + return Boolean.valueOf((String) val); default: - throw new SQLException("Conversion from type [" + columnType + "] to [Boolean] not supported"); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Boolean", val, columnType.getName())); } } @@ -355,10 +364,16 @@ final class TypeConverter { case FLOAT: case DOUBLE: return safeToByte(safeToLong(((Number) val).doubleValue())); + case VARCHAR: + try { + return Byte.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Byte", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Byte] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Byte", val, columnType.getName())); } private static Short asShort(Object val, JDBCType columnType) throws SQLException { @@ -374,10 +389,16 @@ final class TypeConverter { case FLOAT: case DOUBLE: return safeToShort(safeToLong(((Number) val).doubleValue())); + case VARCHAR: + try { + return Short.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Short", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Short] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Short", val, columnType.getName())); } private static Integer asInteger(Object val, JDBCType columnType) throws SQLException { @@ -393,10 +414,18 @@ final class TypeConverter { case FLOAT: case DOUBLE: return safeToInt(safeToLong(((Number) val).doubleValue())); + case VARCHAR: + try { + return Integer.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to an Integer", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Integer] not supported"); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to an Integer", val, columnType.getName())); } private static Long asLong(Object val, JDBCType columnType) throws SQLException { @@ -412,12 +441,21 @@ final class TypeConverter { case FLOAT: case DOUBLE: return safeToLong(((Number) val).doubleValue()); - case TIMESTAMP: - return ((Number) val).longValue(); + //TODO: should we support 
conversion to TIMESTAMP? + //The spec says that getLong() should support the following types conversions: + //TINYINT, SMALLINT, INTEGER, BIGINT, REAL, FLOAT, DOUBLE, DECIMAL, NUMERIC, BIT, BOOLEAN, CHAR, VARCHAR, LONGVARCHAR + //case TIMESTAMP: + // return ((Number) val).longValue(); + case VARCHAR: + try { + return Long.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Long", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Long] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", val, columnType.getName())); } private static Float asFloat(Object val, JDBCType columnType) throws SQLException { @@ -433,10 +471,16 @@ final class TypeConverter { case FLOAT: case DOUBLE: return Float.valueOf((((float) ((Number) val).doubleValue()))); + case VARCHAR: + try { + return Float.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Float", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Float] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Float", val, columnType.getName())); } private static Double asDouble(Object val, JDBCType columnType) throws SQLException { @@ -451,32 +495,41 @@ final class TypeConverter { case REAL: case FLOAT: case DOUBLE: + return Double.valueOf(((Number) val).doubleValue()); + case VARCHAR: + try { + return Double.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Double", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Double] not supported"); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Double", val, columnType.getName())); } private static Date asDate(Object val, JDBCType columnType) throws SQLException { if (columnType == JDBCType.TIMESTAMP) { return new Date(utcMillisRemoveTime(((Number) val).longValue())); } - throw new SQLException("Conversion from type [" + columnType + "] to [Date] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Date", val, columnType.getName())); } private static Time asTime(Object val, JDBCType columnType) throws SQLException { if (columnType == JDBCType.TIMESTAMP) { return new Time(utcMillisRemoveDate(((Number) val).longValue())); } - throw new SQLException("Conversion from type [" + columnType + "] to [Time] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Time", val, columnType.getName())); } private static Timestamp asTimestamp(Object val, JDBCType columnType) throws SQLException { if (columnType == JDBCType.TIMESTAMP) { return new Timestamp(((Number) val).longValue()); } - throw new SQLException("Conversion from type [" + columnType + "] to [Timestamp] not supported"); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Timestamp", val, columnType.getName())); } private static byte[] asByteArray(Object val, JDBCType columnType) { diff --git 
a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java index 9da06f6537c..35a3ec57487 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java @@ -25,6 +25,7 @@ import java.util.Date; import java.util.Locale; import java.util.Map; +import static java.lang.String.format; import static java.sql.JDBCType.BIGINT; import static java.sql.JDBCType.BOOLEAN; import static java.sql.JDBCType.DOUBLE; @@ -68,7 +69,7 @@ public class JdbcPreparedStatementTests extends ESTestCase { JdbcPreparedStatement jps = createJdbcPreparedStatement(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, true, Types.TIMESTAMP)); - assertEquals("Conversion from type [BOOLEAN] to [Timestamp] not supported", sqle.getMessage()); + assertEquals("Unable to convert value [true] of type [BOOLEAN] to a Timestamp", sqle.getMessage()); } public void testSettingStringValues() throws SQLException { @@ -92,7 +93,7 @@ public class JdbcPreparedStatementTests extends ESTestCase { JdbcPreparedStatement jps = createJdbcPreparedStatement(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, "foo bar", Types.INTEGER)); - assertEquals("Conversion from type [VARCHAR] to [Integer] not supported", sqle.getMessage()); + assertEquals("Unable to convert value [foo bar] of type [VARCHAR] to an Integer", sqle.getMessage()); } public void testSettingByteTypeValues() throws SQLException { @@ -128,7 +129,7 @@ public class JdbcPreparedStatementTests extends ESTestCase { JdbcPreparedStatement jps = createJdbcPreparedStatement(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, (byte) 6, Types.TIMESTAMP)); - assertEquals("Conversion from type [TINYINT] to [Timestamp] not supported", sqle.getMessage()); + assertEquals("Unable to convert value [6] of type [TINYINT] to a Timestamp", sqle.getMessage()); } public void testSettingShortTypeValues() throws SQLException { @@ -161,7 +162,7 @@ public class JdbcPreparedStatementTests extends ESTestCase { JdbcPreparedStatement jps = createJdbcPreparedStatement(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, (short) 6, Types.TIMESTAMP)); - assertEquals("Conversion from type [SMALLINT] to [Timestamp] not supported", sqle.getMessage()); + assertEquals("Unable to convert value [6] of type [SMALLINT] to a Timestamp", sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> jps.setObject(1, 256, Types.TINYINT)); assertEquals("Numeric " + 256 + " out of range", sqle.getMessage()); @@ -195,7 +196,7 @@ public class JdbcPreparedStatementTests extends ESTestCase { int someInt = randomInt(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someInt, Types.TIMESTAMP)); - assertEquals("Conversion from type [INTEGER] to [Timestamp] not supported", sqle.getMessage()); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [INTEGER] to a Timestamp", someInt), sqle.getMessage()); Integer randomIntNotShort = randomIntBetween(32768, Integer.MAX_VALUE); sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomIntNotShort, Types.SMALLINT)); @@ -236,7 +237,7 @@ public class JdbcPreparedStatementTests extends ESTestCase { long someLong 
= randomLong(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someLong, Types.TIMESTAMP)); - assertEquals("Conversion from type [BIGINT] to [Timestamp] not supported", sqle.getMessage()); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [BIGINT] to a Timestamp", someLong), sqle.getMessage()); Long randomLongNotShort = randomLongBetween(Integer.MAX_VALUE + 1, Long.MAX_VALUE); sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomLongNotShort, Types.INTEGER)); @@ -277,7 +278,7 @@ public class JdbcPreparedStatementTests extends ESTestCase { float someFloat = randomFloat(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someFloat, Types.TIMESTAMP)); - assertEquals("Conversion from type [REAL] to [Timestamp] not supported", sqle.getMessage()); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [REAL] to a Timestamp", someFloat), sqle.getMessage()); Float floatNotInt = 5_155_000_000f; sqle = expectThrows(SQLException.class, () -> jps.setObject(1, floatNotInt, Types.INTEGER)); @@ -316,7 +317,8 @@ public class JdbcPreparedStatementTests extends ESTestCase { double someDouble = randomDouble(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someDouble, Types.TIMESTAMP)); - assertEquals("Conversion from type [DOUBLE] to [Timestamp] not supported", sqle.getMessage()); + assertEquals( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [DOUBLE] to a Timestamp", someDouble), sqle.getMessage()); Double doubleNotInt = 5_155_000_000d; sqle = expectThrows(SQLException.class, () -> jps.setObject(1, doubleNotInt, Types.INTEGER)); @@ -361,7 +363,7 @@ public class JdbcPreparedStatementTests extends ESTestCase { public void testSettingTimestampValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - Timestamp someTimestamp = new Timestamp(randomMillisSinceEpoch()); + Timestamp someTimestamp = new Timestamp(randomLong()); jps.setTimestamp(1, someTimestamp); assertEquals(someTimestamp.getTime(), ((Date)value(jps)).getTime()); assertEquals(TIMESTAMP, jdbcType(jps)); @@ -372,7 +374,7 @@ public class JdbcPreparedStatementTests extends ESTestCase { assertEquals(1456708675000L, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); assertEquals(TIMESTAMP, jdbcType(jps)); - long beforeEpochTime = -randomMillisSinceEpoch(); + long beforeEpochTime = randomLongBetween(Long.MIN_VALUE, 0); jps.setTimestamp(1, new Timestamp(beforeEpochTime), nonDefaultCal); assertEquals(beforeEpochTime, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); assertTrue(value(jps) instanceof java.util.Date); @@ -384,7 +386,7 @@ public class JdbcPreparedStatementTests extends ESTestCase { public void testThrownExceptionsWhenSettingTimestampValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - Timestamp someTimestamp = new Timestamp(randomMillisSinceEpoch()); + Timestamp someTimestamp = new Timestamp(randomLong()); SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someTimestamp, Types.INTEGER)); assertEquals("Conversion from type java.sql.Timestamp to INTEGER not supported", sqle.getMessage()); @@ -416,12 +418,12 @@ public class JdbcPreparedStatementTests extends ESTestCase { public void testSettingSqlDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - java.sql.Date someSqlDate = new 
java.sql.Date(randomMillisSinceEpoch()); + java.sql.Date someSqlDate = new java.sql.Date(randomLong()); jps.setDate(1, someSqlDate); assertEquals(someSqlDate.getTime(), ((Date)value(jps)).getTime()); assertEquals(TIMESTAMP, jdbcType(jps)); - someSqlDate = new java.sql.Date(randomMillisSinceEpoch()); + someSqlDate = new java.sql.Date(randomLong()); Calendar nonDefaultCal = randomCalendar(); jps.setDate(1, someSqlDate, nonDefaultCal); assertEquals(someSqlDate.getTime(), convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); @@ -435,17 +437,17 @@ public class JdbcPreparedStatementTests extends ESTestCase { public void testThrownExceptionsWhenSettingSqlDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - java.sql.Date someSqlDate = new java.sql.Date(randomMillisSinceEpoch()); + java.sql.Date someSqlDate = new java.sql.Date(randomLong()); SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, - () -> jps.setObject(1, new java.sql.Date(randomMillisSinceEpoch()), Types.DOUBLE)); + () -> jps.setObject(1, new java.sql.Date(randomLong()), Types.DOUBLE)); assertEquals("Conversion from type " + someSqlDate.getClass().getName() + " to DOUBLE not supported", sqle.getMessage()); } public void testSettingCalendarValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); Calendar someCalendar = randomCalendar(); - someCalendar.setTimeInMillis(randomMillisSinceEpoch()); + someCalendar.setTimeInMillis(randomLong()); jps.setObject(1, someCalendar); assertEquals(someCalendar.getTime(), (Date) value(jps)); @@ -472,7 +474,7 @@ public class JdbcPreparedStatementTests extends ESTestCase { public void testSettingDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - Date someDate = new Date(randomMillisSinceEpoch()); + Date someDate = new Date(randomLong()); jps.setObject(1, someDate); assertEquals(someDate, (Date) value(jps)); @@ -486,7 +488,7 @@ public class JdbcPreparedStatementTests extends ESTestCase { public void testThrownExceptionsWhenSettingDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - Date someDate = new Date(randomMillisSinceEpoch()); + Date someDate = new Date(randomLong()); SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someDate, Types.BIGINT)); assertEquals("Conversion from type " + someDate.getClass().getName() + " to BIGINT not supported", sqle.getMessage()); @@ -549,10 +551,6 @@ public class JdbcPreparedStatementTests extends ESTestCase { assertEquals("Conversion from type byte[] to DOUBLE not supported", sqle.getMessage()); } - private long randomMillisSinceEpoch() { - return randomLongBetween(0, System.currentTimeMillis()); - } - private JdbcPreparedStatement createJdbcPreparedStatement() throws SQLException { return new JdbcPreparedStatement(null, JdbcConfiguration.create("jdbc:es://l:1", null, 0), "?"); } diff --git a/x-pack/plugin/sql/sql-action/build.gradle b/x-pack/plugin/sql/sql-action/build.gradle index bf79fd824ef..9e53c36bbf6 100644 --- a/x-pack/plugin/sql/sql-action/build.gradle +++ b/x-pack/plugin/sql/sql-action/build.gradle @@ -2,9 +2,6 @@ /* * This project contains transport-level requests and responses that are shared between x-pack plugin and qa tests */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' description = 'Request and response objects shared by the cli, jdbc ' + @@ -34,7 +31,7 @@ 
dependencies { forbiddenApisMain { //sql does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } dependencyLicenses { @@ -141,14 +138,4 @@ thirdPartyAudit.excludes = [ 'org.zeromq.ZMQ$Context', 'org.zeromq.ZMQ$Socket', 'org.zeromq.ZMQ' -] - -if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { - // Used by Log4J 2.11.1 - thirdPartyAudit.excludes += [ - 'java.io.ObjectInputFilter', - 'java.io.ObjectInputFilter$Config', - 'java.io.ObjectInputFilter$FilterInfo', - 'java.io.ObjectInputFilter$Status' - ] -} \ No newline at end of file +] \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index fdedaf3fc57..00000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -73dd7703a94ec2357581f65ee7c1c4d618ff310f \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 00000000000..50a21f5c504 --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +06c1e4fa838807059d27aaf5405cfdfe7303369c \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle index b90b07abad3..0b2559c6a84 100644 --- a/x-pack/plugin/sql/sql-cli/build.gradle +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -1,3 +1,4 @@ +import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask /* * This project is named sql-cli because it is in the "org.elasticsearch.plugin" @@ -74,11 +75,8 @@ artifacts { } -forbiddenApisMain { - signaturesURLs += file('src/forbidden/cli-signatures.txt').toURI().toURL() -} -forbiddenApisTest { - signaturesURLs += file('src/forbidden/cli-signatures.txt').toURI().toURL() +tasks.withType(ForbiddenApisCliTask) { + signaturesFiles += files('src/forbidden/cli-signatures.txt') } thirdPartyAudit.excludes = [ diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java index 357a4bcb5a7..6431f10a492 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.sql.client.Version; import org.jline.terminal.TerminalBuilder; import java.io.IOException; import java.net.ConnectException; +import java.sql.SQLInvalidAuthorizationSpecException; import java.util.Arrays; import java.util.List; import java.util.logging.LogManager; @@ -139,6 +140,10 @@ public class Cli extends LoggingAwareCommand { // Most likely Elasticsearch is not running throw new UserException(ExitCodes.IO_ERROR, "Cannot connect to the server " + con.connectionString() + " - " + ex.getCause().getMessage()); + } else if (ex.getCause() != null && ex.getCause() instanceof SQLInvalidAuthorizationSpecException) { + throw new UserException(ExitCodes.NOPERM, + "Cannot establish a secure connection to the server " + + con.connectionString() + " - " + ex.getCause().getMessage()); } else { // Most 
likely we connected to something other than Elasticsearch throw new UserException(ExitCodes.DATA_ERROR, diff --git a/x-pack/plugin/sql/sql-client/build.gradle b/x-pack/plugin/sql/sql-client/build.gradle index fbc411e4459..c4ee030d456 100644 --- a/x-pack/plugin/sql/sql-client/build.gradle +++ b/x-pack/plugin/sql/sql-client/build.gradle @@ -26,7 +26,7 @@ dependencyLicenses { forbiddenApisMain { // does not depend on core, so only jdk and http signatures should be checked - signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } forbiddenApisTest { diff --git a/x-pack/plugin/sql/sql-proto/build.gradle b/x-pack/plugin/sql/sql-proto/build.gradle index 7f26176e3c7..7d28336bfc5 100644 --- a/x-pack/plugin/sql/sql-proto/build.gradle +++ b/x-pack/plugin/sql/sql-proto/build.gradle @@ -2,9 +2,6 @@ /* * This project contains XContent protocol classes shared between server and http client */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' description = 'Request and response objects shared by the cli, jdbc ' + @@ -25,7 +22,7 @@ dependencies { forbiddenApisMain { //sql does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } dependencyLicenses { diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 index 490a5486b42..9af2bd6a011 100644 --- a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 +++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 @@ -50,18 +50,18 @@ statement )* ')')? statement #debug - | SHOW TABLES (LIKE? pattern)? #showTables - | SHOW COLUMNS (FROM | IN) tableIdentifier #showColumns - | (DESCRIBE | DESC) tableIdentifier #showColumns - | SHOW FUNCTIONS (LIKE? pattern)? #showFunctions + | SHOW TABLES (tableLike=likePattern | tableIdent=tableIdentifier)? #showTables + | SHOW COLUMNS (FROM | IN) (tableLike=likePattern | tableIdent=tableIdentifier) #showColumns + | (DESCRIBE | DESC) (tableLike=likePattern | tableIdent=tableIdentifier) #showColumns + | SHOW FUNCTIONS (likePattern)? #showFunctions | SHOW SCHEMAS #showSchemas | SYS CATALOGS #sysCatalogs - | SYS TABLES (CATALOG LIKE? clusterPattern=pattern)? - (LIKE? tablePattern=pattern)? + | SYS TABLES (CATALOG clusterLike=likePattern)? + (tableLike=likePattern | tableIdent=tableIdentifier)? (TYPE string (',' string)* )? #sysTables | SYS COLUMNS (CATALOG cluster=string)? - (TABLE LIKE? indexPattern=pattern)? - (LIKE? columnPattern=pattern)? #sysColumns + (TABLE tableLike=likePattern | tableIdent=tableIdentifier)? + (columnPattern=likePattern)? #sysColumns | SYS TYPES #sysTypes | SYS TABLE TYPES #sysTableTypes ; @@ -189,6 +189,10 @@ predicate | IS NOT? kind=NULL ; +likePattern + : LIKE pattern + ; + pattern : value=string patternEscape? 
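// Illustrative statements accepted by the rewritten rules above (examples only, not grammar content):
//   SHOW TABLES LIKE 'log-%'                        -- tableLike via likePattern
//   SHOW TABLES my_index                            -- tableIdent via tableIdentifier
//   SYS TABLES CATALOG LIKE 'my_cluster' LIKE 'l%'  -- clusterLike and tableLike together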
; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java index 10586c991b1..0382729aa9f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -16,9 +16,12 @@ import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndicesOptions.Option; +import org.elasticsearch.action.support.IndicesOptions.WildcardStates; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.xpack.sql.type.EsField; @@ -116,6 +119,10 @@ public class IndexResolver { } } + private static final IndicesOptions INDICES_ONLY_OPTIONS = new IndicesOptions( + EnumSet.of(Option.ALLOW_NO_INDICES, Option.IGNORE_UNAVAILABLE, Option.IGNORE_ALIASES), EnumSet.of(WildcardStates.OPEN)); + + private final Client client; private final String clusterName; @@ -139,14 +146,15 @@ public class IndexResolver { boolean retrieveAliases = CollectionUtils.isEmpty(types) || types.contains(IndexType.ALIAS); boolean retrieveIndices = CollectionUtils.isEmpty(types) || types.contains(IndexType.INDEX); + String[] indices = Strings.commaDelimitedListToStringArray(indexWildcard); if (retrieveAliases) { GetAliasesRequest aliasRequest = new GetAliasesRequest() .local(true) - .aliases(indexWildcard) + .aliases(indices) .indicesOptions(IndicesOptions.lenientExpandOpen()); client.admin().indices().getAliases(aliasRequest, ActionListener.wrap(aliases -> - resolveIndices(indexWildcard, javaRegex, aliases, retrieveIndices, listener), + resolveIndices(indices, javaRegex, aliases, retrieveIndices, listener), ex -> { // with security, two exception can be thrown: // INFE - if no alias matches @@ -154,34 +162,36 @@ public class IndexResolver { // in both cases, that is allowed and we continue with the indices request if (ex instanceof IndexNotFoundException || ex instanceof ElasticsearchSecurityException) { - resolveIndices(indexWildcard, javaRegex, null, retrieveIndices, listener); + resolveIndices(indices, javaRegex, null, retrieveIndices, listener); } else { listener.onFailure(ex); } })); } else { - resolveIndices(indexWildcard, javaRegex, null, retrieveIndices, listener); + resolveIndices(indices, javaRegex, null, retrieveIndices, listener); } } - private void resolveIndices(String indexWildcard, String javaRegex, GetAliasesResponse aliases, + private void resolveIndices(String[] indices, String javaRegex, GetAliasesResponse aliases, boolean retrieveIndices, ActionListener> listener) { if (retrieveIndices) { GetIndexRequest indexRequest = new GetIndexRequest() .local(true) - .indices(indexWildcard) - .indicesOptions(IndicesOptions.lenientExpandOpen()); + .indices(indices) + .features(Feature.SETTINGS) + .includeDefaults(false) + .indicesOptions(INDICES_ONLY_OPTIONS); client.admin().indices().getIndex(indexRequest, - ActionListener.wrap(indices -> 
filterResults(indexWildcard, javaRegex, aliases, indices, listener), + ActionListener.wrap(response -> filterResults(javaRegex, aliases, response, listener), listener::onFailure)); } else { - filterResults(indexWildcard, javaRegex, aliases, null, listener); + filterResults(javaRegex, aliases, null, listener); } } - private void filterResults(String indexWildcard, String javaRegex, GetAliasesResponse aliases, GetIndexResponse indices, + private void filterResults(String javaRegex, GetAliasesResponse aliases, GetIndexResponse indices, ActionListener> listener) { // since the index name does not support ?, filter the results manually @@ -300,8 +310,7 @@ public class IndexResolver { private static GetIndexRequest createGetIndexRequest(String index) { return new GetIndexRequest() .local(true) - .indices(index) - .features(Feature.MAPPINGS) + .indices(Strings.commaDelimitedListToStringArray(index)) //lenient because we throw our own errors looking at the response e.g. if something was not resolved //also because this way security doesn't throw authorization exceptions but rather honours ignore_unavailable .indicesOptions(IndicesOptions.lenientExpandOpen()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index 055e34758cc..d0bff77a648 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; @@ -92,7 +93,7 @@ public class Querier { log.trace("About to execute query {} on {}", StringUtils.toString(sourceBuilder), index); } - SearchRequest search = prepareRequest(client, sourceBuilder, timeout, index); + SearchRequest search = prepareRequest(client, sourceBuilder, timeout, Strings.commaDelimitedListToStringArray(index)); ActionListener l; if (query.isAggsOnly()) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java index 5851e991314..8ee34e32a55 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java @@ -82,7 +82,13 @@ public abstract class Expressions { } public static String name(Expression e) { - return e instanceof NamedExpression ? ((NamedExpression) e).name() : e.nodeName(); + if (e instanceof NamedExpression) { + return ((NamedExpression) e).name(); + } else if (e instanceof Literal) { + return e.toString(); + } else { + return e.nodeName(); + } } public static List names(Collection e) { @@ -120,4 +126,4 @@ public abstract class Expressions { return e.dataType().isNumeric()? 
TypeResolution.TYPE_RESOLVED : new TypeResolution( "Argument required to be numeric ('" + Expressions.name(e) + "' of type '" + e.dataType().esType + "')"); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java index 8462ee293cc..309ee4e8e86 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java @@ -10,6 +10,7 @@ import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.LiteralAttribute; import org.elasticsearch.xpack.sql.expression.function.Function; import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; @@ -68,6 +69,9 @@ public abstract class ScalarFunction extends Function { if (attr instanceof AggregateFunctionAttribute) { return asScriptFrom((AggregateFunctionAttribute) attr); } + if (attr instanceof LiteralAttribute) { + return asScriptFrom((LiteralAttribute) attr); + } // fall-back to return asScriptFrom((FieldAttribute) attr); } @@ -98,6 +102,12 @@ public abstract class ScalarFunction extends Function { aggregate.dataType()); } + protected ScriptTemplate asScriptFrom(LiteralAttribute literal) { + return new ScriptTemplate(formatScript("{}"), + paramsBuilder().variable(literal.literal()).build(), + literal.dataType()); + } + protected String formatScript(String scriptTemplate) { return formatTemplate(scriptTemplate); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java index 5715e19963c..e95fec86397 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryNumericFunction; @@ -65,7 +66,7 @@ public abstract class ArithmeticFunction extends BinaryNumericFunction { public String name() { StringBuilder sb = new StringBuilder(); sb.append("("); - sb.append(left()); + sb.append(Expressions.name(left())); if (!(left() instanceof Literal)) { sb.insert(1, "("); sb.append(")"); @@ -74,7 +75,7 @@ public abstract class ArithmeticFunction extends BinaryNumericFunction { sb.append(operation); sb.append(" "); int pos = sb.length(); - sb.append(right()); + sb.append(Expressions.name(right())); if 
(!(right() instanceof Literal)) { sb.insert(pos, "("); sb.append(")"); @@ -87,8 +88,4 @@ public abstract class ArithmeticFunction extends BinaryNumericFunction { public String toString() { return name() + "#" + functionId(); } - - protected boolean useParanthesis() { - return !(left() instanceof Literal) || !(right() instanceof Literal); - } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java index 7ce65aa4cfe..f2512672c6a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java @@ -121,12 +121,14 @@ abstract class CommandBuilder extends LogicalPlanBuilder { @Override public Object visitShowFunctions(ShowFunctionsContext ctx) { - return new ShowFunctions(source(ctx), visitPattern(ctx.pattern())); + return new ShowFunctions(source(ctx), visitLikePattern(ctx.likePattern())); } @Override public Object visitShowTables(ShowTablesContext ctx) { - return new ShowTables(source(ctx), visitPattern(ctx.pattern())); + TableIdentifier ti = visitTableIdentifier(ctx.tableIdent); + String index = ti != null ? ti.qualifiedIndex() : null; + return new ShowTables(source(ctx), index, visitLikePattern(ctx.likePattern())); } @Override @@ -136,8 +138,9 @@ abstract class CommandBuilder extends LogicalPlanBuilder { @Override public Object visitShowColumns(ShowColumnsContext ctx) { - TableIdentifier identifier = visitTableIdentifier(ctx.tableIdentifier()); - return new ShowColumns(source(ctx), identifier.index()); + TableIdentifier ti = visitTableIdentifier(ctx.tableIdent); + String index = ti != null ? ti.qualifiedIndex() : null; + return new ShowColumns(source(ctx), index, visitLikePattern(ctx.likePattern())); } @Override @@ -154,9 +157,9 @@ abstract class CommandBuilder extends LogicalPlanBuilder { if (value != null) { // check special ODBC wildcard case if (value.equals(StringUtils.SQL_WILDCARD) && ctx.string().size() == 1) { - // since % is the same as not specifying a value, choose + // convert % to enumeration // https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/value-list-arguments?view=ssdt-18vs2017 - // that is skip the value + types.addAll(IndexType.VALID); } // special case for legacy apps (like msquery) that always asks for 'TABLE' // which we manually map to all concrete tables supported @@ -172,13 +175,17 @@ abstract class CommandBuilder extends LogicalPlanBuilder { // if the ODBC enumeration is specified, skip validation EnumSet set = types.isEmpty() ? null : EnumSet.copyOf(types); - return new SysTables(source(ctx), visitPattern(ctx.clusterPattern), visitPattern(ctx.tablePattern), set, legacyTableType); + TableIdentifier ti = visitTableIdentifier(ctx.tableIdent); + String index = ti != null ? ti.qualifiedIndex() : null; + return new SysTables(source(ctx), visitLikePattern(ctx.clusterLike), index, visitLikePattern(ctx.tableLike), set, legacyTableType); } @Override public Object visitSysColumns(SysColumnsContext ctx) { - Location loc = source(ctx); - return new SysColumns(loc, string(ctx.cluster), visitPattern(ctx.indexPattern), visitPattern(ctx.columnPattern)); + TableIdentifier ti = visitTableIdentifier(ctx.tableIdent); + String index = ti != null ? 
ti.qualifiedIndex() : null; + return new SysColumns(source(ctx), string(ctx.cluster), index, visitLikePattern(ctx.tableLike), + visitLikePattern(ctx.columnPattern)); } @Override @@ -190,4 +197,4 @@ abstract class CommandBuilder extends LogicalPlanBuilder { public Object visitSysTableTypes(SysTableTypesContext ctx) { return new SysTableTypes(source(ctx)); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java index e202803b261..0c7ecbc7ddf 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java @@ -63,6 +63,7 @@ import org.elasticsearch.xpack.sql.parser.SqlBaseParser.FunctionExpressionContex import org.elasticsearch.xpack.sql.parser.SqlBaseParser.FunctionTemplateContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.GuidEscapedLiteralContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.IntegerLiteralContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.LikePatternContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.LogicalBinaryContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.LogicalNotContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.MatchQueryContext; @@ -220,6 +221,11 @@ abstract class ExpressionBuilder extends IdentifierBuilder { return pCtx.NOT() != null ? new Not(loc, e) : e; } + @Override + public LikePattern visitLikePattern(LikePatternContext ctx) { + return ctx == null ? null : visitPattern(ctx.pattern()); + } + @Override public LikePattern visitPattern(PatternContext ctx) { if (ctx == null) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java index 8c79ae1ef05..2c9e8e314ef 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java @@ -17,27 +17,17 @@ abstract class IdentifierBuilder extends AbstractBuilder { @Override public TableIdentifier visitTableIdentifier(TableIdentifierContext ctx) { + if (ctx == null) { + return null; + } + Location source = source(ctx); ParseTree tree = ctx.name != null ? ctx.name : ctx.TABLE_IDENTIFIER(); String index = tree.getText(); - validateIndex(index, source); return new TableIdentifier(source, visitIdentifier(ctx.catalog), index); } - // see https://github.com/elastic/elasticsearch/issues/6736 - static void validateIndex(String index, Location source) { - for (int i = 0; i < index.length(); i++) { - char c = index.charAt(i); - if (Character.isUpperCase(c)) { - throw new ParsingException(source, "Invalid index name (needs to be lowercase) {}", index); - } - if (c == '\\' || c == '/' || c == '<' || c == '>' || c == '|' || c == ',' || c == ' ') { - throw new ParsingException(source, "Invalid index name (illegal character {}) {}", c, index); - } - } - } - @Override public String visitIdentifier(IdentifierContext ctx) { return ctx == null ? 
null : ctx.getText(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java index 72c417992e3..8f261c0d3d0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java @@ -551,6 +551,18 @@ class SqlBaseBaseListener implements SqlBaseListener { *
<p>The default implementation does nothing.</p>
    */ @Override public void exitPredicate(SqlBaseParser.PredicateContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
    + */ + @Override public void enterLikePattern(SqlBaseParser.LikePatternContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
    + */ + @Override public void exitLikePattern(SqlBaseParser.LikePatternContext ctx) { } /** * {@inheritDoc} * diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java index fd35ec421f6..837e5057c36 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java @@ -326,6 +326,13 @@ class SqlBaseBaseVisitor extends AbstractParseTreeVisitor implements SqlBa * {@link #visitChildren} on {@code ctx}.
</p>
    */ @Override public T visitPredicate(SqlBaseParser.PredicateContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *
<p>The default implementation returns the result of calling
+ * {@link #visitChildren} on {@code ctx}.</p>
    + */ + @Override public T visitLikePattern(SqlBaseParser.LikePatternContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java index 18b2a21c02e..82c2ac90e77 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java @@ -509,6 +509,16 @@ interface SqlBaseListener extends ParseTreeListener { * @param ctx the parse tree */ void exitPredicate(SqlBaseParser.PredicateContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#likePattern}. + * @param ctx the parse tree + */ + void enterLikePattern(SqlBaseParser.LikePatternContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#likePattern}. + * @param ctx the parse tree + */ + void exitLikePattern(SqlBaseParser.LikePatternContext ctx); /** * Enter a parse tree produced by {@link SqlBaseParser#pattern}. * @param ctx the parse tree diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java index c91758dadbc..ebf5b0cb09d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java @@ -41,25 +41,26 @@ class SqlBaseParser extends Parser { RULE_setQuantifier = 14, RULE_selectItem = 15, RULE_relation = 16, RULE_joinRelation = 17, RULE_joinType = 18, RULE_joinCriteria = 19, RULE_relationPrimary = 20, RULE_expression = 21, RULE_booleanExpression = 22, RULE_predicated = 23, - RULE_predicate = 24, RULE_pattern = 25, RULE_patternEscape = 26, RULE_valueExpression = 27, - RULE_primaryExpression = 28, RULE_castExpression = 29, RULE_castTemplate = 30, - RULE_extractExpression = 31, RULE_extractTemplate = 32, RULE_functionExpression = 33, - RULE_functionTemplate = 34, RULE_functionName = 35, RULE_constant = 36, - RULE_comparisonOperator = 37, RULE_booleanValue = 38, RULE_dataType = 39, - RULE_qualifiedName = 40, RULE_identifier = 41, RULE_tableIdentifier = 42, - RULE_quoteIdentifier = 43, RULE_unquoteIdentifier = 44, RULE_number = 45, - RULE_string = 46, RULE_nonReserved = 47; + RULE_predicate = 24, RULE_likePattern = 25, RULE_pattern = 26, RULE_patternEscape = 27, + RULE_valueExpression = 28, RULE_primaryExpression = 29, RULE_castExpression = 30, + RULE_castTemplate = 31, RULE_extractExpression = 32, RULE_extractTemplate = 33, + RULE_functionExpression = 34, RULE_functionTemplate = 35, RULE_functionName = 36, + RULE_constant = 37, RULE_comparisonOperator = 38, RULE_booleanValue = 39, + RULE_dataType = 40, RULE_qualifiedName = 41, RULE_identifier = 42, RULE_tableIdentifier = 43, + RULE_quoteIdentifier = 44, RULE_unquoteIdentifier = 45, RULE_number = 46, + RULE_string = 47, RULE_nonReserved = 48; public static final String[] ruleNames = { "singleStatement", "singleExpression", "statement", "query", "queryNoWith", "limitClause", "queryTerm", "orderBy", "querySpecification", "fromClause", "groupBy", "groupingElement", "groupingExpressions", "namedQuery", "setQuantifier", "selectItem", "relation", "joinRelation", "joinType", "joinCriteria", "relationPrimary", "expression", "booleanExpression", "predicated", "predicate", - "pattern", 
"patternEscape", "valueExpression", "primaryExpression", "castExpression", - "castTemplate", "extractExpression", "extractTemplate", "functionExpression", - "functionTemplate", "functionName", "constant", "comparisonOperator", - "booleanValue", "dataType", "qualifiedName", "identifier", "tableIdentifier", - "quoteIdentifier", "unquoteIdentifier", "number", "string", "nonReserved" + "likePattern", "pattern", "patternEscape", "valueExpression", "primaryExpression", + "castExpression", "castTemplate", "extractExpression", "extractTemplate", + "functionExpression", "functionTemplate", "functionName", "constant", + "comparisonOperator", "booleanValue", "dataType", "qualifiedName", "identifier", + "tableIdentifier", "quoteIdentifier", "unquoteIdentifier", "number", "string", + "nonReserved" }; private static final String[] _LITERAL_NAMES = { @@ -173,9 +174,9 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(96); + setState(98); statement(); - setState(97); + setState(99); match(EOF); } } @@ -220,9 +221,9 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(99); + setState(101); expression(); - setState(100); + setState(102); match(EOF); } } @@ -341,8 +342,9 @@ class SqlBaseParser extends Parser { } public static class SysColumnsContext extends StatementContext { public StringContext cluster; - public PatternContext indexPattern; - public PatternContext columnPattern; + public LikePatternContext tableLike; + public TableIdentifierContext tableIdent; + public LikePatternContext columnPattern; public TerminalNode SYS() { return getToken(SqlBaseParser.SYS, 0); } public TerminalNode COLUMNS() { return getToken(SqlBaseParser.COLUMNS, 0); } public TerminalNode CATALOG() { return getToken(SqlBaseParser.CATALOG, 0); } @@ -350,15 +352,14 @@ class SqlBaseParser extends Parser { public StringContext string() { return getRuleContext(StringContext.class,0); } - public List pattern() { - return getRuleContexts(PatternContext.class); + public List likePattern() { + return getRuleContexts(LikePatternContext.class); } - public PatternContext pattern(int i) { - return getRuleContext(PatternContext.class,i); + public LikePatternContext likePattern(int i) { + return getRuleContext(LikePatternContext.class,i); } - public List LIKE() { return getTokens(SqlBaseParser.LIKE); } - public TerminalNode LIKE(int i) { - return getToken(SqlBaseParser.LIKE, i); + public TableIdentifierContext tableIdentifier() { + return getRuleContext(TableIdentifierContext.class,0); } public SysColumnsContext(StatementContext ctx) { copyFrom(ctx); } @Override @@ -478,8 +479,9 @@ class SqlBaseParser extends Parser { } } public static class SysTablesContext extends StatementContext { - public PatternContext clusterPattern; - public PatternContext tablePattern; + public LikePatternContext clusterLike; + public LikePatternContext tableLike; + public TableIdentifierContext tableIdent; public TerminalNode SYS() { return getToken(SqlBaseParser.SYS, 0); } public TerminalNode TABLES() { return getToken(SqlBaseParser.TABLES, 0); } public TerminalNode CATALOG() { return getToken(SqlBaseParser.CATALOG, 0); } @@ -490,15 +492,14 @@ class SqlBaseParser extends Parser { public StringContext string(int i) { return getRuleContext(StringContext.class,i); } - public List pattern() { - return getRuleContexts(PatternContext.class); + public List likePattern() { + return getRuleContexts(LikePatternContext.class); } - public PatternContext pattern(int i) { - return 
getRuleContext(PatternContext.class,i); + public LikePatternContext likePattern(int i) { + return getRuleContext(LikePatternContext.class,i); } - public List LIKE() { return getTokens(SqlBaseParser.LIKE); } - public TerminalNode LIKE(int i) { - return getToken(SqlBaseParser.LIKE, i); + public TableIdentifierContext tableIdentifier() { + return getRuleContext(TableIdentifierContext.class,0); } public SysTablesContext(StatementContext ctx) { copyFrom(ctx); } @Override @@ -518,10 +519,9 @@ class SqlBaseParser extends Parser { public static class ShowFunctionsContext extends StatementContext { public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } public TerminalNode FUNCTIONS() { return getToken(SqlBaseParser.FUNCTIONS, 0); } - public PatternContext pattern() { - return getRuleContext(PatternContext.class,0); + public LikePatternContext likePattern() { + return getRuleContext(LikePatternContext.class,0); } - public TerminalNode LIKE() { return getToken(SqlBaseParser.LIKE, 0); } public ShowFunctionsContext(StatementContext ctx) { copyFrom(ctx); } @Override public void enterRule(ParseTreeListener listener) { @@ -538,12 +538,16 @@ class SqlBaseParser extends Parser { } } public static class ShowTablesContext extends StatementContext { + public LikePatternContext tableLike; + public TableIdentifierContext tableIdent; public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } public TerminalNode TABLES() { return getToken(SqlBaseParser.TABLES, 0); } - public PatternContext pattern() { - return getRuleContext(PatternContext.class,0); + public LikePatternContext likePattern() { + return getRuleContext(LikePatternContext.class,0); + } + public TableIdentifierContext tableIdentifier() { + return getRuleContext(TableIdentifierContext.class,0); } - public TerminalNode LIKE() { return getToken(SqlBaseParser.LIKE, 0); } public ShowTablesContext(StatementContext ctx) { copyFrom(ctx); } @Override public void enterRule(ParseTreeListener listener) { @@ -578,13 +582,18 @@ class SqlBaseParser extends Parser { } } public static class ShowColumnsContext extends StatementContext { + public LikePatternContext tableLike; + public TableIdentifierContext tableIdent; public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } public TerminalNode COLUMNS() { return getToken(SqlBaseParser.COLUMNS, 0); } + public TerminalNode FROM() { return getToken(SqlBaseParser.FROM, 0); } + public TerminalNode IN() { return getToken(SqlBaseParser.IN, 0); } + public LikePatternContext likePattern() { + return getRuleContext(LikePatternContext.class,0); + } public TableIdentifierContext tableIdentifier() { return getRuleContext(TableIdentifierContext.class,0); } - public TerminalNode FROM() { return getToken(SqlBaseParser.FROM, 0); } - public TerminalNode IN() { return getToken(SqlBaseParser.IN, 0); } public TerminalNode DESCRIBE() { return getToken(SqlBaseParser.DESCRIBE, 0); } public TerminalNode DESC() { return getToken(SqlBaseParser.DESC, 0); } public ShowColumnsContext(StatementContext ctx) { copyFrom(ctx); } @@ -608,14 +617,14 @@ class SqlBaseParser extends Parser { enterRule(_localctx, 4, RULE_statement); int _la; try { - setState(211); + setState(204); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,21,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,17,_ctx) ) { case 1: _localctx = new StatementDefaultContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(102); + setState(104); query(); } break; @@ -623,27 +632,27 @@ class SqlBaseParser extends 
Parser { _localctx = new ExplainContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(103); + setState(105); match(EXPLAIN); - setState(117); + setState(119); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,2,_ctx) ) { case 1: { - setState(104); + setState(106); match(T__0); - setState(113); + setState(115); _errHandler.sync(this); _la = _input.LA(1); while (((((_la - 28)) & ~0x3f) == 0 && ((1L << (_la - 28)) & ((1L << (FORMAT - 28)) | (1L << (PLAN - 28)) | (1L << (VERIFY - 28)))) != 0)) { { - setState(111); + setState(113); switch (_input.LA(1)) { case PLAN: { - setState(105); + setState(107); match(PLAN); - setState(106); + setState(108); ((ExplainContext)_localctx).type = _input.LT(1); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ALL) | (1L << ANALYZED) | (1L << EXECUTABLE) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED))) != 0)) ) { @@ -655,9 +664,9 @@ class SqlBaseParser extends Parser { break; case FORMAT: { - setState(107); + setState(109); match(FORMAT); - setState(108); + setState(110); ((ExplainContext)_localctx).format = _input.LT(1); _la = _input.LA(1); if ( !(_la==GRAPHVIZ || _la==TEXT) ) { @@ -669,9 +678,9 @@ class SqlBaseParser extends Parser { break; case VERIFY: { - setState(109); + setState(111); match(VERIFY); - setState(110); + setState(112); ((ExplainContext)_localctx).verify = booleanValue(); } break; @@ -679,16 +688,16 @@ class SqlBaseParser extends Parser { throw new NoViableAltException(this); } } - setState(115); + setState(117); _errHandler.sync(this); _la = _input.LA(1); } - setState(116); + setState(118); match(T__1); } break; } - setState(119); + setState(121); statement(); } break; @@ -696,27 +705,27 @@ class SqlBaseParser extends Parser { _localctx = new DebugContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(120); + setState(122); match(DEBUG); - setState(132); + setState(134); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { case 1: { - setState(121); + setState(123); match(T__0); - setState(128); + setState(130); _errHandler.sync(this); _la = _input.LA(1); while (_la==FORMAT || _la==PLAN) { { - setState(126); + setState(128); switch (_input.LA(1)) { case PLAN: { - setState(122); + setState(124); match(PLAN); - setState(123); + setState(125); ((DebugContext)_localctx).type = _input.LT(1); _la = _input.LA(1); if ( !(_la==ANALYZED || _la==OPTIMIZED) ) { @@ -728,9 +737,9 @@ class SqlBaseParser extends Parser { break; case FORMAT: { - setState(124); + setState(126); match(FORMAT); - setState(125); + setState(127); ((DebugContext)_localctx).format = _input.LT(1); _la = _input.LA(1); if ( !(_la==GRAPHVIZ || _la==TEXT) ) { @@ -744,16 +753,16 @@ class SqlBaseParser extends Parser { throw new NoViableAltException(this); } } - setState(130); + setState(132); _errHandler.sync(this); _la = _input.LA(1); } - setState(131); + setState(133); match(T__1); } break; } - setState(134); + setState(136); statement(); } break; @@ -761,28 +770,58 @@ class SqlBaseParser extends Parser { _localctx = new ShowTablesContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(135); + setState(137); match(SHOW); - setState(136); + setState(138); match(TABLES); setState(141); - _la = _input.LA(1); - if (((((_la - 40)) & ~0x3f) == 0 && ((1L << (_la - 40)) & ((1L << (LIKE - 40)) | (1L << (PARAM - 40)) | (1L << (STRING - 40)))) != 0)) { + switch (_input.LA(1)) { + case LIKE: { - setState(138); - _la = _input.LA(1); - if (_la==LIKE) { - { - setState(137); - match(LIKE); - } 
+ setState(139); + ((ShowTablesContext)_localctx).tableLike = likePattern(); } - + break; + case ANALYZE: + case ANALYZED: + case CATALOGS: + case COLUMNS: + case DEBUG: + case EXECUTABLE: + case EXPLAIN: + case FORMAT: + case FUNCTIONS: + case GRAPHVIZ: + case MAPPED: + case OPTIMIZED: + case PARSED: + case PHYSICAL: + case PLAN: + case RLIKE: + case QUERY: + case SCHEMAS: + case SHOW: + case SYS: + case TABLES: + case TEXT: + case TYPE: + case TYPES: + case VERIFY: + case IDENTIFIER: + case DIGIT_IDENTIFIER: + case TABLE_IDENTIFIER: + case QUOTED_IDENTIFIER: + case BACKQUOTED_IDENTIFIER: + { setState(140); - pattern(); + ((ShowTablesContext)_localctx).tableIdent = tableIdentifier(); } + break; + case EOF: + break; + default: + throw new NoViableAltException(this); } - } break; case 5: @@ -800,48 +839,127 @@ class SqlBaseParser extends Parser { } else { consume(); } - setState(146); - tableIdentifier(); + setState(148); + switch (_input.LA(1)) { + case LIKE: + { + setState(146); + ((ShowColumnsContext)_localctx).tableLike = likePattern(); + } + break; + case ANALYZE: + case ANALYZED: + case CATALOGS: + case COLUMNS: + case DEBUG: + case EXECUTABLE: + case EXPLAIN: + case FORMAT: + case FUNCTIONS: + case GRAPHVIZ: + case MAPPED: + case OPTIMIZED: + case PARSED: + case PHYSICAL: + case PLAN: + case RLIKE: + case QUERY: + case SCHEMAS: + case SHOW: + case SYS: + case TABLES: + case TEXT: + case TYPE: + case TYPES: + case VERIFY: + case IDENTIFIER: + case DIGIT_IDENTIFIER: + case TABLE_IDENTIFIER: + case QUOTED_IDENTIFIER: + case BACKQUOTED_IDENTIFIER: + { + setState(147); + ((ShowColumnsContext)_localctx).tableIdent = tableIdentifier(); + } + break; + default: + throw new NoViableAltException(this); + } } break; case 6: _localctx = new ShowColumnsContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(147); + setState(150); _la = _input.LA(1); if ( !(_la==DESC || _la==DESCRIBE) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(148); - tableIdentifier(); + setState(153); + switch (_input.LA(1)) { + case LIKE: + { + setState(151); + ((ShowColumnsContext)_localctx).tableLike = likePattern(); + } + break; + case ANALYZE: + case ANALYZED: + case CATALOGS: + case COLUMNS: + case DEBUG: + case EXECUTABLE: + case EXPLAIN: + case FORMAT: + case FUNCTIONS: + case GRAPHVIZ: + case MAPPED: + case OPTIMIZED: + case PARSED: + case PHYSICAL: + case PLAN: + case RLIKE: + case QUERY: + case SCHEMAS: + case SHOW: + case SYS: + case TABLES: + case TEXT: + case TYPE: + case TYPES: + case VERIFY: + case IDENTIFIER: + case DIGIT_IDENTIFIER: + case TABLE_IDENTIFIER: + case QUOTED_IDENTIFIER: + case BACKQUOTED_IDENTIFIER: + { + setState(152); + ((ShowColumnsContext)_localctx).tableIdent = tableIdentifier(); + } + break; + default: + throw new NoViableAltException(this); + } } break; case 7: _localctx = new ShowFunctionsContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(149); - match(SHOW); - setState(150); - match(FUNCTIONS); setState(155); + match(SHOW); + setState(156); + match(FUNCTIONS); + setState(158); _la = _input.LA(1); - if (((((_la - 40)) & ~0x3f) == 0 && ((1L << (_la - 40)) & ((1L << (LIKE - 40)) | (1L << (PARAM - 40)) | (1L << (STRING - 40)))) != 0)) { + if (_la==LIKE) { { - setState(152); - _la = _input.LA(1); - if (_la==LIKE) { - { - setState(151); - match(LIKE); - } - } - - setState(154); - pattern(); + setState(157); + likePattern(); } } @@ -851,9 +969,9 @@ class SqlBaseParser extends Parser { _localctx = new ShowSchemasContext(_localctx); 
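The SHOW TABLES and SHOW COLUMNS alternatives above now accept either an explicit LIKE pattern or a bare table identifier, where previously the LIKE keyword was merely optional in front of a mandatory pattern. A sketch of the observable difference, assuming an ANTLR 4.x runtime on the classpath; the driver code is illustrative only, not how the SQL plugin itself invokes the parser:

    import org.antlr.v4.runtime.ANTLRInputStream;
    import org.antlr.v4.runtime.CommonTokenStream;

    SqlBaseLexer lexer = new SqlBaseLexer(new ANTLRInputStream("SHOW TABLES LIKE 'log%'"));
    SqlBaseParser parser = new SqlBaseParser(new CommonTokenStream(lexer));
    SqlBaseParser.ShowTablesContext show =
            (SqlBaseParser.ShowTablesContext) parser.singleStatement().statement();
    // "SHOW TABLES LIKE 'log%'" -> show.tableLike  is set, show.tableIdent is null
    // "SHOW TABLES logs"        -> show.tableIdent is set, show.tableLike  is null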
enterOuterAlt(_localctx, 8); { - setState(157); + setState(160); match(SHOW); - setState(158); + setState(161); match(SCHEMAS); } break; @@ -861,9 +979,9 @@ class SqlBaseParser extends Parser { _localctx = new SysCatalogsContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(159); + setState(162); match(SYS); - setState(160); + setState(163); match(CATALOGS); } break; @@ -871,69 +989,58 @@ class SqlBaseParser extends Parser { _localctx = new SysTablesContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(161); + setState(164); match(SYS); - setState(162); + setState(165); match(TABLES); setState(168); _la = _input.LA(1); if (_la==CATALOG) { { - setState(163); + setState(166); match(CATALOG); - setState(165); - _la = _input.LA(1); - if (_la==LIKE) { - { - setState(164); - match(LIKE); - } - } - setState(167); - ((SysTablesContext)_localctx).clusterPattern = pattern(); + ((SysTablesContext)_localctx).clusterLike = likePattern(); } } - setState(174); - _la = _input.LA(1); - if (((((_la - 40)) & ~0x3f) == 0 && ((1L << (_la - 40)) & ((1L << (LIKE - 40)) | (1L << (PARAM - 40)) | (1L << (STRING - 40)))) != 0)) { + setState(172); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) { + case 1: + { + setState(170); + ((SysTablesContext)_localctx).tableLike = likePattern(); + } + break; + case 2: { setState(171); - _la = _input.LA(1); - if (_la==LIKE) { - { - setState(170); - match(LIKE); - } - } - - setState(173); - ((SysTablesContext)_localctx).tablePattern = pattern(); + ((SysTablesContext)_localctx).tableIdent = tableIdentifier(); } + break; } - - setState(185); + setState(183); _la = _input.LA(1); if (_la==TYPE) { { - setState(176); + setState(174); match(TYPE); - setState(177); + setState(175); string(); - setState(182); + setState(180); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(178); + setState(176); match(T__2); - setState(179); + setState(177); string(); } } - setState(184); + setState(182); _errHandler.sync(this); _la = _input.LA(1); } @@ -946,56 +1053,78 @@ class SqlBaseParser extends Parser { _localctx = new SysColumnsContext(_localctx); enterOuterAlt(_localctx, 11); { - setState(187); + setState(185); match(SYS); - setState(188); + setState(186); match(COLUMNS); - setState(191); + setState(189); _la = _input.LA(1); if (_la==CATALOG) { { - setState(189); + setState(187); match(CATALOG); - setState(190); + setState(188); ((SysColumnsContext)_localctx).cluster = string(); } } - setState(198); - _la = _input.LA(1); - if (_la==TABLE) { + setState(194); + switch (_input.LA(1)) { + case TABLE: + { + setState(191); + match(TABLE); + setState(192); + ((SysColumnsContext)_localctx).tableLike = likePattern(); + } + break; + case ANALYZE: + case ANALYZED: + case CATALOGS: + case COLUMNS: + case DEBUG: + case EXECUTABLE: + case EXPLAIN: + case FORMAT: + case FUNCTIONS: + case GRAPHVIZ: + case MAPPED: + case OPTIMIZED: + case PARSED: + case PHYSICAL: + case PLAN: + case RLIKE: + case QUERY: + case SCHEMAS: + case SHOW: + case SYS: + case TABLES: + case TEXT: + case TYPE: + case TYPES: + case VERIFY: + case IDENTIFIER: + case DIGIT_IDENTIFIER: + case TABLE_IDENTIFIER: + case QUOTED_IDENTIFIER: + case BACKQUOTED_IDENTIFIER: { setState(193); - match(TABLE); - setState(195); - _la = _input.LA(1); - if (_la==LIKE) { - { - setState(194); - match(LIKE); - } - } - - setState(197); - ((SysColumnsContext)_localctx).indexPattern = pattern(); + ((SysColumnsContext)_localctx).tableIdent = tableIdentifier(); } + break; + case 
EOF: + case LIKE: + break; + default: + throw new NoViableAltException(this); } - - setState(204); + setState(197); _la = _input.LA(1); - if (((((_la - 40)) & ~0x3f) == 0 && ((1L << (_la - 40)) & ((1L << (LIKE - 40)) | (1L << (PARAM - 40)) | (1L << (STRING - 40)))) != 0)) { + if (_la==LIKE) { { - setState(201); - _la = _input.LA(1); - if (_la==LIKE) { - { - setState(200); - match(LIKE); - } - } - - setState(203); - ((SysColumnsContext)_localctx).columnPattern = pattern(); + setState(196); + ((SysColumnsContext)_localctx).columnPattern = likePattern(); } } @@ -1005,9 +1134,9 @@ class SqlBaseParser extends Parser { _localctx = new SysTypesContext(_localctx); enterOuterAlt(_localctx, 12); { - setState(206); + setState(199); match(SYS); - setState(207); + setState(200); match(TYPES); } break; @@ -1015,11 +1144,11 @@ class SqlBaseParser extends Parser { _localctx = new SysTableTypesContext(_localctx); enterOuterAlt(_localctx, 13); { - setState(208); + setState(201); match(SYS); - setState(209); + setState(202); match(TABLE); - setState(210); + setState(203); match(TYPES); } break; @@ -1073,34 +1202,34 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(222); + setState(215); _la = _input.LA(1); if (_la==WITH) { { - setState(213); + setState(206); match(WITH); - setState(214); + setState(207); namedQuery(); - setState(219); + setState(212); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(215); + setState(208); match(T__2); - setState(216); + setState(209); namedQuery(); } } - setState(221); + setState(214); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(224); + setState(217); queryNoWith(); } } @@ -1156,42 +1285,42 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(226); + setState(219); queryTerm(); - setState(237); + setState(230); _la = _input.LA(1); if (_la==ORDER) { { - setState(227); + setState(220); match(ORDER); - setState(228); + setState(221); match(BY); - setState(229); + setState(222); orderBy(); - setState(234); + setState(227); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(230); + setState(223); match(T__2); - setState(231); + setState(224); orderBy(); } } - setState(236); + setState(229); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(240); + setState(233); _la = _input.LA(1); if (_la==LIMIT || _la==LIMIT_ESC) { { - setState(239); + setState(232); limitClause(); } } @@ -1240,14 +1369,14 @@ class SqlBaseParser extends Parser { enterRule(_localctx, 10, RULE_limitClause); int _la; try { - setState(247); + setState(240); switch (_input.LA(1)) { case LIMIT: enterOuterAlt(_localctx, 1); { - setState(242); + setState(235); match(LIMIT); - setState(243); + setState(236); ((LimitClauseContext)_localctx).limit = _input.LT(1); _la = _input.LA(1); if ( !(_la==ALL || _la==INTEGER_VALUE) ) { @@ -1260,9 +1389,9 @@ class SqlBaseParser extends Parser { case LIMIT_ESC: enterOuterAlt(_localctx, 2); { - setState(244); + setState(237); match(LIMIT_ESC); - setState(245); + setState(238); ((LimitClauseContext)_localctx).limit = _input.LT(1); _la = _input.LA(1); if ( !(_la==ALL || _la==INTEGER_VALUE) ) { @@ -1270,7 +1399,7 @@ class SqlBaseParser extends Parser { } else { consume(); } - setState(246); + setState(239); match(ESC_END); } break; @@ -1343,13 +1472,13 @@ class SqlBaseParser extends Parser { QueryTermContext _localctx = new QueryTermContext(_ctx, getState()); enterRule(_localctx, 12, RULE_queryTerm); try { - setState(254); + 
setState(247); switch (_input.LA(1)) { case SELECT: _localctx = new QueryPrimaryDefaultContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(249); + setState(242); querySpecification(); } break; @@ -1357,11 +1486,11 @@ class SqlBaseParser extends Parser { _localctx = new SubqueryContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(250); + setState(243); match(T__0); - setState(251); + setState(244); queryNoWith(); - setState(252); + setState(245); match(T__1); } break; @@ -1413,13 +1542,13 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(256); + setState(249); expression(); - setState(258); + setState(251); _la = _input.LA(1); if (_la==ASC || _la==DESC) { { - setState(257); + setState(250); ((OrderByContext)_localctx).ordering = _input.LT(1); _la = _input.LA(1); if ( !(_la==ASC || _la==DESC) ) { @@ -1498,75 +1627,75 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(260); + setState(253); match(SELECT); - setState(262); + setState(255); _la = _input.LA(1); if (_la==ALL || _la==DISTINCT) { { - setState(261); + setState(254); setQuantifier(); } } - setState(264); + setState(257); selectItem(); - setState(269); + setState(262); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(265); + setState(258); match(T__2); - setState(266); + setState(259); selectItem(); } } - setState(271); + setState(264); _errHandler.sync(this); _la = _input.LA(1); } - setState(273); + setState(266); _la = _input.LA(1); if (_la==FROM) { { - setState(272); + setState(265); fromClause(); } } - setState(277); + setState(270); _la = _input.LA(1); if (_la==WHERE) { { - setState(275); + setState(268); match(WHERE); - setState(276); + setState(269); ((QuerySpecificationContext)_localctx).where = booleanExpression(0); } } - setState(282); + setState(275); _la = _input.LA(1); if (_la==GROUP) { { - setState(279); + setState(272); match(GROUP); - setState(280); + setState(273); match(BY); - setState(281); + setState(274); groupBy(); } } - setState(286); + setState(279); _la = _input.LA(1); if (_la==HAVING) { { - setState(284); + setState(277); match(HAVING); - setState(285); + setState(278); ((QuerySpecificationContext)_localctx).having = booleanExpression(0); } } @@ -1618,23 +1747,23 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(288); + setState(281); match(FROM); - setState(289); + setState(282); relation(); - setState(294); + setState(287); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(290); + setState(283); match(T__2); - setState(291); + setState(284); relation(); } } - setState(296); + setState(289); _errHandler.sync(this); _la = _input.LA(1); } @@ -1687,30 +1816,30 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(298); + setState(291); _la = _input.LA(1); if (_la==ALL || _la==DISTINCT) { { - setState(297); + setState(290); setQuantifier(); } } - setState(300); + setState(293); groupingElement(); - setState(305); + setState(298); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(301); + setState(294); match(T__2); - setState(302); + setState(295); groupingElement(); } } - setState(307); + setState(300); _errHandler.sync(this); _la = _input.LA(1); } @@ -1765,7 +1894,7 @@ class SqlBaseParser extends Parser { _localctx = new SingleGroupingSetContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(308); + setState(301); groupingExpressions(); } } @@ -1811,47 
+1940,47 @@ class SqlBaseParser extends Parser { enterRule(_localctx, 24, RULE_groupingExpressions); int _la; try { - setState(323); + setState(316); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,41,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,37,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(310); + setState(303); match(T__0); - setState(319); + setState(312); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << LEFT) | (1L << MAPPED) | (1L << MATCH) | (1L << NOT) | (1L << NULL) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RIGHT) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TRUE - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (FUNCTION_ESC - 64)) | (1L << (DATE_ESC - 64)) | (1L << (TIME_ESC - 64)) | (1L << (TIMESTAMP_ESC - 64)) | (1L << (GUID_ESC - 64)) | (1L << (PLUS - 64)) | (1L << (MINUS - 64)) | (1L << (ASTERISK - 64)) | (1L << (PARAM - 64)) | (1L << (STRING - 64)) | (1L << (INTEGER_VALUE - 64)) | (1L << (DECIMAL_VALUE - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(311); + setState(304); expression(); - setState(316); + setState(309); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(312); + setState(305); match(T__2); - setState(313); + setState(306); expression(); } } - setState(318); + setState(311); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(321); + setState(314); match(T__1); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(322); + setState(315); expression(); } break; @@ -1902,15 +2031,15 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(325); + setState(318); ((NamedQueryContext)_localctx).name = identifier(); - setState(326); + setState(319); match(AS); - setState(327); + setState(320); match(T__0); - setState(328); + setState(321); queryNoWith(); - setState(329); + setState(322); match(T__1); } } @@ -1954,7 +2083,7 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(331); + setState(324); _la = _input.LA(1); if ( !(_la==ALL || _la==DISTINCT) ) { _errHandler.recoverInline(this); @@ -2017,22 +2146,22 @@ class SqlBaseParser extends Parser { _localctx = new SelectExpressionContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(333); + setState(326); expression(); - setState(338); + setState(331); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << 
(VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(335); + setState(328); _la = _input.LA(1); if (_la==AS) { { - setState(334); + setState(327); match(AS); } } - setState(337); + setState(330); identifier(); } } @@ -2086,19 +2215,19 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(340); + setState(333); relationPrimary(); - setState(344); + setState(337); _errHandler.sync(this); _la = _input.LA(1); while ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << FULL) | (1L << INNER) | (1L << JOIN) | (1L << LEFT) | (1L << NATURAL) | (1L << RIGHT))) != 0)) { { { - setState(341); + setState(334); joinRelation(); } } - setState(346); + setState(339); _errHandler.sync(this); _la = _input.LA(1); } @@ -2152,7 +2281,7 @@ class SqlBaseParser extends Parser { enterRule(_localctx, 34, RULE_joinRelation); int _la; try { - setState(358); + setState(351); switch (_input.LA(1)) { case FULL: case INNER: @@ -2162,18 +2291,18 @@ class SqlBaseParser extends Parser { enterOuterAlt(_localctx, 1); { { - setState(347); + setState(340); joinType(); } - setState(348); + setState(341); match(JOIN); - setState(349); + setState(342); ((JoinRelationContext)_localctx).right = relationPrimary(); - setState(351); + setState(344); _la = _input.LA(1); if (_la==ON || _la==USING) { { - setState(350); + setState(343); joinCriteria(); } } @@ -2183,13 +2312,13 @@ class SqlBaseParser extends Parser { case NATURAL: enterOuterAlt(_localctx, 2); { - setState(353); + setState(346); match(NATURAL); - setState(354); + setState(347); joinType(); - setState(355); + setState(348); match(JOIN); - setState(356); + setState(349); ((JoinRelationContext)_localctx).right = relationPrimary(); } break; @@ -2238,17 +2367,17 @@ class SqlBaseParser extends Parser { enterRule(_localctx, 36, RULE_joinType); int _la; try { - setState(375); + setState(368); switch (_input.LA(1)) { case INNER: case JOIN: enterOuterAlt(_localctx, 1); { - setState(361); + setState(354); _la = _input.LA(1); if (_la==INNER) { { - setState(360); + setState(353); match(INNER); } } @@ -2258,13 +2387,13 @@ class SqlBaseParser extends Parser { case LEFT: enterOuterAlt(_localctx, 2); { - setState(363); + setState(356); match(LEFT); - setState(365); + setState(358); _la = _input.LA(1); if (_la==OUTER) { { - setState(364); + setState(357); match(OUTER); } } @@ -2274,13 +2403,13 @@ class SqlBaseParser extends Parser { case RIGHT: enterOuterAlt(_localctx, 3); { - setState(367); + setState(360); match(RIGHT); - setState(369); + setState(362); _la = _input.LA(1); if (_la==OUTER) { { - setState(368); + setState(361); match(OUTER); } } @@ -2290,13 +2419,13 @@ class SqlBaseParser extends Parser { case FULL: enterOuterAlt(_localctx, 4); { - setState(371); + setState(364); match(FULL); - setState(373); + setState(366); _la = _input.LA(1); if (_la==OUTER) { { - setState(372); + setState(365); match(OUTER); } } @@ -2354,43 +2483,43 @@ class SqlBaseParser extends Parser { enterRule(_localctx, 38, RULE_joinCriteria); int _la; try { - setState(391); + setState(384); switch (_input.LA(1)) { case ON: enterOuterAlt(_localctx, 1); { - setState(377); + setState(370); match(ON); - setState(378); + setState(371); booleanExpression(0); } break; case USING: enterOuterAlt(_localctx, 2); { - setState(379); + setState(372); match(USING); - setState(380); + setState(373); match(T__0); - setState(381); + setState(374); identifier(); - setState(386); + 
setState(379); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(382); + setState(375); match(T__2); - setState(383); + setState(376); identifier(); } } - setState(388); + setState(381); _errHandler.sync(this); _la = _input.LA(1); } - setState(389); + setState(382); match(T__1); } break; @@ -2495,29 +2624,29 @@ class SqlBaseParser extends Parser { enterRule(_localctx, 40, RULE_relationPrimary); int _la; try { - setState(418); + setState(411); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,60,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,56,_ctx) ) { case 1: _localctx = new TableNameContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(393); + setState(386); tableIdentifier(); - setState(398); + setState(391); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(395); + setState(388); _la = _input.LA(1); if (_la==AS) { { - setState(394); + setState(387); match(AS); } } - setState(397); + setState(390); qualifiedName(); } } @@ -2528,26 +2657,26 @@ class SqlBaseParser extends Parser { _localctx = new AliasedQueryContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(400); + setState(393); match(T__0); - setState(401); + setState(394); queryNoWith(); - setState(402); + setState(395); match(T__1); - setState(407); + setState(400); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(404); + setState(397); _la = _input.LA(1); if (_la==AS) { { - setState(403); + setState(396); match(AS); } } - setState(406); + setState(399); qualifiedName(); } } @@ -2558,26 +2687,26 @@ class SqlBaseParser extends Parser { _localctx = new AliasedRelationContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(409); + setState(402); match(T__0); - setState(410); + setState(403); relation(); - setState(411); + setState(404); match(T__1); - setState(416); + setState(409); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << 
MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(413); + setState(406); _la = _input.LA(1); if (_la==AS) { { - setState(412); + setState(405); match(AS); } } - setState(415); + setState(408); qualifiedName(); } } @@ -2626,7 +2755,7 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(420); + setState(413); booleanExpression(0); } } @@ -2835,18 +2964,18 @@ class SqlBaseParser extends Parser { int _alt; enterOuterAlt(_localctx, 1); { - setState(471); + setState(464); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,64,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,60,_ctx) ) { case 1: { _localctx = new LogicalNotContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(423); + setState(416); match(NOT); - setState(424); + setState(417); booleanExpression(8); } break; @@ -2855,13 +2984,13 @@ class SqlBaseParser extends Parser { _localctx = new ExistsContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(425); + setState(418); match(EXISTS); - setState(426); + setState(419); match(T__0); - setState(427); + setState(420); query(); - setState(428); + setState(421); match(T__1); } break; @@ -2870,29 +2999,29 @@ class SqlBaseParser extends Parser { _localctx = new StringQueryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(430); + setState(423); match(QUERY); - setState(431); + setState(424); match(T__0); - setState(432); + setState(425); ((StringQueryContext)_localctx).queryString = string(); - setState(437); + setState(430); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(433); + setState(426); match(T__2); - setState(434); + setState(427); ((StringQueryContext)_localctx).options = string(); } } - setState(439); + setState(432); _errHandler.sync(this); _la = _input.LA(1); } - setState(440); + setState(433); match(T__1); } break; @@ -2901,33 +3030,33 @@ class SqlBaseParser extends Parser { _localctx = new MatchQueryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(442); + setState(435); match(MATCH); - setState(443); + setState(436); match(T__0); - setState(444); + setState(437); ((MatchQueryContext)_localctx).singleField = qualifiedName(); - setState(445); + setState(438); match(T__2); - setState(446); + setState(439); ((MatchQueryContext)_localctx).queryString = string(); - setState(451); + setState(444); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(447); + setState(440); match(T__2); - setState(448); + setState(441); ((MatchQueryContext)_localctx).options = string(); } } - setState(453); + setState(446); _errHandler.sync(this); _la = _input.LA(1); } - setState(454); + setState(447); match(T__1); } break; @@ -2936,33 +3065,33 @@ class SqlBaseParser extends Parser { _localctx = new MultiMatchQueryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(456); + setState(449); match(MATCH); - setState(457); + setState(450); match(T__0); - setState(458); + setState(451); ((MultiMatchQueryContext)_localctx).multiFields = string(); - 
setState(459); + setState(452); match(T__2); - setState(460); + setState(453); ((MultiMatchQueryContext)_localctx).queryString = string(); - setState(465); + setState(458); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(461); + setState(454); match(T__2); - setState(462); + setState(455); ((MultiMatchQueryContext)_localctx).options = string(); } } - setState(467); + setState(460); _errHandler.sync(this); _la = _input.LA(1); } - setState(468); + setState(461); match(T__1); } break; @@ -2971,33 +3100,33 @@ class SqlBaseParser extends Parser { _localctx = new BooleanDefaultContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(470); + setState(463); predicated(); } break; } _ctx.stop = _input.LT(-1); - setState(481); + setState(474); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,66,_ctx); + _alt = getInterpreter().adaptivePredict(_input,62,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(479); + setState(472); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,65,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,61,_ctx) ) { case 1: { _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState)); ((LogicalBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression); - setState(473); + setState(466); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(474); + setState(467); ((LogicalBinaryContext)_localctx).operator = match(AND); - setState(475); + setState(468); ((LogicalBinaryContext)_localctx).right = booleanExpression(3); } break; @@ -3006,20 +3135,20 @@ class SqlBaseParser extends Parser { _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState)); ((LogicalBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression); - setState(476); + setState(469); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(477); + setState(470); ((LogicalBinaryContext)_localctx).operator = match(OR); - setState(478); + setState(471); ((LogicalBinaryContext)_localctx).right = booleanExpression(2); } break; } } } - setState(483); + setState(476); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,66,_ctx); + _alt = getInterpreter().adaptivePredict(_input,62,_ctx); } } } @@ -3066,14 +3195,14 @@ class SqlBaseParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(484); + setState(477); valueExpression(0); - setState(486); + setState(479); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,67,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,63,_ctx) ) { case 1: { - setState(485); + setState(478); predicate(); } break; @@ -3149,142 +3278,142 @@ class SqlBaseParser extends Parser { enterRule(_localctx, 48, RULE_predicate); int _la; try { - setState(534); + setState(527); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,75,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,71,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(489); + setState(482); _la = _input.LA(1); if (_la==NOT) { { - setState(488); + setState(481); match(NOT); } } - setState(491); + setState(484); 
((PredicateContext)_localctx).kind = match(BETWEEN); - setState(492); + setState(485); ((PredicateContext)_localctx).lower = valueExpression(0); - setState(493); + setState(486); match(AND); - setState(494); + setState(487); ((PredicateContext)_localctx).upper = valueExpression(0); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(497); + setState(490); _la = _input.LA(1); if (_la==NOT) { { - setState(496); + setState(489); match(NOT); } } - setState(499); + setState(492); ((PredicateContext)_localctx).kind = match(IN); - setState(500); + setState(493); match(T__0); - setState(501); + setState(494); expression(); - setState(506); + setState(499); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(502); + setState(495); match(T__2); - setState(503); + setState(496); expression(); } } - setState(508); + setState(501); _errHandler.sync(this); _la = _input.LA(1); } - setState(509); + setState(502); match(T__1); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(512); + setState(505); _la = _input.LA(1); if (_la==NOT) { { - setState(511); + setState(504); match(NOT); } } - setState(514); + setState(507); ((PredicateContext)_localctx).kind = match(IN); - setState(515); + setState(508); match(T__0); - setState(516); + setState(509); query(); - setState(517); + setState(510); match(T__1); } break; case 4: enterOuterAlt(_localctx, 4); { - setState(520); + setState(513); _la = _input.LA(1); if (_la==NOT) { { - setState(519); + setState(512); match(NOT); } } - setState(522); + setState(515); ((PredicateContext)_localctx).kind = match(LIKE); - setState(523); + setState(516); pattern(); } break; case 5: enterOuterAlt(_localctx, 5); { - setState(525); + setState(518); _la = _input.LA(1); if (_la==NOT) { { - setState(524); + setState(517); match(NOT); } } - setState(527); + setState(520); ((PredicateContext)_localctx).kind = match(RLIKE); - setState(528); + setState(521); ((PredicateContext)_localctx).regex = string(); } break; case 6: enterOuterAlt(_localctx, 6); { - setState(529); + setState(522); match(IS); - setState(531); + setState(524); _la = _input.LA(1); if (_la==NOT) { { - setState(530); + setState(523); match(NOT); } } - setState(533); + setState(526); ((PredicateContext)_localctx).kind = match(NULL); } break; @@ -3301,6 +3430,53 @@ class SqlBaseParser extends Parser { return _localctx; } + public static class LikePatternContext extends ParserRuleContext { + public TerminalNode LIKE() { return getToken(SqlBaseParser.LIKE, 0); } + public PatternContext pattern() { + return getRuleContext(PatternContext.class,0); + } + public LikePatternContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_likePattern; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterLikePattern(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitLikePattern(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitLikePattern(this); + else return visitor.visitChildren(this); + } + } + + public final LikePatternContext likePattern() throws RecognitionException { + LikePatternContext _localctx = new LikePatternContext(_ctx, getState()); + enterRule(_localctx, 50, RULE_likePattern); + try { + 
enterOuterAlt(_localctx, 1); + { + setState(529); + match(LIKE); + setState(530); + pattern(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class PatternContext extends ParserRuleContext { public StringContext value; public StringContext string() { @@ -3330,18 +3506,18 @@ class SqlBaseParser extends Parser { public final PatternContext pattern() throws RecognitionException { PatternContext _localctx = new PatternContext(_ctx, getState()); - enterRule(_localctx, 50, RULE_pattern); + enterRule(_localctx, 52, RULE_pattern); try { enterOuterAlt(_localctx, 1); { - setState(536); + setState(532); ((PatternContext)_localctx).value = string(); - setState(538); + setState(534); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,76,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,72,_ctx) ) { case 1: { - setState(537); + setState(533); patternEscape(); } break; @@ -3387,27 +3563,27 @@ class SqlBaseParser extends Parser { public final PatternEscapeContext patternEscape() throws RecognitionException { PatternEscapeContext _localctx = new PatternEscapeContext(_ctx, getState()); - enterRule(_localctx, 52, RULE_patternEscape); + enterRule(_localctx, 54, RULE_patternEscape); try { - setState(546); + setState(542); switch (_input.LA(1)) { case ESCAPE: enterOuterAlt(_localctx, 1); { - setState(540); + setState(536); match(ESCAPE); - setState(541); + setState(537); ((PatternEscapeContext)_localctx).escape = string(); } break; case ESCAPE_ESC: enterOuterAlt(_localctx, 2); { - setState(542); + setState(538); match(ESCAPE_ESC); - setState(543); + setState(539); ((PatternEscapeContext)_localctx).escape = string(); - setState(544); + setState(540); match(ESC_END); } break; @@ -3545,14 +3721,14 @@ class SqlBaseParser extends Parser { int _parentState = getState(); ValueExpressionContext _localctx = new ValueExpressionContext(_ctx, _parentState); ValueExpressionContext _prevctx = _localctx; - int _startState = 54; - enterRecursionRule(_localctx, 54, RULE_valueExpression, _p); + int _startState = 56; + enterRecursionRule(_localctx, 56, RULE_valueExpression, _p); int _la; try { int _alt; enterOuterAlt(_localctx, 1); { - setState(552); + setState(548); switch (_input.LA(1)) { case T__0: case ANALYZE: @@ -3606,7 +3782,7 @@ class SqlBaseParser extends Parser { _ctx = _localctx; _prevctx = _localctx; - setState(549); + setState(545); primaryExpression(); } break; @@ -3616,7 +3792,7 @@ class SqlBaseParser extends Parser { _localctx = new ArithmeticUnaryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(550); + setState(546); ((ArithmeticUnaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { @@ -3624,7 +3800,7 @@ class SqlBaseParser extends Parser { } else { consume(); } - setState(551); + setState(547); valueExpression(4); } break; @@ -3632,25 +3808,25 @@ class SqlBaseParser extends Parser { throw new NoViableAltException(this); } _ctx.stop = _input.LT(-1); - setState(566); + setState(562); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,80,_ctx); + _alt = getInterpreter().adaptivePredict(_input,76,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(564); + setState(560); 
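The new likePattern() method is the heart of the refactoring at the grammar level: it matches the LIKE token and then delegates to the untouched pattern rule, so the SHOW and SYS commands above no longer spell out an optional LIKE themselves (the predicate rule's own LIKE handling, earlier in this file, still calls pattern() directly and is unchanged apart from state numbers). The enterRule offsets moving here (pattern 50 to 52, patternEscape 52 to 54) follow from ANTLR allocating two ATN states per rule, making a rule's start state twice its rule index. A sketch of the corresponding grammar source, inferred from the generated method and not part of this hunk:

    // In SqlBase.g4 (inferred from the generated likePattern() above):
    // likePattern : LIKE pattern ;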
_errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,79,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,75,_ctx) ) { case 1: { _localctx = new ArithmeticBinaryContext(new ValueExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_valueExpression); - setState(554); + setState(550); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(555); + setState(551); ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(((((_la - 88)) & ~0x3f) == 0 && ((1L << (_la - 88)) & ((1L << (ASTERISK - 88)) | (1L << (SLASH - 88)) | (1L << (PERCENT - 88)))) != 0)) ) { @@ -3658,7 +3834,7 @@ class SqlBaseParser extends Parser { } else { consume(); } - setState(556); + setState(552); ((ArithmeticBinaryContext)_localctx).right = valueExpression(4); } break; @@ -3667,9 +3843,9 @@ class SqlBaseParser extends Parser { _localctx = new ArithmeticBinaryContext(new ValueExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_valueExpression); - setState(557); + setState(553); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(558); + setState(554); ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { @@ -3677,7 +3853,7 @@ class SqlBaseParser extends Parser { } else { consume(); } - setState(559); + setState(555); ((ArithmeticBinaryContext)_localctx).right = valueExpression(3); } break; @@ -3686,20 +3862,20 @@ class SqlBaseParser extends Parser { _localctx = new ComparisonContext(new ValueExpressionContext(_parentctx, _parentState)); ((ComparisonContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_valueExpression); - setState(560); + setState(556); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(561); + setState(557); comparisonOperator(); - setState(562); + setState(558); ((ComparisonContext)_localctx).right = valueExpression(2); } break; } } } - setState(568); + setState(564); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,80,_ctx); + _alt = getInterpreter().adaptivePredict(_input,76,_ctx); } } } @@ -3901,17 +4077,17 @@ class SqlBaseParser extends Parser { public final PrimaryExpressionContext primaryExpression() throws RecognitionException { PrimaryExpressionContext _localctx = new PrimaryExpressionContext(_ctx, getState()); - enterRule(_localctx, 56, RULE_primaryExpression); + enterRule(_localctx, 58, RULE_primaryExpression); int _la; try { - setState(590); + setState(586); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,82,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,78,_ctx) ) { case 1: _localctx = new CastContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(569); + setState(565); castExpression(); } break; @@ -3919,7 +4095,7 @@ class SqlBaseParser extends Parser { _localctx = new ExtractContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(570); + setState(566); extractExpression(); } break; @@ -3927,7 +4103,7 @@ class SqlBaseParser extends Parser { _localctx = new ConstantDefaultContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(571); + setState(567); constant(); } break; @@ -3935,7 +4111,7 @@ class SqlBaseParser 
extends Parser { _localctx = new StarContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(572); + setState(568); match(ASTERISK); } break; @@ -3943,18 +4119,18 @@ class SqlBaseParser extends Parser { _localctx = new StarContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(576); + setState(572); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(573); + setState(569); qualifiedName(); - setState(574); + setState(570); match(DOT); } } - setState(578); + setState(574); match(ASTERISK); } break; @@ -3962,7 +4138,7 @@ class SqlBaseParser extends Parser { _localctx = new FunctionContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(579); + setState(575); functionExpression(); } break; @@ -3970,11 +4146,11 @@ class SqlBaseParser extends Parser { _localctx = new SubqueryExpressionContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(580); + setState(576); match(T__0); - setState(581); + setState(577); query(); - setState(582); + setState(578); match(T__1); } break; @@ -3982,7 +4158,7 @@ class SqlBaseParser extends Parser { _localctx = new ColumnReferenceContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(584); + setState(580); identifier(); } break; @@ -3990,7 +4166,7 @@ class SqlBaseParser extends Parser { _localctx = new DereferenceContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(585); + setState(581); qualifiedName(); } break; @@ -3998,11 +4174,11 @@ class SqlBaseParser extends Parser { _localctx = new ParenthesizedExpressionContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(586); + setState(582); match(T__0); - setState(587); + setState(583); expression(); - setState(588); + setState(584); match(T__1); } break; @@ -4046,25 +4222,25 @@ class SqlBaseParser extends Parser { public final CastExpressionContext castExpression() throws RecognitionException { CastExpressionContext _localctx = new CastExpressionContext(_ctx, getState()); - enterRule(_localctx, 58, RULE_castExpression); + enterRule(_localctx, 60, RULE_castExpression); try { - setState(597); + setState(593); switch (_input.LA(1)) { case CAST: enterOuterAlt(_localctx, 1); { - setState(592); + setState(588); castTemplate(); } break; case FUNCTION_ESC: enterOuterAlt(_localctx, 2); { - setState(593); + setState(589); match(FUNCTION_ESC); - setState(594); + setState(590); castTemplate(); - setState(595); + setState(591); match(ESC_END); } break; @@ -4113,21 +4289,21 @@ class SqlBaseParser extends Parser { public final CastTemplateContext castTemplate() throws RecognitionException { CastTemplateContext _localctx = new CastTemplateContext(_ctx, getState()); - enterRule(_localctx, 60, RULE_castTemplate); + enterRule(_localctx, 62, RULE_castTemplate); try { enterOuterAlt(_localctx, 1); { - setState(599); + setState(595); match(CAST); - setState(600); + setState(596); match(T__0); - setState(601); 
+ setState(597); expression(); - setState(602); + setState(598); match(AS); - setState(603); + setState(599); dataType(); - setState(604); + setState(600); match(T__1); } } @@ -4169,25 +4345,25 @@ class SqlBaseParser extends Parser { public final ExtractExpressionContext extractExpression() throws RecognitionException { ExtractExpressionContext _localctx = new ExtractExpressionContext(_ctx, getState()); - enterRule(_localctx, 62, RULE_extractExpression); + enterRule(_localctx, 64, RULE_extractExpression); try { - setState(611); + setState(607); switch (_input.LA(1)) { case EXTRACT: enterOuterAlt(_localctx, 1); { - setState(606); + setState(602); extractTemplate(); } break; case FUNCTION_ESC: enterOuterAlt(_localctx, 2); { - setState(607); + setState(603); match(FUNCTION_ESC); - setState(608); + setState(604); extractTemplate(); - setState(609); + setState(605); match(ESC_END); } break; @@ -4237,21 +4413,21 @@ class SqlBaseParser extends Parser { public final ExtractTemplateContext extractTemplate() throws RecognitionException { ExtractTemplateContext _localctx = new ExtractTemplateContext(_ctx, getState()); - enterRule(_localctx, 64, RULE_extractTemplate); + enterRule(_localctx, 66, RULE_extractTemplate); try { enterOuterAlt(_localctx, 1); { - setState(613); + setState(609); match(EXTRACT); - setState(614); + setState(610); match(T__0); - setState(615); + setState(611); ((ExtractTemplateContext)_localctx).field = identifier(); - setState(616); + setState(612); match(FROM); - setState(617); + setState(613); valueExpression(0); - setState(618); + setState(614); match(T__1); } } @@ -4292,9 +4468,9 @@ class SqlBaseParser extends Parser { public final FunctionExpressionContext functionExpression() throws RecognitionException { FunctionExpressionContext _localctx = new FunctionExpressionContext(_ctx, getState()); - enterRule(_localctx, 66, RULE_functionExpression); + enterRule(_localctx, 68, RULE_functionExpression); try { - setState(625); + setState(621); switch (_input.LA(1)) { case ANALYZE: case ANALYZED: @@ -4329,18 +4505,18 @@ class SqlBaseParser extends Parser { case BACKQUOTED_IDENTIFIER: enterOuterAlt(_localctx, 1); { - setState(620); + setState(616); functionTemplate(); } break; case FUNCTION_ESC: enterOuterAlt(_localctx, 2); { - setState(621); + setState(617); match(FUNCTION_ESC); - setState(622); + setState(618); functionTemplate(); - setState(623); + setState(619); match(ESC_END); } break; @@ -4393,50 +4569,50 @@ class SqlBaseParser extends Parser { public final FunctionTemplateContext functionTemplate() throws RecognitionException { FunctionTemplateContext _localctx = new FunctionTemplateContext(_ctx, getState()); - enterRule(_localctx, 68, RULE_functionTemplate); + enterRule(_localctx, 70, RULE_functionTemplate); int _la; try { enterOuterAlt(_localctx, 1); { - setState(627); + setState(623); functionName(); - setState(628); + setState(624); match(T__0); - setState(640); + setState(636); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ALL) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << DISTINCT) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << LEFT) | (1L << MAPPED) | (1L << MATCH) | (1L << NOT) | (1L << NULL) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RIGHT) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | 
(1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TRUE - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (FUNCTION_ESC - 64)) | (1L << (DATE_ESC - 64)) | (1L << (TIME_ESC - 64)) | (1L << (TIMESTAMP_ESC - 64)) | (1L << (GUID_ESC - 64)) | (1L << (PLUS - 64)) | (1L << (MINUS - 64)) | (1L << (ASTERISK - 64)) | (1L << (PARAM - 64)) | (1L << (STRING - 64)) | (1L << (INTEGER_VALUE - 64)) | (1L << (DECIMAL_VALUE - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(630); + setState(626); _la = _input.LA(1); if (_la==ALL || _la==DISTINCT) { { - setState(629); + setState(625); setQuantifier(); } } - setState(632); + setState(628); expression(); - setState(637); + setState(633); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(633); + setState(629); match(T__2); - setState(634); + setState(630); expression(); } } - setState(639); + setState(635); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(642); + setState(638); match(T__1); } } @@ -4478,21 +4654,21 @@ class SqlBaseParser extends Parser { public final FunctionNameContext functionName() throws RecognitionException { FunctionNameContext _localctx = new FunctionNameContext(_ctx, getState()); - enterRule(_localctx, 70, RULE_functionName); + enterRule(_localctx, 72, RULE_functionName); try { - setState(647); + setState(643); switch (_input.LA(1)) { case LEFT: enterOuterAlt(_localctx, 1); { - setState(644); + setState(640); match(LEFT); } break; case RIGHT: enterOuterAlt(_localctx, 2); { - setState(645); + setState(641); match(RIGHT); } break; @@ -4527,7 +4703,7 @@ class SqlBaseParser extends Parser { case BACKQUOTED_IDENTIFIER: enterOuterAlt(_localctx, 3); { - setState(646); + setState(642); identifier(); } break; @@ -4736,16 +4912,16 @@ class SqlBaseParser extends Parser { public final ConstantContext constant() throws RecognitionException { ConstantContext _localctx = new ConstantContext(_ctx, getState()); - enterRule(_localctx, 72, RULE_constant); + enterRule(_localctx, 74, RULE_constant); try { int _alt; - setState(674); + setState(670); switch (_input.LA(1)) { case NULL: _localctx = new NullLiteralContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(649); + setState(645); match(NULL); } break; @@ -4754,7 +4930,7 @@ class SqlBaseParser extends Parser { _localctx = new NumericLiteralContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(650); + setState(646); number(); } break; @@ -4763,7 +4939,7 @@ class SqlBaseParser extends Parser { _localctx = new BooleanLiteralContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(651); + setState(647); booleanValue(); } break; @@ -4771,7 +4947,7 @@ class SqlBaseParser extends Parser { _localctx = new StringLiteralContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(653); + setState(649); _errHandler.sync(this); _alt = 1; do { @@ -4779,7 +4955,7 @@ class SqlBaseParser extends Parser { case 1: { { - setState(652); + setState(648); match(STRING); } } @@ -4787,9 +4963,9 @@ class SqlBaseParser extends Parser { default: throw new NoViableAltException(this); } - setState(655); + setState(651); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,90,_ctx); + _alt = getInterpreter().adaptivePredict(_input,86,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; @@ -4797,7 +4973,7 
@@ class SqlBaseParser extends Parser { _localctx = new ParamLiteralContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(657); + setState(653); match(PARAM); } break; @@ -4805,11 +4981,11 @@ class SqlBaseParser extends Parser { _localctx = new DateEscapedLiteralContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(658); + setState(654); match(DATE_ESC); - setState(659); + setState(655); string(); - setState(660); + setState(656); match(ESC_END); } break; @@ -4817,11 +4993,11 @@ class SqlBaseParser extends Parser { _localctx = new TimeEscapedLiteralContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(662); + setState(658); match(TIME_ESC); - setState(663); + setState(659); string(); - setState(664); + setState(660); match(ESC_END); } break; @@ -4829,11 +5005,11 @@ class SqlBaseParser extends Parser { _localctx = new TimestampEscapedLiteralContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(666); + setState(662); match(TIMESTAMP_ESC); - setState(667); + setState(663); string(); - setState(668); + setState(664); match(ESC_END); } break; @@ -4841,11 +5017,11 @@ class SqlBaseParser extends Parser { _localctx = new GuidEscapedLiteralContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(670); + setState(666); match(GUID_ESC); - setState(671); + setState(667); string(); - setState(672); + setState(668); match(ESC_END); } break; @@ -4892,12 +5068,12 @@ class SqlBaseParser extends Parser { public final ComparisonOperatorContext comparisonOperator() throws RecognitionException { ComparisonOperatorContext _localctx = new ComparisonOperatorContext(_ctx, getState()); - enterRule(_localctx, 74, RULE_comparisonOperator); + enterRule(_localctx, 76, RULE_comparisonOperator); int _la; try { enterOuterAlt(_localctx, 1); { - setState(676); + setState(672); _la = _input.LA(1); if ( !(((((_la - 80)) & ~0x3f) == 0 && ((1L << (_la - 80)) & ((1L << (EQ - 80)) | (1L << (NEQ - 80)) | (1L << (LT - 80)) | (1L << (LTE - 80)) | (1L << (GT - 80)) | (1L << (GTE - 80)))) != 0)) ) { _errHandler.recoverInline(this); @@ -4941,12 +5117,12 @@ class SqlBaseParser extends Parser { public final BooleanValueContext booleanValue() throws RecognitionException { BooleanValueContext _localctx = new BooleanValueContext(_ctx, getState()); - enterRule(_localctx, 76, RULE_booleanValue); + enterRule(_localctx, 78, RULE_booleanValue); int _la; try { enterOuterAlt(_localctx, 1); { - setState(678); + setState(674); _la = _input.LA(1); if ( !(_la==FALSE || _la==TRUE) ) { _errHandler.recoverInline(this); @@ -4999,12 +5175,12 @@ class SqlBaseParser extends Parser { public final DataTypeContext dataType() throws RecognitionException { DataTypeContext _localctx = new DataTypeContext(_ctx, getState()); - enterRule(_localctx, 78, RULE_dataType); + enterRule(_localctx, 80, RULE_dataType); try { _localctx = new PrimitiveDataTypeContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(680); + setState(676); identifier(); } } @@ -5051,30 +5227,30 @@ class SqlBaseParser extends Parser { public final QualifiedNameContext qualifiedName() throws RecognitionException { QualifiedNameContext _localctx = new QualifiedNameContext(_ctx, getState()); - enterRule(_localctx, 80, RULE_qualifiedName); + enterRule(_localctx, 82, RULE_qualifiedName); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(687); + setState(683); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,92,_ctx); + _alt = getInterpreter().adaptivePredict(_input,88,_ctx); while ( _alt!=2 && 
_alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(682); + setState(678); identifier(); - setState(683); + setState(679); match(DOT); } } } - setState(689); + setState(685); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,92,_ctx); + _alt = getInterpreter().adaptivePredict(_input,88,_ctx); } - setState(690); + setState(686); identifier(); } } @@ -5117,15 +5293,15 @@ class SqlBaseParser extends Parser { public final IdentifierContext identifier() throws RecognitionException { IdentifierContext _localctx = new IdentifierContext(_ctx, getState()); - enterRule(_localctx, 82, RULE_identifier); + enterRule(_localctx, 84, RULE_identifier); try { - setState(694); + setState(690); switch (_input.LA(1)) { case QUOTED_IDENTIFIER: case BACKQUOTED_IDENTIFIER: enterOuterAlt(_localctx, 1); { - setState(692); + setState(688); quoteIdentifier(); } break; @@ -5158,7 +5334,7 @@ class SqlBaseParser extends Parser { case DIGIT_IDENTIFIER: enterOuterAlt(_localctx, 2); { - setState(693); + setState(689); unquoteIdentifier(); } break; @@ -5208,46 +5384,46 @@ class SqlBaseParser extends Parser { public final TableIdentifierContext tableIdentifier() throws RecognitionException { TableIdentifierContext _localctx = new TableIdentifierContext(_ctx, getState()); - enterRule(_localctx, 84, RULE_tableIdentifier); + enterRule(_localctx, 86, RULE_tableIdentifier); int _la; try { - setState(708); + setState(704); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,96,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,92,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(699); + setState(695); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(696); + setState(692); ((TableIdentifierContext)_localctx).catalog = identifier(); - setState(697); + setState(693); match(T__3); } } - setState(701); + setState(697); match(TABLE_IDENTIFIER); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(705); + setState(701); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,95,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,91,_ctx) ) { case 1: { - setState(702); + setState(698); ((TableIdentifierContext)_localctx).catalog = identifier(); - setState(703); + setState(699); match(T__3); } break; } - setState(707); + setState(703); ((TableIdentifierContext)_localctx).name = identifier(); } break; @@ -5312,15 +5488,15 @@ class SqlBaseParser extends Parser { public final QuoteIdentifierContext quoteIdentifier() throws RecognitionException { QuoteIdentifierContext _localctx = new QuoteIdentifierContext(_ctx, getState()); - enterRule(_localctx, 86, RULE_quoteIdentifier); + enterRule(_localctx, 88, RULE_quoteIdentifier); try { - setState(712); + setState(708); switch (_input.LA(1)) { case QUOTED_IDENTIFIER: _localctx = 
new QuotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(710); + setState(706); match(QUOTED_IDENTIFIER); } break; @@ -5328,7 +5504,7 @@ class SqlBaseParser extends Parser { _localctx = new BackQuotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(711); + setState(707); match(BACKQUOTED_IDENTIFIER); } break; @@ -5398,15 +5574,15 @@ class SqlBaseParser extends Parser { public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionException { UnquoteIdentifierContext _localctx = new UnquoteIdentifierContext(_ctx, getState()); - enterRule(_localctx, 88, RULE_unquoteIdentifier); + enterRule(_localctx, 90, RULE_unquoteIdentifier); try { - setState(717); + setState(713); switch (_input.LA(1)) { case IDENTIFIER: _localctx = new UnquotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(714); + setState(710); match(IDENTIFIER); } break; @@ -5438,7 +5614,7 @@ class SqlBaseParser extends Parser { _localctx = new UnquotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(715); + setState(711); nonReserved(); } break; @@ -5446,7 +5622,7 @@ class SqlBaseParser extends Parser { _localctx = new DigitIdentifierContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(716); + setState(712); match(DIGIT_IDENTIFIER); } break; @@ -5513,15 +5689,15 @@ class SqlBaseParser extends Parser { public final NumberContext number() throws RecognitionException { NumberContext _localctx = new NumberContext(_ctx, getState()); - enterRule(_localctx, 90, RULE_number); + enterRule(_localctx, 92, RULE_number); try { - setState(721); + setState(717); switch (_input.LA(1)) { case DECIMAL_VALUE: _localctx = new DecimalLiteralContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(719); + setState(715); match(DECIMAL_VALUE); } break; @@ -5529,7 +5705,7 @@ class SqlBaseParser extends Parser { _localctx = new IntegerLiteralContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(720); + setState(716); match(INTEGER_VALUE); } break; @@ -5572,12 +5748,12 @@ class SqlBaseParser extends Parser { public final StringContext string() throws RecognitionException { StringContext _localctx = new StringContext(_ctx, getState()); - enterRule(_localctx, 92, RULE_string); + enterRule(_localctx, 94, RULE_string); int _la; try { enterOuterAlt(_localctx, 1); { - setState(723); + setState(719); _la = _input.LA(1); if ( !(_la==PARAM || _la==STRING) ) { _errHandler.recoverInline(this); @@ -5644,12 +5820,12 @@ class SqlBaseParser extends Parser { public final NonReservedContext nonReserved() throws RecognitionException { NonReservedContext _localctx = new NonReservedContext(_ctx, getState()); - enterRule(_localctx, 94, RULE_nonReserved); + enterRule(_localctx, 96, RULE_nonReserved); int _la; try { enterOuterAlt(_localctx, 1); { - setState(725); + setState(721); _la = _input.LA(1); if ( !(((((_la - 6)) & ~0x3f) == 0 && ((1L << (_la - 6)) & ((1L << (ANALYZE - 6)) | (1L << (ANALYZED - 6)) | (1L << (CATALOGS - 6)) | (1L << (COLUMNS - 6)) | (1L << (DEBUG - 6)) | (1L << (EXECUTABLE - 6)) | (1L << (EXPLAIN - 6)) | (1L << (FORMAT - 6)) | (1L << (FUNCTIONS - 6)) | (1L << (GRAPHVIZ - 6)) | (1L << (MAPPED - 6)) | (1L << (OPTIMIZED - 6)) | (1L << (PARSED - 6)) | (1L << (PHYSICAL - 6)) | (1L << (PLAN - 6)) | (1L << (RLIKE - 6)) | (1L << (QUERY - 6)) | (1L << (SCHEMAS - 6)) | (1L << (SHOW - 6)) | (1L << (SYS - 6)) | (1L << (TABLES - 6)) | (1L << (TEXT - 6)) | (1L << (TYPE - 6)) | (1L << (TYPES - 6)) | (1L << (VERIFY - 6)))) != 0)) ) { 
_errHandler.recoverInline(this); @@ -5673,7 +5849,7 @@ class SqlBaseParser extends Parser { switch (ruleIndex) { case 22: return booleanExpression_sempred((BooleanExpressionContext)_localctx, predIndex); - case 27: + case 28: return valueExpression_sempred((ValueExpressionContext)_localctx, predIndex); } return true; @@ -5700,295 +5876,293 @@ class SqlBaseParser extends Parser { } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3l\u02da\4\2\t\2\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3l\u02d6\4\2\t\2\4"+ "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ "\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+ "\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4"+ - ",\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\3\2\3\2\3\2\3\3\3\3\3\3\3\4"+ - "\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4r\n\4\f\4\16\4u\13\4\3\4\5\4x\n\4"+ - "\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4\u0081\n\4\f\4\16\4\u0084\13\4\3\4\5\4"+ - "\u0087\n\4\3\4\3\4\3\4\3\4\5\4\u008d\n\4\3\4\5\4\u0090\n\4\3\4\3\4\3\4"+ - "\3\4\3\4\3\4\3\4\3\4\3\4\5\4\u009b\n\4\3\4\5\4\u009e\n\4\3\4\3\4\3\4\3"+ - "\4\3\4\3\4\3\4\3\4\5\4\u00a8\n\4\3\4\5\4\u00ab\n\4\3\4\5\4\u00ae\n\4\3"+ - "\4\5\4\u00b1\n\4\3\4\3\4\3\4\3\4\7\4\u00b7\n\4\f\4\16\4\u00ba\13\4\5\4"+ - "\u00bc\n\4\3\4\3\4\3\4\3\4\5\4\u00c2\n\4\3\4\3\4\5\4\u00c6\n\4\3\4\5\4"+ - "\u00c9\n\4\3\4\5\4\u00cc\n\4\3\4\5\4\u00cf\n\4\3\4\3\4\3\4\3\4\3\4\5\4"+ - "\u00d6\n\4\3\5\3\5\3\5\3\5\7\5\u00dc\n\5\f\5\16\5\u00df\13\5\5\5\u00e1"+ - "\n\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\6\7\6\u00eb\n\6\f\6\16\6\u00ee\13\6"+ - "\5\6\u00f0\n\6\3\6\5\6\u00f3\n\6\3\7\3\7\3\7\3\7\3\7\5\7\u00fa\n\7\3\b"+ - "\3\b\3\b\3\b\3\b\5\b\u0101\n\b\3\t\3\t\5\t\u0105\n\t\3\n\3\n\5\n\u0109"+ - "\n\n\3\n\3\n\3\n\7\n\u010e\n\n\f\n\16\n\u0111\13\n\3\n\5\n\u0114\n\n\3"+ - "\n\3\n\5\n\u0118\n\n\3\n\3\n\3\n\5\n\u011d\n\n\3\n\3\n\5\n\u0121\n\n\3"+ - "\13\3\13\3\13\3\13\7\13\u0127\n\13\f\13\16\13\u012a\13\13\3\f\5\f\u012d"+ - "\n\f\3\f\3\f\3\f\7\f\u0132\n\f\f\f\16\f\u0135\13\f\3\r\3\r\3\16\3\16\3"+ - "\16\3\16\7\16\u013d\n\16\f\16\16\16\u0140\13\16\5\16\u0142\n\16\3\16\3"+ - "\16\5\16\u0146\n\16\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\21\3\21"+ - "\5\21\u0152\n\21\3\21\5\21\u0155\n\21\3\22\3\22\7\22\u0159\n\22\f\22\16"+ - "\22\u015c\13\22\3\23\3\23\3\23\3\23\5\23\u0162\n\23\3\23\3\23\3\23\3\23"+ - "\3\23\5\23\u0169\n\23\3\24\5\24\u016c\n\24\3\24\3\24\5\24\u0170\n\24\3"+ - "\24\3\24\5\24\u0174\n\24\3\24\3\24\5\24\u0178\n\24\5\24\u017a\n\24\3\25"+ - "\3\25\3\25\3\25\3\25\3\25\3\25\7\25\u0183\n\25\f\25\16\25\u0186\13\25"+ - "\3\25\3\25\5\25\u018a\n\25\3\26\3\26\5\26\u018e\n\26\3\26\5\26\u0191\n"+ - "\26\3\26\3\26\3\26\3\26\5\26\u0197\n\26\3\26\5\26\u019a\n\26\3\26\3\26"+ - "\3\26\3\26\5\26\u01a0\n\26\3\26\5\26\u01a3\n\26\5\26\u01a5\n\26\3\27\3"+ - "\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\7"+ - "\30\u01b6\n\30\f\30\16\30\u01b9\13\30\3\30\3\30\3\30\3\30\3\30\3\30\3"+ - "\30\3\30\3\30\7\30\u01c4\n\30\f\30\16\30\u01c7\13\30\3\30\3\30\3\30\3"+ - "\30\3\30\3\30\3\30\3\30\3\30\7\30\u01d2\n\30\f\30\16\30\u01d5\13\30\3"+ - "\30\3\30\3\30\5\30\u01da\n\30\3\30\3\30\3\30\3\30\3\30\3\30\7\30\u01e2"+ - "\n\30\f\30\16\30\u01e5\13\30\3\31\3\31\5\31\u01e9\n\31\3\32\5\32\u01ec"+ - "\n\32\3\32\3\32\3\32\3\32\3\32\3\32\5\32\u01f4\n\32\3\32\3\32\3\32\3\32"+ - 
"\3\32\7\32\u01fb\n\32\f\32\16\32\u01fe\13\32\3\32\3\32\3\32\5\32\u0203"+ - "\n\32\3\32\3\32\3\32\3\32\3\32\3\32\5\32\u020b\n\32\3\32\3\32\3\32\5\32"+ - "\u0210\n\32\3\32\3\32\3\32\3\32\5\32\u0216\n\32\3\32\5\32\u0219\n\32\3"+ - "\33\3\33\5\33\u021d\n\33\3\34\3\34\3\34\3\34\3\34\3\34\5\34\u0225\n\34"+ - "\3\35\3\35\3\35\3\35\5\35\u022b\n\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35"+ - "\3\35\3\35\3\35\7\35\u0237\n\35\f\35\16\35\u023a\13\35\3\36\3\36\3\36"+ - "\3\36\3\36\3\36\3\36\5\36\u0243\n\36\3\36\3\36\3\36\3\36\3\36\3\36\3\36"+ - "\3\36\3\36\3\36\3\36\3\36\5\36\u0251\n\36\3\37\3\37\3\37\3\37\3\37\5\37"+ - "\u0258\n\37\3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\5!\u0266\n!\3\"\3\"\3"+ - "\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\5#\u0274\n#\3$\3$\3$\5$\u0279\n$\3$"+ - "\3$\3$\7$\u027e\n$\f$\16$\u0281\13$\5$\u0283\n$\3$\3$\3%\3%\3%\5%\u028a"+ - "\n%\3&\3&\3&\3&\6&\u0290\n&\r&\16&\u0291\3&\3&\3&\3&\3&\3&\3&\3&\3&\3"+ - "&\3&\3&\3&\3&\3&\3&\3&\5&\u02a5\n&\3\'\3\'\3(\3(\3)\3)\3*\3*\3*\7*\u02b0"+ - "\n*\f*\16*\u02b3\13*\3*\3*\3+\3+\5+\u02b9\n+\3,\3,\3,\5,\u02be\n,\3,\3"+ - ",\3,\3,\5,\u02c4\n,\3,\5,\u02c7\n,\3-\3-\5-\u02cb\n-\3.\3.\3.\5.\u02d0"+ - "\n.\3/\3/\5/\u02d4\n/\3\60\3\60\3\61\3\61\3\61\2\4.8\62\2\4\6\b\n\f\16"+ - "\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJLNPRTVXZ\\^`\2"+ - "\20\b\2\7\7\t\t\31\31,,\62\62\66\66\4\2\"\"BB\4\2\t\t\62\62\4\2\37\37"+ - "%%\3\2\25\26\4\2\7\7aa\4\2\r\r\25\25\4\2\7\7\27\27\3\2XY\3\2Z\\\3\2RW"+ - "\4\2\35\35CC\3\2_`\20\2\b\t\22\24\31\31\33\33\36\36!\",,\62\62\668:<>"+ - "?ABDEGG\u0336\2b\3\2\2\2\4e\3\2\2\2\6\u00d5\3\2\2\2\b\u00e0\3\2\2\2\n"+ - "\u00e4\3\2\2\2\f\u00f9\3\2\2\2\16\u0100\3\2\2\2\20\u0102\3\2\2\2\22\u0106"+ - "\3\2\2\2\24\u0122\3\2\2\2\26\u012c\3\2\2\2\30\u0136\3\2\2\2\32\u0145\3"+ - "\2\2\2\34\u0147\3\2\2\2\36\u014d\3\2\2\2 \u014f\3\2\2\2\"\u0156\3\2\2"+ - "\2$\u0168\3\2\2\2&\u0179\3\2\2\2(\u0189\3\2\2\2*\u01a4\3\2\2\2,\u01a6"+ - "\3\2\2\2.\u01d9\3\2\2\2\60\u01e6\3\2\2\2\62\u0218\3\2\2\2\64\u021a\3\2"+ - "\2\2\66\u0224\3\2\2\28\u022a\3\2\2\2:\u0250\3\2\2\2<\u0257\3\2\2\2>\u0259"+ - "\3\2\2\2@\u0265\3\2\2\2B\u0267\3\2\2\2D\u0273\3\2\2\2F\u0275\3\2\2\2H"+ - "\u0289\3\2\2\2J\u02a4\3\2\2\2L\u02a6\3\2\2\2N\u02a8\3\2\2\2P\u02aa\3\2"+ - "\2\2R\u02b1\3\2\2\2T\u02b8\3\2\2\2V\u02c6\3\2\2\2X\u02ca\3\2\2\2Z\u02cf"+ - "\3\2\2\2\\\u02d3\3\2\2\2^\u02d5\3\2\2\2`\u02d7\3\2\2\2bc\5\6\4\2cd\7\2"+ - "\2\3d\3\3\2\2\2ef\5,\27\2fg\7\2\2\3g\5\3\2\2\2h\u00d6\5\b\5\2iw\7\33\2"+ - "\2js\7\3\2\2kl\78\2\2lr\t\2\2\2mn\7\36\2\2nr\t\3\2\2op\7G\2\2pr\5N(\2"+ - "qk\3\2\2\2qm\3\2\2\2qo\3\2\2\2ru\3\2\2\2sq\3\2\2\2st\3\2\2\2tv\3\2\2\2"+ - "us\3\2\2\2vx\7\4\2\2wj\3\2\2\2wx\3\2\2\2xy\3\2\2\2y\u00d6\5\6\4\2z\u0086"+ - "\7\24\2\2{\u0082\7\3\2\2|}\78\2\2}\u0081\t\4\2\2~\177\7\36\2\2\177\u0081"+ - "\t\3\2\2\u0080|\3\2\2\2\u0080~\3\2\2\2\u0081\u0084\3\2\2\2\u0082\u0080"+ - "\3\2\2\2\u0082\u0083\3\2\2\2\u0083\u0085\3\2\2\2\u0084\u0082\3\2\2\2\u0085"+ - "\u0087\7\4\2\2\u0086{\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0088\3\2\2\2"+ - "\u0088\u00d6\5\6\4\2\u0089\u008a\7>\2\2\u008a\u008f\7A\2\2\u008b\u008d"+ - "\7*\2\2\u008c\u008b\3\2\2\2\u008c\u008d\3\2\2\2\u008d\u008e\3\2\2\2\u008e"+ - "\u0090\5\64\33\2\u008f\u008c\3\2\2\2\u008f\u0090\3\2\2\2\u0090\u00d6\3"+ - "\2\2\2\u0091\u0092\7>\2\2\u0092\u0093\7\23\2\2\u0093\u0094\t\5\2\2\u0094"+ - "\u00d6\5V,\2\u0095\u0096\t\6\2\2\u0096\u00d6\5V,\2\u0097\u0098\7>\2\2"+ - "\u0098\u009d\7!\2\2\u0099\u009b\7*\2\2\u009a\u0099\3\2\2\2\u009a\u009b"+ - "\3\2\2\2\u009b\u009c\3\2\2\2\u009c\u009e\5\64\33\2\u009d\u009a\3\2\2\2"+ - 
"\u009d\u009e\3\2\2\2\u009e\u00d6\3\2\2\2\u009f\u00a0\7>\2\2\u00a0\u00d6"+ - "\7<\2\2\u00a1\u00a2\7?\2\2\u00a2\u00d6\7\22\2\2\u00a3\u00a4\7?\2\2\u00a4"+ - "\u00aa\7A\2\2\u00a5\u00a7\7\21\2\2\u00a6\u00a8\7*\2\2\u00a7\u00a6\3\2"+ - "\2\2\u00a7\u00a8\3\2\2\2\u00a8\u00a9\3\2\2\2\u00a9\u00ab\5\64\33\2\u00aa"+ - "\u00a5\3\2\2\2\u00aa\u00ab\3\2\2\2\u00ab\u00b0\3\2\2\2\u00ac\u00ae\7*"+ - "\2\2\u00ad\u00ac\3\2\2\2\u00ad\u00ae\3\2\2\2\u00ae\u00af\3\2\2\2\u00af"+ - "\u00b1\5\64\33\2\u00b0\u00ad\3\2\2\2\u00b0\u00b1\3\2\2\2\u00b1\u00bb\3"+ - "\2\2\2\u00b2\u00b3\7D\2\2\u00b3\u00b8\5^\60\2\u00b4\u00b5\7\5\2\2\u00b5"+ - "\u00b7\5^\60\2\u00b6\u00b4\3\2\2\2\u00b7\u00ba\3\2\2\2\u00b8\u00b6\3\2"+ - "\2\2\u00b8\u00b9\3\2\2\2\u00b9\u00bc\3\2\2\2\u00ba\u00b8\3\2\2\2\u00bb"+ - "\u00b2\3\2\2\2\u00bb\u00bc\3\2\2\2\u00bc\u00d6\3\2\2\2\u00bd\u00be\7?"+ - "\2\2\u00be\u00c1\7\23\2\2\u00bf\u00c0\7\21\2\2\u00c0\u00c2\5^\60\2\u00c1"+ - "\u00bf\3\2\2\2\u00c1\u00c2\3\2\2\2\u00c2\u00c8\3\2\2\2\u00c3\u00c5\7@"+ - "\2\2\u00c4\u00c6\7*\2\2\u00c5\u00c4\3\2\2\2\u00c5\u00c6\3\2\2\2\u00c6"+ - "\u00c7\3\2\2\2\u00c7\u00c9\5\64\33\2\u00c8\u00c3\3\2\2\2\u00c8\u00c9\3"+ - "\2\2\2\u00c9\u00ce\3\2\2\2\u00ca\u00cc\7*\2\2\u00cb\u00ca\3\2\2\2\u00cb"+ - "\u00cc\3\2\2\2\u00cc\u00cd\3\2\2\2\u00cd\u00cf\5\64\33\2\u00ce\u00cb\3"+ - "\2\2\2\u00ce\u00cf\3\2\2\2\u00cf\u00d6\3\2\2\2\u00d0\u00d1\7?\2\2\u00d1"+ - "\u00d6\7E\2\2\u00d2\u00d3\7?\2\2\u00d3\u00d4\7@\2\2\u00d4\u00d6\7E\2\2"+ - "\u00d5h\3\2\2\2\u00d5i\3\2\2\2\u00d5z\3\2\2\2\u00d5\u0089\3\2\2\2\u00d5"+ - "\u0091\3\2\2\2\u00d5\u0095\3\2\2\2\u00d5\u0097\3\2\2\2\u00d5\u009f\3\2"+ - "\2\2\u00d5\u00a1\3\2\2\2\u00d5\u00a3\3\2\2\2\u00d5\u00bd\3\2\2\2\u00d5"+ - "\u00d0\3\2\2\2\u00d5\u00d2\3\2\2\2\u00d6\7\3\2\2\2\u00d7\u00d8\7I\2\2"+ - "\u00d8\u00dd\5\34\17\2\u00d9\u00da\7\5\2\2\u00da\u00dc\5\34\17\2\u00db"+ - "\u00d9\3\2\2\2\u00dc\u00df\3\2\2\2\u00dd\u00db\3\2\2\2\u00dd\u00de\3\2"+ - "\2\2\u00de\u00e1\3\2\2\2\u00df\u00dd\3\2\2\2\u00e0\u00d7\3\2\2\2\u00e0"+ - "\u00e1\3\2\2\2\u00e1\u00e2\3\2\2\2\u00e2\u00e3\5\n\6\2\u00e3\t\3\2\2\2"+ - "\u00e4\u00ef\5\16\b\2\u00e5\u00e6\7\64\2\2\u00e6\u00e7\7\17\2\2\u00e7"+ - "\u00ec\5\20\t\2\u00e8\u00e9\7\5\2\2\u00e9\u00eb\5\20\t\2\u00ea\u00e8\3"+ - "\2\2\2\u00eb\u00ee\3\2\2\2\u00ec\u00ea\3\2\2\2\u00ec\u00ed\3\2\2\2\u00ed"+ - "\u00f0\3\2\2\2\u00ee\u00ec\3\2\2\2\u00ef\u00e5\3\2\2\2\u00ef\u00f0\3\2"+ - "\2\2\u00f0\u00f2\3\2\2\2\u00f1\u00f3\5\f\7\2\u00f2\u00f1\3\2\2\2\u00f2"+ - "\u00f3\3\2\2\2\u00f3\13\3\2\2\2\u00f4\u00f5\7+\2\2\u00f5\u00fa\t\7\2\2"+ - "\u00f6\u00f7\7L\2\2\u00f7\u00f8\t\7\2\2\u00f8\u00fa\7Q\2\2\u00f9\u00f4"+ - "\3\2\2\2\u00f9\u00f6\3\2\2\2\u00fa\r\3\2\2\2\u00fb\u0101\5\22\n\2\u00fc"+ - "\u00fd\7\3\2\2\u00fd\u00fe\5\n\6\2\u00fe\u00ff\7\4\2\2\u00ff\u0101\3\2"+ - "\2\2\u0100\u00fb\3\2\2\2\u0100\u00fc\3\2\2\2\u0101\17\3\2\2\2\u0102\u0104"+ - "\5,\27\2\u0103\u0105\t\b\2\2\u0104\u0103\3\2\2\2\u0104\u0105\3\2\2\2\u0105"+ - "\21\3\2\2\2\u0106\u0108\7=\2\2\u0107\u0109\5\36\20\2\u0108\u0107\3\2\2"+ - "\2\u0108\u0109\3\2\2\2\u0109\u010a\3\2\2\2\u010a\u010f\5 \21\2\u010b\u010c"+ - "\7\5\2\2\u010c\u010e\5 \21\2\u010d\u010b\3\2\2\2\u010e\u0111\3\2\2\2\u010f"+ - "\u010d\3\2\2\2\u010f\u0110\3\2\2\2\u0110\u0113\3\2\2\2\u0111\u010f\3\2"+ - "\2\2\u0112\u0114\5\24\13\2\u0113\u0112\3\2\2\2\u0113\u0114\3\2\2\2\u0114"+ - "\u0117\3\2\2\2\u0115\u0116\7H\2\2\u0116\u0118\5.\30\2\u0117\u0115\3\2"+ - "\2\2\u0117\u0118\3\2\2\2\u0118\u011c\3\2\2\2\u0119\u011a\7#\2\2\u011a"+ - "\u011b\7\17\2\2\u011b\u011d\5\26\f\2\u011c\u0119\3\2\2\2\u011c\u011d\3"+ - 
"\2\2\2\u011d\u0120\3\2\2\2\u011e\u011f\7$\2\2\u011f\u0121\5.\30\2\u0120"+ - "\u011e\3\2\2\2\u0120\u0121\3\2\2\2\u0121\23\3\2\2\2\u0122\u0123\7\37\2"+ - "\2\u0123\u0128\5\"\22\2\u0124\u0125\7\5\2\2\u0125\u0127\5\"\22\2\u0126"+ - "\u0124\3\2\2\2\u0127\u012a\3\2\2\2\u0128\u0126\3\2\2\2\u0128\u0129\3\2"+ - "\2\2\u0129\25\3\2\2\2\u012a\u0128\3\2\2\2\u012b\u012d\5\36\20\2\u012c"+ - "\u012b\3\2\2\2\u012c\u012d\3\2\2\2\u012d\u012e\3\2\2\2\u012e\u0133\5\30"+ - "\r\2\u012f\u0130\7\5\2\2\u0130\u0132\5\30\r\2\u0131\u012f\3\2\2\2\u0132"+ - "\u0135\3\2\2\2\u0133\u0131\3\2\2\2\u0133\u0134\3\2\2\2\u0134\27\3\2\2"+ - "\2\u0135\u0133\3\2\2\2\u0136\u0137\5\32\16\2\u0137\31\3\2\2\2\u0138\u0141"+ - "\7\3\2\2\u0139\u013e\5,\27\2\u013a\u013b\7\5\2\2\u013b\u013d\5,\27\2\u013c"+ - "\u013a\3\2\2\2\u013d\u0140\3\2\2\2\u013e\u013c\3\2\2\2\u013e\u013f\3\2"+ - "\2\2\u013f\u0142\3\2\2\2\u0140\u013e\3\2\2\2\u0141\u0139\3\2\2\2\u0141"+ - "\u0142\3\2\2\2\u0142\u0143\3\2\2\2\u0143\u0146\7\4\2\2\u0144\u0146\5,"+ - "\27\2\u0145\u0138\3\2\2\2\u0145\u0144\3\2\2\2\u0146\33\3\2\2\2\u0147\u0148"+ - "\5T+\2\u0148\u0149\7\f\2\2\u0149\u014a\7\3\2\2\u014a\u014b\5\n\6\2\u014b"+ - "\u014c\7\4\2\2\u014c\35\3\2\2\2\u014d\u014e\t\t\2\2\u014e\37\3\2\2\2\u014f"+ - "\u0154\5,\27\2\u0150\u0152\7\f\2\2\u0151\u0150\3\2\2\2\u0151\u0152\3\2"+ - "\2\2\u0152\u0153\3\2\2\2\u0153\u0155\5T+\2\u0154\u0151\3\2\2\2\u0154\u0155"+ - "\3\2\2\2\u0155!\3\2\2\2\u0156\u015a\5*\26\2\u0157\u0159\5$\23\2\u0158"+ - "\u0157\3\2\2\2\u0159\u015c\3\2\2\2\u015a\u0158\3\2\2\2\u015a\u015b\3\2"+ - "\2\2\u015b#\3\2\2\2\u015c\u015a\3\2\2\2\u015d\u015e\5&\24\2\u015e\u015f"+ - "\7(\2\2\u015f\u0161\5*\26\2\u0160\u0162\5(\25\2\u0161\u0160\3\2\2\2\u0161"+ - "\u0162\3\2\2\2\u0162\u0169\3\2\2\2\u0163\u0164\7.\2\2\u0164\u0165\5&\24"+ - "\2\u0165\u0166\7(\2\2\u0166\u0167\5*\26\2\u0167\u0169\3\2\2\2\u0168\u015d"+ - "\3\2\2\2\u0168\u0163\3\2\2\2\u0169%\3\2\2\2\u016a\u016c\7&\2\2\u016b\u016a"+ - "\3\2\2\2\u016b\u016c\3\2\2\2\u016c\u017a\3\2\2\2\u016d\u016f\7)\2\2\u016e"+ - "\u0170\7\65\2\2\u016f\u016e\3\2\2\2\u016f\u0170\3\2\2\2\u0170\u017a\3"+ - "\2\2\2\u0171\u0173\79\2\2\u0172\u0174\7\65\2\2\u0173\u0172\3\2\2\2\u0173"+ - "\u0174\3\2\2\2\u0174\u017a\3\2\2\2\u0175\u0177\7 \2\2\u0176\u0178\7\65"+ - "\2\2\u0177\u0176\3\2\2\2\u0177\u0178\3\2\2\2\u0178\u017a\3\2\2\2\u0179"+ - "\u016b\3\2\2\2\u0179\u016d\3\2\2\2\u0179\u0171\3\2\2\2\u0179\u0175\3\2"+ - "\2\2\u017a\'\3\2\2\2\u017b\u017c\7\61\2\2\u017c\u018a\5.\30\2\u017d\u017e"+ - "\7F\2\2\u017e\u017f\7\3\2\2\u017f\u0184\5T+\2\u0180\u0181\7\5\2\2\u0181"+ - "\u0183\5T+\2\u0182\u0180\3\2\2\2\u0183\u0186\3\2\2\2\u0184\u0182\3\2\2"+ - "\2\u0184\u0185\3\2\2\2\u0185\u0187\3\2\2\2\u0186\u0184\3\2\2\2\u0187\u0188"+ - "\7\4\2\2\u0188\u018a\3\2\2\2\u0189\u017b\3\2\2\2\u0189\u017d\3\2\2\2\u018a"+ - ")\3\2\2\2\u018b\u0190\5V,\2\u018c\u018e\7\f\2\2\u018d\u018c\3\2\2\2\u018d"+ - "\u018e\3\2\2\2\u018e\u018f\3\2\2\2\u018f\u0191\5R*\2\u0190\u018d\3\2\2"+ - "\2\u0190\u0191\3\2\2\2\u0191\u01a5\3\2\2\2\u0192\u0193\7\3\2\2\u0193\u0194"+ - "\5\n\6\2\u0194\u0199\7\4\2\2\u0195\u0197\7\f\2\2\u0196\u0195\3\2\2\2\u0196"+ - "\u0197\3\2\2\2\u0197\u0198\3\2\2\2\u0198\u019a\5R*\2\u0199\u0196\3\2\2"+ - "\2\u0199\u019a\3\2\2\2\u019a\u01a5\3\2\2\2\u019b\u019c\7\3\2\2\u019c\u019d"+ - "\5\"\22\2\u019d\u01a2\7\4\2\2\u019e\u01a0\7\f\2\2\u019f\u019e\3\2\2\2"+ - "\u019f\u01a0\3\2\2\2\u01a0\u01a1\3\2\2\2\u01a1\u01a3\5R*\2\u01a2\u019f"+ - "\3\2\2\2\u01a2\u01a3\3\2\2\2\u01a3\u01a5\3\2\2\2\u01a4\u018b\3\2\2\2\u01a4"+ - 
"\u0192\3\2\2\2\u01a4\u019b\3\2\2\2\u01a5+\3\2\2\2\u01a6\u01a7\5.\30\2"+ - "\u01a7-\3\2\2\2\u01a8\u01a9\b\30\1\2\u01a9\u01aa\7/\2\2\u01aa\u01da\5"+ - ".\30\n\u01ab\u01ac\7\32\2\2\u01ac\u01ad\7\3\2\2\u01ad\u01ae\5\b\5\2\u01ae"+ - "\u01af\7\4\2\2\u01af\u01da\3\2\2\2\u01b0\u01b1\7;\2\2\u01b1\u01b2\7\3"+ - "\2\2\u01b2\u01b7\5^\60\2\u01b3\u01b4\7\5\2\2\u01b4\u01b6\5^\60\2\u01b5"+ - "\u01b3\3\2\2\2\u01b6\u01b9\3\2\2\2\u01b7\u01b5\3\2\2\2\u01b7\u01b8\3\2"+ - "\2\2\u01b8\u01ba\3\2\2\2\u01b9\u01b7\3\2\2\2\u01ba\u01bb\7\4\2\2\u01bb"+ - "\u01da\3\2\2\2\u01bc\u01bd\7-\2\2\u01bd\u01be\7\3\2\2\u01be\u01bf\5R*"+ - "\2\u01bf\u01c0\7\5\2\2\u01c0\u01c5\5^\60\2\u01c1\u01c2\7\5\2\2\u01c2\u01c4"+ - "\5^\60\2\u01c3\u01c1\3\2\2\2\u01c4\u01c7\3\2\2\2\u01c5\u01c3\3\2\2\2\u01c5"+ - "\u01c6\3\2\2\2\u01c6\u01c8\3\2\2\2\u01c7\u01c5\3\2\2\2\u01c8\u01c9\7\4"+ - "\2\2\u01c9\u01da\3\2\2\2\u01ca\u01cb\7-\2\2\u01cb\u01cc\7\3\2\2\u01cc"+ - "\u01cd\5^\60\2\u01cd\u01ce\7\5\2\2\u01ce\u01d3\5^\60\2\u01cf\u01d0\7\5"+ - "\2\2\u01d0\u01d2\5^\60\2\u01d1\u01cf\3\2\2\2\u01d2\u01d5\3\2\2\2\u01d3"+ - "\u01d1\3\2\2\2\u01d3\u01d4\3\2\2\2\u01d4\u01d6\3\2\2\2\u01d5\u01d3\3\2"+ - "\2\2\u01d6\u01d7\7\4\2\2\u01d7\u01da\3\2\2\2\u01d8\u01da\5\60\31\2\u01d9"+ - "\u01a8\3\2\2\2\u01d9\u01ab\3\2\2\2\u01d9\u01b0\3\2\2\2\u01d9\u01bc\3\2"+ - "\2\2\u01d9\u01ca\3\2\2\2\u01d9\u01d8\3\2\2\2\u01da\u01e3\3\2\2\2\u01db"+ - "\u01dc\f\4\2\2\u01dc\u01dd\7\n\2\2\u01dd\u01e2\5.\30\5\u01de\u01df\f\3"+ - "\2\2\u01df\u01e0\7\63\2\2\u01e0\u01e2\5.\30\4\u01e1\u01db\3\2\2\2\u01e1"+ - "\u01de\3\2\2\2\u01e2\u01e5\3\2\2\2\u01e3\u01e1\3\2\2\2\u01e3\u01e4\3\2"+ - "\2\2\u01e4/\3\2\2\2\u01e5\u01e3\3\2\2\2\u01e6\u01e8\58\35\2\u01e7\u01e9"+ - "\5\62\32\2\u01e8\u01e7\3\2\2\2\u01e8\u01e9\3\2\2\2\u01e9\61\3\2\2\2\u01ea"+ - "\u01ec\7/\2\2\u01eb\u01ea\3\2\2\2\u01eb\u01ec\3\2\2\2\u01ec\u01ed\3\2"+ - "\2\2\u01ed\u01ee\7\16\2\2\u01ee\u01ef\58\35\2\u01ef\u01f0\7\n\2\2\u01f0"+ - "\u01f1\58\35\2\u01f1\u0219\3\2\2\2\u01f2\u01f4\7/\2\2\u01f3\u01f2\3\2"+ - "\2\2\u01f3\u01f4\3\2\2\2\u01f4\u01f5\3\2\2\2\u01f5\u01f6\7%\2\2\u01f6"+ - "\u01f7\7\3\2\2\u01f7\u01fc\5,\27\2\u01f8\u01f9\7\5\2\2\u01f9\u01fb\5,"+ - "\27\2\u01fa\u01f8\3\2\2\2\u01fb\u01fe\3\2\2\2\u01fc\u01fa\3\2\2\2\u01fc"+ - "\u01fd\3\2\2\2\u01fd\u01ff\3\2\2\2\u01fe\u01fc\3\2\2\2\u01ff\u0200\7\4"+ - "\2\2\u0200\u0219\3\2\2\2\u0201\u0203\7/\2\2\u0202\u0201\3\2\2\2\u0202"+ - "\u0203\3\2\2\2\u0203\u0204\3\2\2\2\u0204\u0205\7%\2\2\u0205\u0206\7\3"+ - "\2\2\u0206\u0207\5\b\5\2\u0207\u0208\7\4\2\2\u0208\u0219\3\2\2\2\u0209"+ - "\u020b\7/\2\2\u020a\u0209\3\2\2\2\u020a\u020b\3\2\2\2\u020b\u020c\3\2"+ - "\2\2\u020c\u020d\7*\2\2\u020d\u0219\5\64\33\2\u020e\u0210\7/\2\2\u020f"+ - "\u020e\3\2\2\2\u020f\u0210\3\2\2\2\u0210\u0211\3\2\2\2\u0211\u0212\7:"+ - "\2\2\u0212\u0219\5^\60\2\u0213\u0215\7\'\2\2\u0214\u0216\7/\2\2\u0215"+ - "\u0214\3\2\2\2\u0215\u0216\3\2\2\2\u0216\u0217\3\2\2\2\u0217\u0219\7\60"+ - "\2\2\u0218\u01eb\3\2\2\2\u0218\u01f3\3\2\2\2\u0218\u0202\3\2\2\2\u0218"+ - "\u020a\3\2\2\2\u0218\u020f\3\2\2\2\u0218\u0213\3\2\2\2\u0219\63\3\2\2"+ - "\2\u021a\u021c\5^\60\2\u021b\u021d\5\66\34\2\u021c\u021b\3\2\2\2\u021c"+ - "\u021d\3\2\2\2\u021d\65\3\2\2\2\u021e\u021f\7\30\2\2\u021f\u0225\5^\60"+ - "\2\u0220\u0221\7J\2\2\u0221\u0222\5^\60\2\u0222\u0223\7Q\2\2\u0223\u0225"+ - "\3\2\2\2\u0224\u021e\3\2\2\2\u0224\u0220\3\2\2\2\u0225\67\3\2\2\2\u0226"+ - "\u0227\b\35\1\2\u0227\u022b\5:\36\2\u0228\u0229\t\n\2\2\u0229\u022b\5"+ - "8\35\6\u022a\u0226\3\2\2\2\u022a\u0228\3\2\2\2\u022b\u0238\3\2\2\2\u022c"+ - 
"\u022d\f\5\2\2\u022d\u022e\t\13\2\2\u022e\u0237\58\35\6\u022f\u0230\f"+ - "\4\2\2\u0230\u0231\t\n\2\2\u0231\u0237\58\35\5\u0232\u0233\f\3\2\2\u0233"+ - "\u0234\5L\'\2\u0234\u0235\58\35\4\u0235\u0237\3\2\2\2\u0236\u022c\3\2"+ - "\2\2\u0236\u022f\3\2\2\2\u0236\u0232\3\2\2\2\u0237\u023a\3\2\2\2\u0238"+ - "\u0236\3\2\2\2\u0238\u0239\3\2\2\2\u02399\3\2\2\2\u023a\u0238\3\2\2\2"+ - "\u023b\u0251\5<\37\2\u023c\u0251\5@!\2\u023d\u0251\5J&\2\u023e\u0251\7"+ - "Z\2\2\u023f\u0240\5R*\2\u0240\u0241\7^\2\2\u0241\u0243\3\2\2\2\u0242\u023f"+ - "\3\2\2\2\u0242\u0243\3\2\2\2\u0243\u0244\3\2\2\2\u0244\u0251\7Z\2\2\u0245"+ - "\u0251\5D#\2\u0246\u0247\7\3\2\2\u0247\u0248\5\b\5\2\u0248\u0249\7\4\2"+ - "\2\u0249\u0251\3\2\2\2\u024a\u0251\5T+\2\u024b\u0251\5R*\2\u024c\u024d"+ - "\7\3\2\2\u024d\u024e\5,\27\2\u024e\u024f\7\4\2\2\u024f\u0251\3\2\2\2\u0250"+ - "\u023b\3\2\2\2\u0250\u023c\3\2\2\2\u0250\u023d\3\2\2\2\u0250\u023e\3\2"+ - "\2\2\u0250\u0242\3\2\2\2\u0250\u0245\3\2\2\2\u0250\u0246\3\2\2\2\u0250"+ - "\u024a\3\2\2\2\u0250\u024b\3\2\2\2\u0250\u024c\3\2\2\2\u0251;\3\2\2\2"+ - "\u0252\u0258\5> \2\u0253\u0254\7K\2\2\u0254\u0255\5> \2\u0255\u0256\7"+ - "Q\2\2\u0256\u0258\3\2\2\2\u0257\u0252\3\2\2\2\u0257\u0253\3\2\2\2\u0258"+ - "=\3\2\2\2\u0259\u025a\7\20\2\2\u025a\u025b\7\3\2\2\u025b\u025c\5,\27\2"+ - "\u025c\u025d\7\f\2\2\u025d\u025e\5P)\2\u025e\u025f\7\4\2\2\u025f?\3\2"+ - "\2\2\u0260\u0266\5B\"\2\u0261\u0262\7K\2\2\u0262\u0263\5B\"\2\u0263\u0264"+ - "\7Q\2\2\u0264\u0266\3\2\2\2\u0265\u0260\3\2\2\2\u0265\u0261\3\2\2\2\u0266"+ - "A\3\2\2\2\u0267\u0268\7\34\2\2\u0268\u0269\7\3\2\2\u0269\u026a\5T+\2\u026a"+ - "\u026b\7\37\2\2\u026b\u026c\58\35\2\u026c\u026d\7\4\2\2\u026dC\3\2\2\2"+ - "\u026e\u0274\5F$\2\u026f\u0270\7K\2\2\u0270\u0271\5F$\2\u0271\u0272\7"+ - "Q\2\2\u0272\u0274\3\2\2\2\u0273\u026e\3\2\2\2\u0273\u026f\3\2\2\2\u0274"+ - "E\3\2\2\2\u0275\u0276\5H%\2\u0276\u0282\7\3\2\2\u0277\u0279\5\36\20\2"+ - "\u0278\u0277\3\2\2\2\u0278\u0279\3\2\2\2\u0279\u027a\3\2\2\2\u027a\u027f"+ - "\5,\27\2\u027b\u027c\7\5\2\2\u027c\u027e\5,\27\2\u027d\u027b\3\2\2\2\u027e"+ - "\u0281\3\2\2\2\u027f\u027d\3\2\2\2\u027f\u0280\3\2\2\2\u0280\u0283\3\2"+ - "\2\2\u0281\u027f\3\2\2\2\u0282\u0278\3\2\2\2\u0282\u0283\3\2\2\2\u0283"+ - "\u0284\3\2\2\2\u0284\u0285\7\4\2\2\u0285G\3\2\2\2\u0286\u028a\7)\2\2\u0287"+ - "\u028a\79\2\2\u0288\u028a\5T+\2\u0289\u0286\3\2\2\2\u0289\u0287\3\2\2"+ - "\2\u0289\u0288\3\2\2\2\u028aI\3\2\2\2\u028b\u02a5\7\60\2\2\u028c\u02a5"+ - "\5\\/\2\u028d\u02a5\5N(\2\u028e\u0290\7`\2\2\u028f\u028e\3\2\2\2\u0290"+ - "\u0291\3\2\2\2\u0291\u028f\3\2\2\2\u0291\u0292\3\2\2\2\u0292\u02a5\3\2"+ - "\2\2\u0293\u02a5\7_\2\2\u0294\u0295\7M\2\2\u0295\u0296\5^\60\2\u0296\u0297"+ - "\7Q\2\2\u0297\u02a5\3\2\2\2\u0298\u0299\7N\2\2\u0299\u029a\5^\60\2\u029a"+ - "\u029b\7Q\2\2\u029b\u02a5\3\2\2\2\u029c\u029d\7O\2\2\u029d\u029e\5^\60"+ - "\2\u029e\u029f\7Q\2\2\u029f\u02a5\3\2\2\2\u02a0\u02a1\7P\2\2\u02a1\u02a2"+ - "\5^\60\2\u02a2\u02a3\7Q\2\2\u02a3\u02a5\3\2\2\2\u02a4\u028b\3\2\2\2\u02a4"+ - "\u028c\3\2\2\2\u02a4\u028d\3\2\2\2\u02a4\u028f\3\2\2\2\u02a4\u0293\3\2"+ - "\2\2\u02a4\u0294\3\2\2\2\u02a4\u0298\3\2\2\2\u02a4\u029c\3\2\2\2\u02a4"+ - "\u02a0\3\2\2\2\u02a5K\3\2\2\2\u02a6\u02a7\t\f\2\2\u02a7M\3\2\2\2\u02a8"+ - "\u02a9\t\r\2\2\u02a9O\3\2\2\2\u02aa\u02ab\5T+\2\u02abQ\3\2\2\2\u02ac\u02ad"+ - "\5T+\2\u02ad\u02ae\7^\2\2\u02ae\u02b0\3\2\2\2\u02af\u02ac\3\2\2\2\u02b0"+ - "\u02b3\3\2\2\2\u02b1\u02af\3\2\2\2\u02b1\u02b2\3\2\2\2\u02b2\u02b4\3\2"+ - "\2\2\u02b3\u02b1\3\2\2\2\u02b4\u02b5\5T+\2\u02b5S\3\2\2\2\u02b6\u02b9"+ - 
"\5X-\2\u02b7\u02b9\5Z.\2\u02b8\u02b6\3\2\2\2\u02b8\u02b7\3\2\2\2\u02b9"+ - "U\3\2\2\2\u02ba\u02bb\5T+\2\u02bb\u02bc\7\6\2\2\u02bc\u02be\3\2\2\2\u02bd"+ - "\u02ba\3\2\2\2\u02bd\u02be\3\2\2\2\u02be\u02bf\3\2\2\2\u02bf\u02c7\7e"+ - "\2\2\u02c0\u02c1\5T+\2\u02c1\u02c2\7\6\2\2\u02c2\u02c4\3\2\2\2\u02c3\u02c0"+ - "\3\2\2\2\u02c3\u02c4\3\2\2\2\u02c4\u02c5\3\2\2\2\u02c5\u02c7\5T+\2\u02c6"+ - "\u02bd\3\2\2\2\u02c6\u02c3\3\2\2\2\u02c7W\3\2\2\2\u02c8\u02cb\7f\2\2\u02c9"+ - "\u02cb\7g\2\2\u02ca\u02c8\3\2\2\2\u02ca\u02c9\3\2\2\2\u02cbY\3\2\2\2\u02cc"+ - "\u02d0\7c\2\2\u02cd\u02d0\5`\61\2\u02ce\u02d0\7d\2\2\u02cf\u02cc\3\2\2"+ - "\2\u02cf\u02cd\3\2\2\2\u02cf\u02ce\3\2\2\2\u02d0[\3\2\2\2\u02d1\u02d4"+ - "\7b\2\2\u02d2\u02d4\7a\2\2\u02d3\u02d1\3\2\2\2\u02d3\u02d2\3\2\2\2\u02d4"+ - "]\3\2\2\2\u02d5\u02d6\t\16\2\2\u02d6_\3\2\2\2\u02d7\u02d8\t\17\2\2\u02d8"+ - "a\3\2\2\2fqsw\u0080\u0082\u0086\u008c\u008f\u009a\u009d\u00a7\u00aa\u00ad"+ - "\u00b0\u00b8\u00bb\u00c1\u00c5\u00c8\u00cb\u00ce\u00d5\u00dd\u00e0\u00ec"+ - "\u00ef\u00f2\u00f9\u0100\u0104\u0108\u010f\u0113\u0117\u011c\u0120\u0128"+ - "\u012c\u0133\u013e\u0141\u0145\u0151\u0154\u015a\u0161\u0168\u016b\u016f"+ - "\u0173\u0177\u0179\u0184\u0189\u018d\u0190\u0196\u0199\u019f\u01a2\u01a4"+ - "\u01b7\u01c5\u01d3\u01d9\u01e1\u01e3\u01e8\u01eb\u01f3\u01fc\u0202\u020a"+ - "\u020f\u0215\u0218\u021c\u0224\u022a\u0236\u0238\u0242\u0250\u0257\u0265"+ - "\u0273\u0278\u027f\u0282\u0289\u0291\u02a4\u02b1\u02b8\u02bd\u02c3\u02c6"+ - "\u02ca\u02cf\u02d3"; + ",\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\3\2\3\2\3\2\3\3\3"+ + "\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4t\n\4\f\4\16\4w\13\4\3\4"+ + "\5\4z\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4\u0083\n\4\f\4\16\4\u0086\13"+ + "\4\3\4\5\4\u0089\n\4\3\4\3\4\3\4\3\4\3\4\5\4\u0090\n\4\3\4\3\4\3\4\3\4"+ + "\3\4\5\4\u0097\n\4\3\4\3\4\3\4\5\4\u009c\n\4\3\4\3\4\3\4\5\4\u00a1\n\4"+ + "\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\5\4\u00ab\n\4\3\4\3\4\5\4\u00af\n\4\3"+ + "\4\3\4\3\4\3\4\7\4\u00b5\n\4\f\4\16\4\u00b8\13\4\5\4\u00ba\n\4\3\4\3\4"+ + "\3\4\3\4\5\4\u00c0\n\4\3\4\3\4\3\4\5\4\u00c5\n\4\3\4\5\4\u00c8\n\4\3\4"+ + "\3\4\3\4\3\4\3\4\5\4\u00cf\n\4\3\5\3\5\3\5\3\5\7\5\u00d5\n\5\f\5\16\5"+ + "\u00d8\13\5\5\5\u00da\n\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\6\7\6\u00e4\n"+ + "\6\f\6\16\6\u00e7\13\6\5\6\u00e9\n\6\3\6\5\6\u00ec\n\6\3\7\3\7\3\7\3\7"+ + "\3\7\5\7\u00f3\n\7\3\b\3\b\3\b\3\b\3\b\5\b\u00fa\n\b\3\t\3\t\5\t\u00fe"+ + "\n\t\3\n\3\n\5\n\u0102\n\n\3\n\3\n\3\n\7\n\u0107\n\n\f\n\16\n\u010a\13"+ + "\n\3\n\5\n\u010d\n\n\3\n\3\n\5\n\u0111\n\n\3\n\3\n\3\n\5\n\u0116\n\n\3"+ + "\n\3\n\5\n\u011a\n\n\3\13\3\13\3\13\3\13\7\13\u0120\n\13\f\13\16\13\u0123"+ + "\13\13\3\f\5\f\u0126\n\f\3\f\3\f\3\f\7\f\u012b\n\f\f\f\16\f\u012e\13\f"+ + "\3\r\3\r\3\16\3\16\3\16\3\16\7\16\u0136\n\16\f\16\16\16\u0139\13\16\5"+ + "\16\u013b\n\16\3\16\3\16\5\16\u013f\n\16\3\17\3\17\3\17\3\17\3\17\3\17"+ + "\3\20\3\20\3\21\3\21\5\21\u014b\n\21\3\21\5\21\u014e\n\21\3\22\3\22\7"+ + "\22\u0152\n\22\f\22\16\22\u0155\13\22\3\23\3\23\3\23\3\23\5\23\u015b\n"+ + "\23\3\23\3\23\3\23\3\23\3\23\5\23\u0162\n\23\3\24\5\24\u0165\n\24\3\24"+ + "\3\24\5\24\u0169\n\24\3\24\3\24\5\24\u016d\n\24\3\24\3\24\5\24\u0171\n"+ + "\24\5\24\u0173\n\24\3\25\3\25\3\25\3\25\3\25\3\25\3\25\7\25\u017c\n\25"+ + "\f\25\16\25\u017f\13\25\3\25\3\25\5\25\u0183\n\25\3\26\3\26\5\26\u0187"+ + "\n\26\3\26\5\26\u018a\n\26\3\26\3\26\3\26\3\26\5\26\u0190\n\26\3\26\5"+ + "\26\u0193\n\26\3\26\3\26\3\26\3\26\5\26\u0199\n\26\3\26\5\26\u019c\n\26"+ + "\5\26\u019e\n\26\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30"+ + 
"\3\30\3\30\3\30\3\30\7\30\u01af\n\30\f\30\16\30\u01b2\13\30\3\30\3\30"+ + "\3\30\3\30\3\30\3\30\3\30\3\30\3\30\7\30\u01bd\n\30\f\30\16\30\u01c0\13"+ + "\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\7\30\u01cb\n\30\f\30"+ + "\16\30\u01ce\13\30\3\30\3\30\3\30\5\30\u01d3\n\30\3\30\3\30\3\30\3\30"+ + "\3\30\3\30\7\30\u01db\n\30\f\30\16\30\u01de\13\30\3\31\3\31\5\31\u01e2"+ + "\n\31\3\32\5\32\u01e5\n\32\3\32\3\32\3\32\3\32\3\32\3\32\5\32\u01ed\n"+ + "\32\3\32\3\32\3\32\3\32\3\32\7\32\u01f4\n\32\f\32\16\32\u01f7\13\32\3"+ + "\32\3\32\3\32\5\32\u01fc\n\32\3\32\3\32\3\32\3\32\3\32\3\32\5\32\u0204"+ + "\n\32\3\32\3\32\3\32\5\32\u0209\n\32\3\32\3\32\3\32\3\32\5\32\u020f\n"+ + "\32\3\32\5\32\u0212\n\32\3\33\3\33\3\33\3\34\3\34\5\34\u0219\n\34\3\35"+ + "\3\35\3\35\3\35\3\35\3\35\5\35\u0221\n\35\3\36\3\36\3\36\3\36\5\36\u0227"+ + "\n\36\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3\36\7\36\u0233\n\36"+ + "\f\36\16\36\u0236\13\36\3\37\3\37\3\37\3\37\3\37\3\37\3\37\5\37\u023f"+ + "\n\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\5\37"+ + "\u024d\n\37\3 \3 \3 \3 \3 \5 \u0254\n \3!\3!\3!\3!\3!\3!\3!\3\"\3\"\3"+ + "\"\3\"\3\"\5\"\u0262\n\"\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\5$\u0270"+ + "\n$\3%\3%\3%\5%\u0275\n%\3%\3%\3%\7%\u027a\n%\f%\16%\u027d\13%\5%\u027f"+ + "\n%\3%\3%\3&\3&\3&\5&\u0286\n&\3\'\3\'\3\'\3\'\6\'\u028c\n\'\r\'\16\'"+ + "\u028d\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'"+ + "\3\'\5\'\u02a1\n\'\3(\3(\3)\3)\3*\3*\3+\3+\3+\7+\u02ac\n+\f+\16+\u02af"+ + "\13+\3+\3+\3,\3,\5,\u02b5\n,\3-\3-\3-\5-\u02ba\n-\3-\3-\3-\3-\5-\u02c0"+ + "\n-\3-\5-\u02c3\n-\3.\3.\5.\u02c7\n.\3/\3/\3/\5/\u02cc\n/\3\60\3\60\5"+ + "\60\u02d0\n\60\3\61\3\61\3\62\3\62\3\62\2\4.:\63\2\4\6\b\n\f\16\20\22"+ + "\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJLNPRTVXZ\\^`b\2\20\b"+ + "\2\7\7\t\t\31\31,,\62\62\66\66\4\2\"\"BB\4\2\t\t\62\62\4\2\37\37%%\3\2"+ + "\25\26\4\2\7\7aa\4\2\r\r\25\25\4\2\7\7\27\27\3\2XY\3\2Z\\\3\2RW\4\2\35"+ + "\35CC\3\2_`\20\2\b\t\22\24\31\31\33\33\36\36!\",,\62\62\668:<>?ABDEGG"+ + "\u0330\2d\3\2\2\2\4g\3\2\2\2\6\u00ce\3\2\2\2\b\u00d9\3\2\2\2\n\u00dd\3"+ + "\2\2\2\f\u00f2\3\2\2\2\16\u00f9\3\2\2\2\20\u00fb\3\2\2\2\22\u00ff\3\2"+ + "\2\2\24\u011b\3\2\2\2\26\u0125\3\2\2\2\30\u012f\3\2\2\2\32\u013e\3\2\2"+ + "\2\34\u0140\3\2\2\2\36\u0146\3\2\2\2 \u0148\3\2\2\2\"\u014f\3\2\2\2$\u0161"+ + "\3\2\2\2&\u0172\3\2\2\2(\u0182\3\2\2\2*\u019d\3\2\2\2,\u019f\3\2\2\2."+ + "\u01d2\3\2\2\2\60\u01df\3\2\2\2\62\u0211\3\2\2\2\64\u0213\3\2\2\2\66\u0216"+ + "\3\2\2\28\u0220\3\2\2\2:\u0226\3\2\2\2<\u024c\3\2\2\2>\u0253\3\2\2\2@"+ + "\u0255\3\2\2\2B\u0261\3\2\2\2D\u0263\3\2\2\2F\u026f\3\2\2\2H\u0271\3\2"+ + "\2\2J\u0285\3\2\2\2L\u02a0\3\2\2\2N\u02a2\3\2\2\2P\u02a4\3\2\2\2R\u02a6"+ + "\3\2\2\2T\u02ad\3\2\2\2V\u02b4\3\2\2\2X\u02c2\3\2\2\2Z\u02c6\3\2\2\2\\"+ + "\u02cb\3\2\2\2^\u02cf\3\2\2\2`\u02d1\3\2\2\2b\u02d3\3\2\2\2de\5\6\4\2"+ + "ef\7\2\2\3f\3\3\2\2\2gh\5,\27\2hi\7\2\2\3i\5\3\2\2\2j\u00cf\5\b\5\2ky"+ + "\7\33\2\2lu\7\3\2\2mn\78\2\2nt\t\2\2\2op\7\36\2\2pt\t\3\2\2qr\7G\2\2r"+ + "t\5P)\2sm\3\2\2\2so\3\2\2\2sq\3\2\2\2tw\3\2\2\2us\3\2\2\2uv\3\2\2\2vx"+ + "\3\2\2\2wu\3\2\2\2xz\7\4\2\2yl\3\2\2\2yz\3\2\2\2z{\3\2\2\2{\u00cf\5\6"+ + "\4\2|\u0088\7\24\2\2}\u0084\7\3\2\2~\177\78\2\2\177\u0083\t\4\2\2\u0080"+ + "\u0081\7\36\2\2\u0081\u0083\t\3\2\2\u0082~\3\2\2\2\u0082\u0080\3\2\2\2"+ + "\u0083\u0086\3\2\2\2\u0084\u0082\3\2\2\2\u0084\u0085\3\2\2\2\u0085\u0087"+ + "\3\2\2\2\u0086\u0084\3\2\2\2\u0087\u0089\7\4\2\2\u0088}\3\2\2\2\u0088"+ + 
"\u0089\3\2\2\2\u0089\u008a\3\2\2\2\u008a\u00cf\5\6\4\2\u008b\u008c\7>"+ + "\2\2\u008c\u008f\7A\2\2\u008d\u0090\5\64\33\2\u008e\u0090\5X-\2\u008f"+ + "\u008d\3\2\2\2\u008f\u008e\3\2\2\2\u008f\u0090\3\2\2\2\u0090\u00cf\3\2"+ + "\2\2\u0091\u0092\7>\2\2\u0092\u0093\7\23\2\2\u0093\u0096\t\5\2\2\u0094"+ + "\u0097\5\64\33\2\u0095\u0097\5X-\2\u0096\u0094\3\2\2\2\u0096\u0095\3\2"+ + "\2\2\u0097\u00cf\3\2\2\2\u0098\u009b\t\6\2\2\u0099\u009c\5\64\33\2\u009a"+ + "\u009c\5X-\2\u009b\u0099\3\2\2\2\u009b\u009a\3\2\2\2\u009c\u00cf\3\2\2"+ + "\2\u009d\u009e\7>\2\2\u009e\u00a0\7!\2\2\u009f\u00a1\5\64\33\2\u00a0\u009f"+ + "\3\2\2\2\u00a0\u00a1\3\2\2\2\u00a1\u00cf\3\2\2\2\u00a2\u00a3\7>\2\2\u00a3"+ + "\u00cf\7<\2\2\u00a4\u00a5\7?\2\2\u00a5\u00cf\7\22\2\2\u00a6\u00a7\7?\2"+ + "\2\u00a7\u00aa\7A\2\2\u00a8\u00a9\7\21\2\2\u00a9\u00ab\5\64\33\2\u00aa"+ + "\u00a8\3\2\2\2\u00aa\u00ab\3\2\2\2\u00ab\u00ae\3\2\2\2\u00ac\u00af\5\64"+ + "\33\2\u00ad\u00af\5X-\2\u00ae\u00ac\3\2\2\2\u00ae\u00ad\3\2\2\2\u00ae"+ + "\u00af\3\2\2\2\u00af\u00b9\3\2\2\2\u00b0\u00b1\7D\2\2\u00b1\u00b6\5`\61"+ + "\2\u00b2\u00b3\7\5\2\2\u00b3\u00b5\5`\61\2\u00b4\u00b2\3\2\2\2\u00b5\u00b8"+ + "\3\2\2\2\u00b6\u00b4\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00ba\3\2\2\2\u00b8"+ + "\u00b6\3\2\2\2\u00b9\u00b0\3\2\2\2\u00b9\u00ba\3\2\2\2\u00ba\u00cf\3\2"+ + "\2\2\u00bb\u00bc\7?\2\2\u00bc\u00bf\7\23\2\2\u00bd\u00be\7\21\2\2\u00be"+ + "\u00c0\5`\61\2\u00bf\u00bd\3\2\2\2\u00bf\u00c0\3\2\2\2\u00c0\u00c4\3\2"+ + "\2\2\u00c1\u00c2\7@\2\2\u00c2\u00c5\5\64\33\2\u00c3\u00c5\5X-\2\u00c4"+ + "\u00c1\3\2\2\2\u00c4\u00c3\3\2\2\2\u00c4\u00c5\3\2\2\2\u00c5\u00c7\3\2"+ + "\2\2\u00c6\u00c8\5\64\33\2\u00c7\u00c6\3\2\2\2\u00c7\u00c8\3\2\2\2\u00c8"+ + "\u00cf\3\2\2\2\u00c9\u00ca\7?\2\2\u00ca\u00cf\7E\2\2\u00cb\u00cc\7?\2"+ + "\2\u00cc\u00cd\7@\2\2\u00cd\u00cf\7E\2\2\u00cej\3\2\2\2\u00cek\3\2\2\2"+ + "\u00ce|\3\2\2\2\u00ce\u008b\3\2\2\2\u00ce\u0091\3\2\2\2\u00ce\u0098\3"+ + "\2\2\2\u00ce\u009d\3\2\2\2\u00ce\u00a2\3\2\2\2\u00ce\u00a4\3\2\2\2\u00ce"+ + "\u00a6\3\2\2\2\u00ce\u00bb\3\2\2\2\u00ce\u00c9\3\2\2\2\u00ce\u00cb\3\2"+ + "\2\2\u00cf\7\3\2\2\2\u00d0\u00d1\7I\2\2\u00d1\u00d6\5\34\17\2\u00d2\u00d3"+ + "\7\5\2\2\u00d3\u00d5\5\34\17\2\u00d4\u00d2\3\2\2\2\u00d5\u00d8\3\2\2\2"+ + "\u00d6\u00d4\3\2\2\2\u00d6\u00d7\3\2\2\2\u00d7\u00da\3\2\2\2\u00d8\u00d6"+ + "\3\2\2\2\u00d9\u00d0\3\2\2\2\u00d9\u00da\3\2\2\2\u00da\u00db\3\2\2\2\u00db"+ + "\u00dc\5\n\6\2\u00dc\t\3\2\2\2\u00dd\u00e8\5\16\b\2\u00de\u00df\7\64\2"+ + "\2\u00df\u00e0\7\17\2\2\u00e0\u00e5\5\20\t\2\u00e1\u00e2\7\5\2\2\u00e2"+ + "\u00e4\5\20\t\2\u00e3\u00e1\3\2\2\2\u00e4\u00e7\3\2\2\2\u00e5\u00e3\3"+ + "\2\2\2\u00e5\u00e6\3\2\2\2\u00e6\u00e9\3\2\2\2\u00e7\u00e5\3\2\2\2\u00e8"+ + "\u00de\3\2\2\2\u00e8\u00e9\3\2\2\2\u00e9\u00eb\3\2\2\2\u00ea\u00ec\5\f"+ + "\7\2\u00eb\u00ea\3\2\2\2\u00eb\u00ec\3\2\2\2\u00ec\13\3\2\2\2\u00ed\u00ee"+ + "\7+\2\2\u00ee\u00f3\t\7\2\2\u00ef\u00f0\7L\2\2\u00f0\u00f1\t\7\2\2\u00f1"+ + "\u00f3\7Q\2\2\u00f2\u00ed\3\2\2\2\u00f2\u00ef\3\2\2\2\u00f3\r\3\2\2\2"+ + "\u00f4\u00fa\5\22\n\2\u00f5\u00f6\7\3\2\2\u00f6\u00f7\5\n\6\2\u00f7\u00f8"+ + "\7\4\2\2\u00f8\u00fa\3\2\2\2\u00f9\u00f4\3\2\2\2\u00f9\u00f5\3\2\2\2\u00fa"+ + "\17\3\2\2\2\u00fb\u00fd\5,\27\2\u00fc\u00fe\t\b\2\2\u00fd\u00fc\3\2\2"+ + "\2\u00fd\u00fe\3\2\2\2\u00fe\21\3\2\2\2\u00ff\u0101\7=\2\2\u0100\u0102"+ + "\5\36\20\2\u0101\u0100\3\2\2\2\u0101\u0102\3\2\2\2\u0102\u0103\3\2\2\2"+ + "\u0103\u0108\5 \21\2\u0104\u0105\7\5\2\2\u0105\u0107\5 \21\2\u0106\u0104"+ + "\3\2\2\2\u0107\u010a\3\2\2\2\u0108\u0106\3\2\2\2\u0108\u0109\3\2\2\2\u0109"+ + 
"\u010c\3\2\2\2\u010a\u0108\3\2\2\2\u010b\u010d\5\24\13\2\u010c\u010b\3"+ + "\2\2\2\u010c\u010d\3\2\2\2\u010d\u0110\3\2\2\2\u010e\u010f\7H\2\2\u010f"+ + "\u0111\5.\30\2\u0110\u010e\3\2\2\2\u0110\u0111\3\2\2\2\u0111\u0115\3\2"+ + "\2\2\u0112\u0113\7#\2\2\u0113\u0114\7\17\2\2\u0114\u0116\5\26\f\2\u0115"+ + "\u0112\3\2\2\2\u0115\u0116\3\2\2\2\u0116\u0119\3\2\2\2\u0117\u0118\7$"+ + "\2\2\u0118\u011a\5.\30\2\u0119\u0117\3\2\2\2\u0119\u011a\3\2\2\2\u011a"+ + "\23\3\2\2\2\u011b\u011c\7\37\2\2\u011c\u0121\5\"\22\2\u011d\u011e\7\5"+ + "\2\2\u011e\u0120\5\"\22\2\u011f\u011d\3\2\2\2\u0120\u0123\3\2\2\2\u0121"+ + "\u011f\3\2\2\2\u0121\u0122\3\2\2\2\u0122\25\3\2\2\2\u0123\u0121\3\2\2"+ + "\2\u0124\u0126\5\36\20\2\u0125\u0124\3\2\2\2\u0125\u0126\3\2\2\2\u0126"+ + "\u0127\3\2\2\2\u0127\u012c\5\30\r\2\u0128\u0129\7\5\2\2\u0129\u012b\5"+ + "\30\r\2\u012a\u0128\3\2\2\2\u012b\u012e\3\2\2\2\u012c\u012a\3\2\2\2\u012c"+ + "\u012d\3\2\2\2\u012d\27\3\2\2\2\u012e\u012c\3\2\2\2\u012f\u0130\5\32\16"+ + "\2\u0130\31\3\2\2\2\u0131\u013a\7\3\2\2\u0132\u0137\5,\27\2\u0133\u0134"+ + "\7\5\2\2\u0134\u0136\5,\27\2\u0135\u0133\3\2\2\2\u0136\u0139\3\2\2\2\u0137"+ + "\u0135\3\2\2\2\u0137\u0138\3\2\2\2\u0138\u013b\3\2\2\2\u0139\u0137\3\2"+ + "\2\2\u013a\u0132\3\2\2\2\u013a\u013b\3\2\2\2\u013b\u013c\3\2\2\2\u013c"+ + "\u013f\7\4\2\2\u013d\u013f\5,\27\2\u013e\u0131\3\2\2\2\u013e\u013d\3\2"+ + "\2\2\u013f\33\3\2\2\2\u0140\u0141\5V,\2\u0141\u0142\7\f\2\2\u0142\u0143"+ + "\7\3\2\2\u0143\u0144\5\n\6\2\u0144\u0145\7\4\2\2\u0145\35\3\2\2\2\u0146"+ + "\u0147\t\t\2\2\u0147\37\3\2\2\2\u0148\u014d\5,\27\2\u0149\u014b\7\f\2"+ + "\2\u014a\u0149\3\2\2\2\u014a\u014b\3\2\2\2\u014b\u014c\3\2\2\2\u014c\u014e"+ + "\5V,\2\u014d\u014a\3\2\2\2\u014d\u014e\3\2\2\2\u014e!\3\2\2\2\u014f\u0153"+ + "\5*\26\2\u0150\u0152\5$\23\2\u0151\u0150\3\2\2\2\u0152\u0155\3\2\2\2\u0153"+ + "\u0151\3\2\2\2\u0153\u0154\3\2\2\2\u0154#\3\2\2\2\u0155\u0153\3\2\2\2"+ + "\u0156\u0157\5&\24\2\u0157\u0158\7(\2\2\u0158\u015a\5*\26\2\u0159\u015b"+ + "\5(\25\2\u015a\u0159\3\2\2\2\u015a\u015b\3\2\2\2\u015b\u0162\3\2\2\2\u015c"+ + "\u015d\7.\2\2\u015d\u015e\5&\24\2\u015e\u015f\7(\2\2\u015f\u0160\5*\26"+ + "\2\u0160\u0162\3\2\2\2\u0161\u0156\3\2\2\2\u0161\u015c\3\2\2\2\u0162%"+ + "\3\2\2\2\u0163\u0165\7&\2\2\u0164\u0163\3\2\2\2\u0164\u0165\3\2\2\2\u0165"+ + "\u0173\3\2\2\2\u0166\u0168\7)\2\2\u0167\u0169\7\65\2\2\u0168\u0167\3\2"+ + "\2\2\u0168\u0169\3\2\2\2\u0169\u0173\3\2\2\2\u016a\u016c\79\2\2\u016b"+ + "\u016d\7\65\2\2\u016c\u016b\3\2\2\2\u016c\u016d\3\2\2\2\u016d\u0173\3"+ + "\2\2\2\u016e\u0170\7 \2\2\u016f\u0171\7\65\2\2\u0170\u016f\3\2\2\2\u0170"+ + "\u0171\3\2\2\2\u0171\u0173\3\2\2\2\u0172\u0164\3\2\2\2\u0172\u0166\3\2"+ + "\2\2\u0172\u016a\3\2\2\2\u0172\u016e\3\2\2\2\u0173\'\3\2\2\2\u0174\u0175"+ + "\7\61\2\2\u0175\u0183\5.\30\2\u0176\u0177\7F\2\2\u0177\u0178\7\3\2\2\u0178"+ + "\u017d\5V,\2\u0179\u017a\7\5\2\2\u017a\u017c\5V,\2\u017b\u0179\3\2\2\2"+ + "\u017c\u017f\3\2\2\2\u017d\u017b\3\2\2\2\u017d\u017e\3\2\2\2\u017e\u0180"+ + "\3\2\2\2\u017f\u017d\3\2\2\2\u0180\u0181\7\4\2\2\u0181\u0183\3\2\2\2\u0182"+ + "\u0174\3\2\2\2\u0182\u0176\3\2\2\2\u0183)\3\2\2\2\u0184\u0189\5X-\2\u0185"+ + "\u0187\7\f\2\2\u0186\u0185\3\2\2\2\u0186\u0187\3\2\2\2\u0187\u0188\3\2"+ + "\2\2\u0188\u018a\5T+\2\u0189\u0186\3\2\2\2\u0189\u018a\3\2\2\2\u018a\u019e"+ + "\3\2\2\2\u018b\u018c\7\3\2\2\u018c\u018d\5\n\6\2\u018d\u0192\7\4\2\2\u018e"+ + "\u0190\7\f\2\2\u018f\u018e\3\2\2\2\u018f\u0190\3\2\2\2\u0190\u0191\3\2"+ + "\2\2\u0191\u0193\5T+\2\u0192\u018f\3\2\2\2\u0192\u0193\3\2\2\2\u0193\u019e"+ + 
"\3\2\2\2\u0194\u0195\7\3\2\2\u0195\u0196\5\"\22\2\u0196\u019b\7\4\2\2"+ + "\u0197\u0199\7\f\2\2\u0198\u0197\3\2\2\2\u0198\u0199\3\2\2\2\u0199\u019a"+ + "\3\2\2\2\u019a\u019c\5T+\2\u019b\u0198\3\2\2\2\u019b\u019c\3\2\2\2\u019c"+ + "\u019e\3\2\2\2\u019d\u0184\3\2\2\2\u019d\u018b\3\2\2\2\u019d\u0194\3\2"+ + "\2\2\u019e+\3\2\2\2\u019f\u01a0\5.\30\2\u01a0-\3\2\2\2\u01a1\u01a2\b\30"+ + "\1\2\u01a2\u01a3\7/\2\2\u01a3\u01d3\5.\30\n\u01a4\u01a5\7\32\2\2\u01a5"+ + "\u01a6\7\3\2\2\u01a6\u01a7\5\b\5\2\u01a7\u01a8\7\4\2\2\u01a8\u01d3\3\2"+ + "\2\2\u01a9\u01aa\7;\2\2\u01aa\u01ab\7\3\2\2\u01ab\u01b0\5`\61\2\u01ac"+ + "\u01ad\7\5\2\2\u01ad\u01af\5`\61\2\u01ae\u01ac\3\2\2\2\u01af\u01b2\3\2"+ + "\2\2\u01b0\u01ae\3\2\2\2\u01b0\u01b1\3\2\2\2\u01b1\u01b3\3\2\2\2\u01b2"+ + "\u01b0\3\2\2\2\u01b3\u01b4\7\4\2\2\u01b4\u01d3\3\2\2\2\u01b5\u01b6\7-"+ + "\2\2\u01b6\u01b7\7\3\2\2\u01b7\u01b8\5T+\2\u01b8\u01b9\7\5\2\2\u01b9\u01be"+ + "\5`\61\2\u01ba\u01bb\7\5\2\2\u01bb\u01bd\5`\61\2\u01bc\u01ba\3\2\2\2\u01bd"+ + "\u01c0\3\2\2\2\u01be\u01bc\3\2\2\2\u01be\u01bf\3\2\2\2\u01bf\u01c1\3\2"+ + "\2\2\u01c0\u01be\3\2\2\2\u01c1\u01c2\7\4\2\2\u01c2\u01d3\3\2\2\2\u01c3"+ + "\u01c4\7-\2\2\u01c4\u01c5\7\3\2\2\u01c5\u01c6\5`\61\2\u01c6\u01c7\7\5"+ + "\2\2\u01c7\u01cc\5`\61\2\u01c8\u01c9\7\5\2\2\u01c9\u01cb\5`\61\2\u01ca"+ + "\u01c8\3\2\2\2\u01cb\u01ce\3\2\2\2\u01cc\u01ca\3\2\2\2\u01cc\u01cd\3\2"+ + "\2\2\u01cd\u01cf\3\2\2\2\u01ce\u01cc\3\2\2\2\u01cf\u01d0\7\4\2\2\u01d0"+ + "\u01d3\3\2\2\2\u01d1\u01d3\5\60\31\2\u01d2\u01a1\3\2\2\2\u01d2\u01a4\3"+ + "\2\2\2\u01d2\u01a9\3\2\2\2\u01d2\u01b5\3\2\2\2\u01d2\u01c3\3\2\2\2\u01d2"+ + "\u01d1\3\2\2\2\u01d3\u01dc\3\2\2\2\u01d4\u01d5\f\4\2\2\u01d5\u01d6\7\n"+ + "\2\2\u01d6\u01db\5.\30\5\u01d7\u01d8\f\3\2\2\u01d8\u01d9\7\63\2\2\u01d9"+ + "\u01db\5.\30\4\u01da\u01d4\3\2\2\2\u01da\u01d7\3\2\2\2\u01db\u01de\3\2"+ + "\2\2\u01dc\u01da\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd/\3\2\2\2\u01de\u01dc"+ + "\3\2\2\2\u01df\u01e1\5:\36\2\u01e0\u01e2\5\62\32\2\u01e1\u01e0\3\2\2\2"+ + "\u01e1\u01e2\3\2\2\2\u01e2\61\3\2\2\2\u01e3\u01e5\7/\2\2\u01e4\u01e3\3"+ + "\2\2\2\u01e4\u01e5\3\2\2\2\u01e5\u01e6\3\2\2\2\u01e6\u01e7\7\16\2\2\u01e7"+ + "\u01e8\5:\36\2\u01e8\u01e9\7\n\2\2\u01e9\u01ea\5:\36\2\u01ea\u0212\3\2"+ + "\2\2\u01eb\u01ed\7/\2\2\u01ec\u01eb\3\2\2\2\u01ec\u01ed\3\2\2\2\u01ed"+ + "\u01ee\3\2\2\2\u01ee\u01ef\7%\2\2\u01ef\u01f0\7\3\2\2\u01f0\u01f5\5,\27"+ + "\2\u01f1\u01f2\7\5\2\2\u01f2\u01f4\5,\27\2\u01f3\u01f1\3\2\2\2\u01f4\u01f7"+ + "\3\2\2\2\u01f5\u01f3\3\2\2\2\u01f5\u01f6\3\2\2\2\u01f6\u01f8\3\2\2\2\u01f7"+ + "\u01f5\3\2\2\2\u01f8\u01f9\7\4\2\2\u01f9\u0212\3\2\2\2\u01fa\u01fc\7/"+ + "\2\2\u01fb\u01fa\3\2\2\2\u01fb\u01fc\3\2\2\2\u01fc\u01fd\3\2\2\2\u01fd"+ + "\u01fe\7%\2\2\u01fe\u01ff\7\3\2\2\u01ff\u0200\5\b\5\2\u0200\u0201\7\4"+ + "\2\2\u0201\u0212\3\2\2\2\u0202\u0204\7/\2\2\u0203\u0202\3\2\2\2\u0203"+ + "\u0204\3\2\2\2\u0204\u0205\3\2\2\2\u0205\u0206\7*\2\2\u0206\u0212\5\66"+ + "\34\2\u0207\u0209\7/\2\2\u0208\u0207\3\2\2\2\u0208\u0209\3\2\2\2\u0209"+ + "\u020a\3\2\2\2\u020a\u020b\7:\2\2\u020b\u0212\5`\61\2\u020c\u020e\7\'"+ + "\2\2\u020d\u020f\7/\2\2\u020e\u020d\3\2\2\2\u020e\u020f\3\2\2\2\u020f"+ + "\u0210\3\2\2\2\u0210\u0212\7\60\2\2\u0211\u01e4\3\2\2\2\u0211\u01ec\3"+ + "\2\2\2\u0211\u01fb\3\2\2\2\u0211\u0203\3\2\2\2\u0211\u0208\3\2\2\2\u0211"+ + "\u020c\3\2\2\2\u0212\63\3\2\2\2\u0213\u0214\7*\2\2\u0214\u0215\5\66\34"+ + "\2\u0215\65\3\2\2\2\u0216\u0218\5`\61\2\u0217\u0219\58\35\2\u0218\u0217"+ + "\3\2\2\2\u0218\u0219\3\2\2\2\u0219\67\3\2\2\2\u021a\u021b\7\30\2\2\u021b"+ + 
"\u0221\5`\61\2\u021c\u021d\7J\2\2\u021d\u021e\5`\61\2\u021e\u021f\7Q\2"+ + "\2\u021f\u0221\3\2\2\2\u0220\u021a\3\2\2\2\u0220\u021c\3\2\2\2\u02219"+ + "\3\2\2\2\u0222\u0223\b\36\1\2\u0223\u0227\5<\37\2\u0224\u0225\t\n\2\2"+ + "\u0225\u0227\5:\36\6\u0226\u0222\3\2\2\2\u0226\u0224\3\2\2\2\u0227\u0234"+ + "\3\2\2\2\u0228\u0229\f\5\2\2\u0229\u022a\t\13\2\2\u022a\u0233\5:\36\6"+ + "\u022b\u022c\f\4\2\2\u022c\u022d\t\n\2\2\u022d\u0233\5:\36\5\u022e\u022f"+ + "\f\3\2\2\u022f\u0230\5N(\2\u0230\u0231\5:\36\4\u0231\u0233\3\2\2\2\u0232"+ + "\u0228\3\2\2\2\u0232\u022b\3\2\2\2\u0232\u022e\3\2\2\2\u0233\u0236\3\2"+ + "\2\2\u0234\u0232\3\2\2\2\u0234\u0235\3\2\2\2\u0235;\3\2\2\2\u0236\u0234"+ + "\3\2\2\2\u0237\u024d\5> \2\u0238\u024d\5B\"\2\u0239\u024d\5L\'\2\u023a"+ + "\u024d\7Z\2\2\u023b\u023c\5T+\2\u023c\u023d\7^\2\2\u023d\u023f\3\2\2\2"+ + "\u023e\u023b\3\2\2\2\u023e\u023f\3\2\2\2\u023f\u0240\3\2\2\2\u0240\u024d"+ + "\7Z\2\2\u0241\u024d\5F$\2\u0242\u0243\7\3\2\2\u0243\u0244\5\b\5\2\u0244"+ + "\u0245\7\4\2\2\u0245\u024d\3\2\2\2\u0246\u024d\5V,\2\u0247\u024d\5T+\2"+ + "\u0248\u0249\7\3\2\2\u0249\u024a\5,\27\2\u024a\u024b\7\4\2\2\u024b\u024d"+ + "\3\2\2\2\u024c\u0237\3\2\2\2\u024c\u0238\3\2\2\2\u024c\u0239\3\2\2\2\u024c"+ + "\u023a\3\2\2\2\u024c\u023e\3\2\2\2\u024c\u0241\3\2\2\2\u024c\u0242\3\2"+ + "\2\2\u024c\u0246\3\2\2\2\u024c\u0247\3\2\2\2\u024c\u0248\3\2\2\2\u024d"+ + "=\3\2\2\2\u024e\u0254\5@!\2\u024f\u0250\7K\2\2\u0250\u0251\5@!\2\u0251"+ + "\u0252\7Q\2\2\u0252\u0254\3\2\2\2\u0253\u024e\3\2\2\2\u0253\u024f\3\2"+ + "\2\2\u0254?\3\2\2\2\u0255\u0256\7\20\2\2\u0256\u0257\7\3\2\2\u0257\u0258"+ + "\5,\27\2\u0258\u0259\7\f\2\2\u0259\u025a\5R*\2\u025a\u025b\7\4\2\2\u025b"+ + "A\3\2\2\2\u025c\u0262\5D#\2\u025d\u025e\7K\2\2\u025e\u025f\5D#\2\u025f"+ + "\u0260\7Q\2\2\u0260\u0262\3\2\2\2\u0261\u025c\3\2\2\2\u0261\u025d\3\2"+ + "\2\2\u0262C\3\2\2\2\u0263\u0264\7\34\2\2\u0264\u0265\7\3\2\2\u0265\u0266"+ + "\5V,\2\u0266\u0267\7\37\2\2\u0267\u0268\5:\36\2\u0268\u0269\7\4\2\2\u0269"+ + "E\3\2\2\2\u026a\u0270\5H%\2\u026b\u026c\7K\2\2\u026c\u026d\5H%\2\u026d"+ + "\u026e\7Q\2\2\u026e\u0270\3\2\2\2\u026f\u026a\3\2\2\2\u026f\u026b\3\2"+ + "\2\2\u0270G\3\2\2\2\u0271\u0272\5J&\2\u0272\u027e\7\3\2\2\u0273\u0275"+ + "\5\36\20\2\u0274\u0273\3\2\2\2\u0274\u0275\3\2\2\2\u0275\u0276\3\2\2\2"+ + "\u0276\u027b\5,\27\2\u0277\u0278\7\5\2\2\u0278\u027a\5,\27\2\u0279\u0277"+ + "\3\2\2\2\u027a\u027d\3\2\2\2\u027b\u0279\3\2\2\2\u027b\u027c\3\2\2\2\u027c"+ + "\u027f\3\2\2\2\u027d\u027b\3\2\2\2\u027e\u0274\3\2\2\2\u027e\u027f\3\2"+ + "\2\2\u027f\u0280\3\2\2\2\u0280\u0281\7\4\2\2\u0281I\3\2\2\2\u0282\u0286"+ + "\7)\2\2\u0283\u0286\79\2\2\u0284\u0286\5V,\2\u0285\u0282\3\2\2\2\u0285"+ + "\u0283\3\2\2\2\u0285\u0284\3\2\2\2\u0286K\3\2\2\2\u0287\u02a1\7\60\2\2"+ + "\u0288\u02a1\5^\60\2\u0289\u02a1\5P)\2\u028a\u028c\7`\2\2\u028b\u028a"+ + "\3\2\2\2\u028c\u028d\3\2\2\2\u028d\u028b\3\2\2\2\u028d\u028e\3\2\2\2\u028e"+ + "\u02a1\3\2\2\2\u028f\u02a1\7_\2\2\u0290\u0291\7M\2\2\u0291\u0292\5`\61"+ + "\2\u0292\u0293\7Q\2\2\u0293\u02a1\3\2\2\2\u0294\u0295\7N\2\2\u0295\u0296"+ + "\5`\61\2\u0296\u0297\7Q\2\2\u0297\u02a1\3\2\2\2\u0298\u0299\7O\2\2\u0299"+ + "\u029a\5`\61\2\u029a\u029b\7Q\2\2\u029b\u02a1\3\2\2\2\u029c\u029d\7P\2"+ + "\2\u029d\u029e\5`\61\2\u029e\u029f\7Q\2\2\u029f\u02a1\3\2\2\2\u02a0\u0287"+ + "\3\2\2\2\u02a0\u0288\3\2\2\2\u02a0\u0289\3\2\2\2\u02a0\u028b\3\2\2\2\u02a0"+ + "\u028f\3\2\2\2\u02a0\u0290\3\2\2\2\u02a0\u0294\3\2\2\2\u02a0\u0298\3\2"+ + "\2\2\u02a0\u029c\3\2\2\2\u02a1M\3\2\2\2\u02a2\u02a3\t\f\2\2\u02a3O\3\2"+ + 
"\2\2\u02a4\u02a5\t\r\2\2\u02a5Q\3\2\2\2\u02a6\u02a7\5V,\2\u02a7S\3\2\2"+ + "\2\u02a8\u02a9\5V,\2\u02a9\u02aa\7^\2\2\u02aa\u02ac\3\2\2\2\u02ab\u02a8"+ + "\3\2\2\2\u02ac\u02af\3\2\2\2\u02ad\u02ab\3\2\2\2\u02ad\u02ae\3\2\2\2\u02ae"+ + "\u02b0\3\2\2\2\u02af\u02ad\3\2\2\2\u02b0\u02b1\5V,\2\u02b1U\3\2\2\2\u02b2"+ + "\u02b5\5Z.\2\u02b3\u02b5\5\\/\2\u02b4\u02b2\3\2\2\2\u02b4\u02b3\3\2\2"+ + "\2\u02b5W\3\2\2\2\u02b6\u02b7\5V,\2\u02b7\u02b8\7\6\2\2\u02b8\u02ba\3"+ + "\2\2\2\u02b9\u02b6\3\2\2\2\u02b9\u02ba\3\2\2\2\u02ba\u02bb\3\2\2\2\u02bb"+ + "\u02c3\7e\2\2\u02bc\u02bd\5V,\2\u02bd\u02be\7\6\2\2\u02be\u02c0\3\2\2"+ + "\2\u02bf\u02bc\3\2\2\2\u02bf\u02c0\3\2\2\2\u02c0\u02c1\3\2\2\2\u02c1\u02c3"+ + "\5V,\2\u02c2\u02b9\3\2\2\2\u02c2\u02bf\3\2\2\2\u02c3Y\3\2\2\2\u02c4\u02c7"+ + "\7f\2\2\u02c5\u02c7\7g\2\2\u02c6\u02c4\3\2\2\2\u02c6\u02c5\3\2\2\2\u02c7"+ + "[\3\2\2\2\u02c8\u02cc\7c\2\2\u02c9\u02cc\5b\62\2\u02ca\u02cc\7d\2\2\u02cb"+ + "\u02c8\3\2\2\2\u02cb\u02c9\3\2\2\2\u02cb\u02ca\3\2\2\2\u02cc]\3\2\2\2"+ + "\u02cd\u02d0\7b\2\2\u02ce\u02d0\7a\2\2\u02cf\u02cd\3\2\2\2\u02cf\u02ce"+ + "\3\2\2\2\u02d0_\3\2\2\2\u02d1\u02d2\t\16\2\2\u02d2a\3\2\2\2\u02d3\u02d4"+ + "\t\17\2\2\u02d4c\3\2\2\2bsuy\u0082\u0084\u0088\u008f\u0096\u009b\u00a0"+ + "\u00aa\u00ae\u00b6\u00b9\u00bf\u00c4\u00c7\u00ce\u00d6\u00d9\u00e5\u00e8"+ + "\u00eb\u00f2\u00f9\u00fd\u0101\u0108\u010c\u0110\u0115\u0119\u0121\u0125"+ + "\u012c\u0137\u013a\u013e\u014a\u014d\u0153\u015a\u0161\u0164\u0168\u016c"+ + "\u0170\u0172\u017d\u0182\u0186\u0189\u018f\u0192\u0198\u019b\u019d\u01b0"+ + "\u01be\u01cc\u01d2\u01da\u01dc\u01e1\u01e4\u01ec\u01f5\u01fb\u0203\u0208"+ + "\u020e\u0211\u0218\u0220\u0226\u0232\u0234\u023e\u024c\u0253\u0261\u026f"+ + "\u0274\u027b\u027e\u0285\u028d\u02a0\u02ad\u02b4\u02b9\u02bf\u02c2\u02c6"+ + "\u02cb\u02cf"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java index b2ad5c8f770..2c28b18cdf2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java @@ -306,6 +306,12 @@ interface SqlBaseVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitPredicate(SqlBaseParser.PredicateContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#likePattern}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitLikePattern(SqlBaseParser.LikePatternContext ctx); /** * Visit a parse tree produced by {@link SqlBaseParser#pattern}. * @param ctx the parse tree diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/TableIdentifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/TableIdentifier.java index 0d91703679c..99913fc1272 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/TableIdentifier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/TableIdentifier.java @@ -53,6 +53,10 @@ public class TableIdentifier { return location; } + public String qualifiedIndex() { + return cluster != null ? 
cluster + ":" + index : index; + } + @Override public String toString() { StringBuilder builder = new StringBuilder(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java index 47716687b33..e2197d42608 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.sql.plan.logical.command; import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.regex.LikePattern; import org.elasticsearch.xpack.sql.session.Rows; import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; @@ -29,19 +30,25 @@ import static java.util.Collections.emptyList; public class ShowColumns extends Command { private final String index; + private final LikePattern pattern; - public ShowColumns(Location location, String index) { + public ShowColumns(Location location, String index, LikePattern pattern) { super(location); this.index = index; + this.pattern = pattern; } public String index() { return index; } + public LikePattern pattern() { + return pattern; + } + @Override protected NodeInfo<ShowColumns> info() { - return NodeInfo.create(this, ShowColumns::new, index); + return NodeInfo.create(this, ShowColumns::new, index, pattern); } @Override @@ -51,7 +58,9 @@ public class ShowColumns extends Command { @Override public void execute(SqlSession session, ActionListener<SchemaRowSet> listener) { - session.indexResolver().resolveWithSameMapping(index, null, ActionListener.wrap( + String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*"); + String regex = pattern != null ? 
pattern.asJavaRegex() : null; + session.indexResolver().resolveWithSameMapping(idx, regex, ActionListener.wrap( indexResult -> { List<List<?>> rows = emptyList(); if (indexResult.isValid()) { @@ -81,7 +90,7 @@ public class ShowColumns extends Command { @Override public int hashCode() { - return Objects.hash(index); + return Objects.hash(index, pattern); } @Override @@ -95,6 +104,7 @@ public class ShowColumns extends Command { } ShowColumns other = (ShowColumns) obj; - return Objects.equals(index, other.index); + return Objects.equals(index, other.index) + && Objects.equals(pattern, other.pattern); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowTables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowTables.java index ce81aa9a2e6..0735f870545 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowTables.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowTables.java @@ -22,16 +22,22 @@ import static java.util.stream.Collectors.toList; public class ShowTables extends Command { + private final String index; private final LikePattern pattern; - public ShowTables(Location location, LikePattern pattern) { + public ShowTables(Location location, String index, LikePattern pattern) { super(location); + this.index = index; this.pattern = pattern; } @Override protected NodeInfo<ShowTables> info() { - return NodeInfo.create(this, ShowTables::new, pattern); + return NodeInfo.create(this, ShowTables::new, index, pattern); + } + + public String index() { + return index; } public LikePattern pattern() { @@ -45,9 +51,9 @@ @Override public final void execute(SqlSession session, ActionListener<SchemaRowSet> listener) { - String index = pattern != null ? pattern.asIndexNameWildcard() : "*"; + String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*"); String regex = pattern != null ? 
pattern.asJavaRegex() : null; - session.indexResolver().resolveNames(index, regex, null, ActionListener.wrap(result -> { + session.indexResolver().resolveNames(idx, regex, null, ActionListener.wrap(result -> { listener.onResponse(Rows.of(output(), result.stream() .map(t -> asList(t.name(), t.type().toSql())) .collect(toList()))); @@ -56,7 +62,7 @@ @Override public int hashCode() { - return Objects.hash(pattern); + return Objects.hash(index, pattern); } @Override @@ -70,6 +76,7 @@ } ShowTables other = (ShowTables) obj; - return Objects.equals(pattern, other.pattern); + return Objects.equals(index, other.index) + && Objects.equals(pattern, other.pattern); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java index 8005ce07589..6337108b54b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java @@ -39,19 +39,21 @@ import static org.elasticsearch.xpack.sql.type.DataType.SHORT; public class SysColumns extends Command { private final String catalog; - private final LikePattern indexPattern; + private final String index; + private final LikePattern pattern; private final LikePattern columnPattern; - public SysColumns(Location location, String catalog, LikePattern indexPattern, LikePattern columnPattern) { + public SysColumns(Location location, String catalog, String index, LikePattern pattern, LikePattern columnPattern) { super(location); this.catalog = catalog; - this.indexPattern = indexPattern; + this.index = index; + this.pattern = pattern; this.columnPattern = columnPattern; } @Override protected NodeInfo<SysColumns> info() { - return NodeInfo.create(this, SysColumns::new, catalog, indexPattern, columnPattern); + return NodeInfo.create(this, SysColumns::new, catalog, index, pattern, columnPattern); } @Override @@ -94,12 +96,12 @@ return; } - String index = indexPattern != null ? indexPattern.asIndexNameWildcard() : "*"; - String regex = indexPattern != null ? indexPattern.asJavaRegex() : null; + String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*"); + String regex = pattern != null ? pattern.asJavaRegex() : null; Pattern columnMatcher = columnPattern != null ? 
Pattern.compile(columnPattern.asJavaRegex()) : null; - session.indexResolver().resolveAsSeparateMappings(index, regex, ActionListener.wrap(esIndices -> { + session.indexResolver().resolveAsSeparateMappings(idx, regex, ActionListener.wrap(esIndices -> { List<List<?>> rows = new ArrayList<>(); for (EsIndex esIndex : esIndices) { fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher); @@ -165,7 +167,7 @@ @Override public int hashCode() { - return Objects.hash(catalog, indexPattern, columnPattern); + return Objects.hash(catalog, index, pattern, columnPattern); } @Override @@ -180,7 +182,8 @@ SysColumns other = (SysColumns) obj; return Objects.equals(catalog, other.catalog) - && Objects.equals(indexPattern, other.indexPattern) + && Objects.equals(index, other.index) + && Objects.equals(pattern, other.pattern) && Objects.equals(columnPattern, other.columnPattern); } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java index eb6f6a36b55..69d0ad50648 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java @@ -32,16 +32,18 @@ import static org.elasticsearch.xpack.sql.util.StringUtils.SQL_WILDCARD; public class SysTables extends Command { + private final String index; private final LikePattern pattern; private final LikePattern clusterPattern; private final EnumSet<IndexType> types; // flag indicating whether tables are reported as `TABLE` or `BASE TABLE` private final boolean legacyTableTypes; - public SysTables(Location location, LikePattern clusterPattern, LikePattern pattern, EnumSet<IndexType> types, + public SysTables(Location location, LikePattern clusterPattern, String index, LikePattern pattern, EnumSet<IndexType> types, boolean legacyTableTypes) { super(location); this.clusterPattern = clusterPattern; + this.index = index; this.pattern = pattern; this.types = types; this.legacyTableTypes = legacyTableTypes; @@ -49,7 +51,7 @@ @Override protected NodeInfo<SysTables> info() { - return NodeInfo.create(this, SysTables::new, clusterPattern, pattern, types, legacyTableTypes); + return NodeInfo.create(this, SysTables::new, clusterPattern, index, pattern, types, legacyTableTypes); } @Override @@ -76,7 +78,7 @@ // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqltables-function?view=ssdt-18vs2017#comments if (clusterPattern != null && clusterPattern.pattern().equals(SQL_WILDCARD)) { - if (pattern != null && pattern.pattern().isEmpty() && CollectionUtils.isEmpty(types)) { + if ((pattern == null || pattern.pattern().isEmpty()) && CollectionUtils.isEmpty(types)) { Object[] enumeration = new Object[10]; // send only the cluster, everything else null enumeration[0] = cluster; @@ -86,8 +88,9 @@ } // if no types were specified (the parser takes care of the % case) - if (CollectionUtils.isEmpty(types)) { - if (clusterPattern != null && clusterPattern.pattern().isEmpty()) { + if (IndexType.VALID.equals(types)) { + if ((clusterPattern == null || clusterPattern.pattern().isEmpty()) + && (pattern == null || pattern.pattern().isEmpty())) { List<List<?>> values = new ArrayList<>(); 
// send only the types, everything else null for (IndexType type : IndexType.VALID) { @@ -111,10 +114,10 @@ return; } - String index = pattern != null ? pattern.asIndexNameWildcard() : "*"; + String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*"); String regex = pattern != null ? pattern.asJavaRegex() : null; - session.indexResolver().resolveNames(index, regex, types, ActionListener.wrap(result -> listener.onResponse( + session.indexResolver().resolveNames(idx, regex, types, ActionListener.wrap(result -> listener.onResponse( Rows.of(output(), result.stream() // sort by type (which might be legacy), then by name .sorted(Comparator.<IndexInfo, String> comparing(i -> legacyName(i.type())) @@ -139,7 +142,7 @@ @Override public int hashCode() { - return Objects.hash(clusterPattern, pattern, types); + return Objects.hash(clusterPattern, index, pattern, types); } @Override @@ -154,6 +157,7 @@ SysTables other = (SysTables) obj; return Objects.equals(clusterPattern, other.clusterPattern) + && Objects.equals(index, other.index) && Objects.equals(pattern, other.pattern) && Objects.equals(types, other.types); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java index 5fb8a754f0f..b8faedec718 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java @@ -112,6 +112,9 @@ public class Aggs { } public Aggs addAgg(LeafAgg agg) { + if (metricAggs.contains(agg)) { + return this; + } return new Aggs(groups, combine(metricAggs, agg), pipelineAggs); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnKey.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnKey.java index e98770318d2..931eaee6464 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnKey.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnKey.java @@ -25,7 +25,8 @@ public class GroupByColumnKey extends GroupByKey { public TermsValuesSourceBuilder asValueSource() { return new TermsValuesSourceBuilder(id()) .field(fieldName()) - .order(direction().asOrder()); + .order(direction().asOrder()) + .missingBucket(true); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateKey.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateKey.java index 43c80e75057..61c00c706ee 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateKey.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateKey.java @@ -44,7 +44,8 @@ public class GroupByDateKey extends GroupByKey { return new DateHistogramValuesSourceBuilder(id()) .field(fieldName()) .dateHistogramInterval(new DateHistogramInterval(interval)) - .timeZone(DateTimeZone.forTimeZone(timeZone)); + .timeZone(DateTimeZone.forTimeZone(timeZone)) + .missingBucket(true); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByScriptKey.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByScriptKey.java 
index a4af765d034..ccd2bf934ab 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByScriptKey.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByScriptKey.java @@ -36,7 +36,8 @@ public class GroupByScriptKey extends GroupByKey { public TermsValuesSourceBuilder asValueSource() { TermsValuesSourceBuilder builder = new TermsValuesSourceBuilder(id()) .script(script.toPainless()) - .order(direction().asOrder()); + .order(direction().asOrder()) + .missingBucket(true); if (script.outputType().isNumeric()) { builder.valueType(ValueType.NUMBER); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java index 9570eaf1b6a..0f00822e3f4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.sql.util; -import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; @@ -248,7 +248,7 @@ public abstract class StringUtils { } public static List<String> findSimilar(String match, Iterable<String> potentialMatches) { - LevensteinDistance ld = new LevensteinDistance(); + LevenshteinDistance ld = new LevenshteinDistance(); List<Tuple<Float, String>> scoredMatches = new ArrayList<>(); for (String potentialMatch : potentialMatches) { float distance = ld.getDistance(match, potentialMatch); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index a9e1349e831..9aa0c9f7b36 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -52,11 +52,6 @@ public class FieldHitExtractorTests extends AbstractWireSerializingTestCase IdentifierBuilder.validateIndex("some,index", L)); - assertThat(pe.getMessage(), is("line 1:12: Invalid index name (illegal character ,) some,index")); - } - - public void testUpperCasedIndex() throws Exception { - ParsingException pe = expectThrows(ParsingException.class, () -> IdentifierBuilder.validateIndex("thisIsAnIndex", L)); - assertThat(pe.getMessage(), is("line 1:12: Invalid index name (needs to be lowercase) thisIsAnIndex")); - } -} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java index e42ec51b425..c6e1c389bb1 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java @@ -49,6 +49,22 @@ public class SysTablesTests extends ESTestCase { private final IndexInfo index = new IndexInfo("test", IndexType.INDEX); private final IndexInfo alias = new IndexInfo("alias", IndexType.ALIAS); + public void testSysTablesEnumerateCatalog() throws Exception { + 
executeCommand("SYS TABLES CATALOG LIKE '%'", r -> { + assertEquals(1, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + }); + } + + public void testSysTablesEnumerateTypes() throws Exception { + executeCommand("SYS TABLES TYPE '%'", r -> { + assertEquals(2, r.size()); + assertEquals("ALIAS", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("BASE TABLE", r.column(3)); + }); + } + public void testSysTablesDifferentCatalog() throws Exception { executeCommand("SYS TABLES CATALOG LIKE 'foo'", r -> { assertEquals(0, r.size()); @@ -58,10 +74,10 @@ public void testSysTablesNoTypes() throws Exception { executeCommand("SYS TABLES", r -> { - assertEquals("alias", r.column(2)); - assertTrue(r.advanceRow()); assertEquals(2, r.size()); - assertEquals("test", r.column(2)); + assertEquals("ALIAS", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("BASE TABLE", r.column(3)); }, index, alias); } @@ -208,22 +224,7 @@ public void testSysTablesTypesEnumerationWoString() throws Exception { executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> { - assertEquals(2, r.size()); - - Iterator<IndexType> it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator(); - - for (int t = 0; t < r.size(); t++) { - assertEquals(it.next().toSql(), r.column(3)); - - // everything else should be null - for (int i = 0; i < 10; i++) { - if (i != 3) { - assertNull(r.column(i)); - } - } - - r.advanceRow(); - } + assertEquals(0, r.size()); }, new IndexInfo[0]); } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json new file mode 100644 index 00000000000..46ff872a1a4 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json @@ -0,0 +1,21 @@ +{ + "ccr.create_and_follow_index": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "methods": [ "POST" ], + "url": { + "path": "/{index}/_ccr/create_and_follow", + "paths": [ "/{index}/_ccr/create_and_follow" ], + "parts": { + "index": { + "type": "string", + "required": true, + "description": "The name of the follower index" + } + } + }, + "body": { + "description" : "The name of the leader index and other optional ccr related parameters", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json new file mode 100644 index 00000000000..c958c842b54 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json @@ -0,0 +1,17 @@ +{ + "ccr.delete_auto_follow_pattern": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "methods": [ "DELETE" ], + "url": { + "path": "/_ccr/auto_follow/{leader_cluster_alias}", + "paths": [ "/_ccr/auto_follow/{leader_cluster_alias}" ], + "parts": { + "leader_cluster_alias": { + "type": "string", + "required": true, + "description": "The name of the leader cluster alias." 
+ } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json new file mode 100644 index 00000000000..749aae48d91 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json @@ -0,0 +1,21 @@ +{ + "ccr.follow_index": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "methods": [ "POST" ], + "url": { + "path": "/{index}/_ccr/follow", + "paths": [ "/{index}/_ccr/follow" ], + "parts": { + "index": { + "type": "string", + "required": true, + "description": "The name of the follower index." + } + } + }, + "body": { + "description" : "The name of the leader index and other optional ccr related parameters", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json new file mode 100644 index 00000000000..ca9c255097f --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json @@ -0,0 +1,21 @@ +{ + "ccr.put_auto_follow_pattern": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "methods": [ "PUT" ], + "url": { + "path": "/_ccr/auto_follow/{leader_cluster_alias}", + "paths": [ "/_ccr/auto_follow/{leader_cluster_alias}" ], + "parts": { + "leader_cluster_alias": { + "type": "string", + "required": true, + "description": "The name of the leader cluster alias." + } + } + }, + "body": { + "description" : "The specification of the auto follow pattern", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json new file mode 100644 index 00000000000..7f5cda09f25 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json @@ -0,0 +1,16 @@ +{ + "ccr.stats": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "methods": [ "GET" ], + "url": { + "path": "/_ccr/stats", + "paths": [ "/_ccr/stats", "/_ccr/stats/{index}" ], + "parts": { + "index": { + "type": "list", + "description": "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices" + } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json new file mode 100644 index 00000000000..5e9a111496a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json @@ -0,0 +1,17 @@ +{ + "ccr.unfollow_index": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "methods": [ "POST" ], + "url": { + "path": "/{index}/_ccr/unfollow", + "paths": [ "/{index}/_ccr/unfollow" ], + "parts": { + "index": { + "type": "string", + "required": true, + "description": "The name of the follower index that should stop following its leader index." 
+ } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_forecast.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_forecast.json new file mode 100644 index 00000000000..78040351d35 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_forecast.json @@ -0,0 +1,38 @@ +{ + "xpack.ml.delete_forecast": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html", + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/_forecast/{forecast_id}", + "paths": [ + "/_xpack/ml/anomaly_detectors/{job_id}/_forecast", + "/_xpack/ml/anomaly_detectors/{job_id}/_forecast/{forecast_id}" + ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The ID of the job from which to delete forecasts" + }, + "forecast_id": { + "type": "string", + "required": false, + "description": "The ID of the forecast to delete, can be comma delimited list. Leaving blank implies `_all`" + } + }, + "params": { + "allow_no_forecasts": { + "type": "boolean", + "required": false, + "description": "Whether to ignore if `_all` matches no forecasts" + }, + "timeout": { + "type": "time", + "requred": false, + "description": "Controls the time to wait until the forecast(s) are deleted. Default to 30 seconds" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.find_file_structure.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.find_file_structure.json new file mode 100644 index 00000000000..bd41e0c00bc --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.find_file_structure.json @@ -0,0 +1,25 @@ +{ + "xpack.ml.find_file_structure": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-file-structure.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/find_file_structure", + "paths": [ "/_xpack/ml/find_file_structure" ], + "params": { + "lines_to_sample": { + "type": "int", + "description": "Optional parameter to specify how many lines of the file to include in the analysis" + }, + "explain": { + "type": "boolean", + "description": "Optional parameter to include an commentary on how the structure was derived" + } + } + }, + "body": { + "description" : "The contents of the file to be analyzed", + "required" : true, + "serialize" : "bulk" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json index 26c72666e8f..4c1df6b99db 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json @@ -1,6 +1,6 @@ { "xpack.security.delete_role_mapping": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-role-mapping.html#security-api-delete-role-mapping", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role-mapping.html", "methods": [ "DELETE" ], "url": { "path": "/_xpack/security/role_mapping/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json index 
0bdeb54cfb6..7696f6671e4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json @@ -1,6 +1,6 @@ { "xpack.security.get_role_mapping": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-role-mapping.html#security-api-get-role-mapping", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html", "methods": [ "GET" ], "url": { "path": "/_xpack/security/role_mapping/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.has_privileges.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.has_privileges.json index 64b15ae9c02..9c75b40e4d1 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.has_privileges.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.has_privileges.json @@ -1,6 +1,6 @@ { "xpack.security.has_privileges": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-privileges.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges.html", "methods": [ "GET", "POST" ], "url": { "path": "/_xpack/security/user/_has_privileges", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json index 3f92cd130ba..98e723d80e9 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json @@ -1,6 +1,6 @@ { "xpack.security.put_role_mapping": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-role-mapping.html#security-api-put-role-mapping", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html", "methods": [ "PUT", "POST" ], "url": { "path": "/_xpack/security/role_mapping/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml new file mode 100644 index 00000000000..f4cf79fb558 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml @@ -0,0 +1,13 @@ +--- +"Test put and delete auto follow pattern": + - do: + ccr.put_auto_follow_pattern: + leader_cluster_alias: _local_ + body: + leader_index_patterns: ['logs-*'] + - is_true: acknowledged + + - do: + ccr.delete_auto_follow_pattern: + leader_cluster_alias: _local_ + - is_true: acknowledged diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml new file mode 100644 index 00000000000..6c95f307c25 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml @@ -0,0 +1,42 @@ +--- +"Test follow and unfollow an existing index": + - do: + indices.create: + index: foo + body: + settings: + index: + soft_deletes: + enabled: true + mappings: + doc: + properties: + field: + type: keyword + - is_true: acknowledged + + - do: + ccr.create_and_follow_index: + index: bar + body: + leader_index: foo + - is_true: follow_index_created + - is_true: 
follow_index_shards_acked + - is_true: index_following_started + + - do: + ccr.unfollow_index: + index: bar + - is_true: acknowledged + + - do: + ccr.follow_index: + index: bar + body: + leader_index: foo + - is_true: acknowledged + + - do: + ccr.unfollow_index: + index: bar + - is_true: acknowledged diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/stats.yml new file mode 100644 index 00000000000..431629b1d23 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/stats.yml @@ -0,0 +1,57 @@ +--- +"Test stats": + - do: + indices.create: + index: foo + body: + settings: + index: + soft_deletes: + enabled: true + mappings: + doc: + properties: + field: + type: keyword + + - do: + ccr.create_and_follow_index: + index: bar + body: + leader_index: foo + - is_true: follow_index_created + - is_true: follow_index_shards_acked + - is_true: index_following_started + + # we can not reliably wait for replication to occur so we test the endpoint without indexing any documents + - do: + ccr.stats: + index: bar + - match: { bar.0.leader_index: "foo" } + - match: { bar.0.shard_id: 0 } + - gte: { bar.0.leader_global_checkpoint: -1 } + - gte: { bar.0.leader_max_seq_no: -1 } + - gte: { bar.0.follower_global_checkpoint: -1 } + - gte: { bar.0.follower_max_seq_no: -1 } + - gte: { bar.0.last_requested_seq_no: -1 } + - gte: { bar.0.number_of_concurrent_reads: 0 } + - match: { bar.0.number_of_concurrent_writes: 0 } + - match: { bar.0.number_of_queued_writes: 0 } + - gte: { bar.0.mapping_version: 0 } + - gte: { bar.0.total_fetch_time_millis: 0 } + - gte: { bar.0.number_of_successful_fetches: 0 } + - gte: { bar.0.number_of_failed_fetches: 0 } + - match: { bar.0.operations_received: 0 } + - match: { bar.0.total_transferred_bytes: 0 } + - match: { bar.0.total_index_time_millis: 0 } + - match: { bar.0.number_of_successful_bulk_operations: 0 } + - match: { bar.0.number_of_failed_bulk_operations: 0 } + - match: { bar.0.number_of_operations_indexed: 0 } + - length: { bar.0.fetch_exceptions: 0 } + - gte: { bar.0.time_since_last_fetch_millis: -1 } + + - do: + ccr.unfollow_index: + index: bar + - is_true: acknowledged + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_forecast.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_forecast.yml new file mode 100644 index 00000000000..667f80410e0 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_forecast.yml @@ -0,0 +1,143 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: delete-forecast-job + body: > + { + "description":"A forecast job", + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}], + "bucket_span" : "1s" + }, + "data_description" : { + "format":"xcontent" + } + } + +--- +"Test delete forecast on missing forecast": + - do: + catch: /resource_not_found_exception/ + xpack.ml.delete_forecast: + job_id: delete-forecast-job + forecast_id: this-is-a-bad-forecast + +--- +"Test delete forecast": + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json + index: + index: .ml-anomalies-shared + type: doc + id: "delete-forecast-job_model_forecast_someforecastid_1486591200000_1800_0_961_0" + body: + { + "job_id": "delete-forecast-job", + "forecast_id": "someforecastid", + "result_type": "model_forecast", + "bucket_span": 1800, + "detector_index": 0, + "timestamp": 1486591200000, + "model_feature": "'arithmetic mean value by person'", + "forecast_lower": 5440.502250736747, + "forecast_upper": 6294.296972680027, + "forecast_prediction": 5867.399611708387 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json + index: + index: .ml-anomalies-shared + type: doc + id: "delete-forecast-job_model_forecast_someforecastid_1486591300000_1800_0_961_0" + body: + { + "job_id": "delete-forecast-job", + "forecast_id": "someforecastid", + "result_type": "model_forecast", + "bucket_span": 1800, + "detector_index": 0, + "timestamp": 1486591300000, + "model_feature": "'arithmetic mean value by person'", + "forecast_lower": 5440.502250736747, + "forecast_upper": 6294.296972680027, + "forecast_prediction": 5867.399611708387 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json + index: + index: .ml-anomalies-shared + type: doc + id: "delete-forecast-job_model_forecast_request_stats_someforecastid" + body: + { + "job_id": "delete-forecast-job", + "result_type": "model_forecast_request_stats", + "forecast_id": "someforecastid", + "processed_record_count": 48, + "forecast_messages": [], + "timestamp": 1486575000000, + "forecast_start_timestamp": 1486575000000, + "forecast_end_timestamp": 1486661400000, + "forecast_create_timestamp": 1535721789000, + "forecast_expiry_timestamp": 1536931389000, + "forecast_progress": 1, + "processing_time_ms": 3, + "forecast_memory_bytes": 7034, + "forecast_status": "finished" + } + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.refresh: + index: .ml-anomalies-delete-forecast-job + - do: + xpack.ml.delete_forecast: + job_id: delete-forecast-job + forecast_id: someforecastid + - match: { acknowledged: true } + - do: + catch: missing + get: + id: delete-forecast-job_model_forecast_request_stats_someforecastid + index: .ml-anomalies-shared + type: doc + - do: + catch: missing + get: + id: delete-forecast-job_model_forecast_someforecastid_1486591300000_1800_0_961_0 + index: .ml-anomalies-shared + type: doc + - do: + catch: missing + get: + id: delete-forecast-job_model_forecast_someforecastid_1486591200000_1800_0_961_0 + index: .ml-anomalies-shared + type: doc + +--- +"Test delete on _all forecasts not allow no forecasts": + - do: + catch: /resource_not_found_exception/ + xpack.ml.delete_forecast: + job_id: delete-forecast-job + forecast_id: _all + allow_no_forecasts: false + +--- +"Test delete on _all forecasts": + - do: + xpack.ml.delete_forecast: + job_id: delete-forecast-job + forecast_id: _all + allow_no_forecasts: true + - match: { acknowledged: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/find_file_structure.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/find_file_structure.yml new file mode 100644 index 00000000000..1d164cc0c5a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/find_file_structure.yml @@ -0,0 +1,44 @@ +--- +"Test JSON file structure analysis": + - do: + headers: + # This is to stop the usual content type randomization, which + # would obviously ruin the results for this particular test + Content-Type: "application/json" + xpack.ml.find_file_structure: + body: + - airline: AAL + responsetime: 132.2046 + sourcetype: file-structure-test + time: 1403481600 + - airline: JZA + responsetime: 990.4628 + sourcetype: file-structure-test + time: 1403481700 + - airline: AAL + responsetime: 134.2046 + sourcetype: file-structure-test + time: 1403481800 + + - match: { num_lines_analyzed: 3 } + - match: { num_messages_analyzed: 3 } + - match: { charset: "UTF-8" } + - match: { has_byte_order_marker: false } + - match: { format: json } + - match: { timestamp_field: time } + - match: { timestamp_formats.0: UNIX } + - match: { need_client_timezone: false } + - match: { mappings.airline.type: keyword } + - match: { mappings.responsetime.type: double } + - match: { mappings.sourcetype.type: keyword } + - match: { mappings.time.type: date } + - match: { mappings.time.format: epoch_second } + - match: { field_stats.airline.count: 3 } + - match: { field_stats.airline.cardinality: 2 } + - match: { field_stats.responsetime.count: 3 } + - match: { field_stats.responsetime.cardinality: 3 } + - match: { field_stats.sourcetype.count: 3 } + - match: { field_stats.sourcetype.cardinality: 1 } + - match: { field_stats.time.count: 3 } + - match: { field_stats.time.cardinality: 3 } + - match: { field_stats.time.cardinality: 3 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml index f3fa8114ddb..759ddbad2b4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml @@ -210,3 +210,4 @@ setup: job_state: "stopped" upgraded_doc_id: true + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml index 516be25be2a..23df0c58377 
100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml @@ -128,6 +128,38 @@ setup: ] } +--- +"Test put_job in non-rollup index": + - do: + indices.create: + index: non-rollup + - do: + catch: /foo/ + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "non-rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + + --- "Try to include headers": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml index d401d5c69ba..e2f1174665e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml @@ -152,6 +152,20 @@ setup: - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" } - match: { aggregations.histo.buckets.3.doc_count: 20 } +--- +"Empty aggregation": + + - do: + xpack.rollup.rollup_search: + index: "foo_rollup" + body: + size: 0 + aggs: {} + + - length: { hits.hits: 0 } + - match: { hits.total: 0 } + - is_false: aggregations + --- "Search with Metric": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml new file mode 100644 index 00000000000..57bfd821ea2 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml @@ -0,0 +1,343 @@ +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + + +--- +teardown: + - do: + xpack.security.delete_user: + username: "test_user" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "foo_only_access" + ignore: 404 + +--- +"Index-based access": + + - do: + xpack.security.put_role: + name: "foo_only_access" + body: > + { + "cluster": [ "all" ], + "indices": [ + { "names": ["foo"], "privileges": ["all"] }, + { "names": ["rollup"], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "foo_only_access" ], + "full_name" : "foo only" + } + + - do: + indices.create: + index: foo + body: + mappings: + _doc: + properties: + timestamp: + type: date + value_field: + type: integer + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: foo + type: _doc + body: + timestamp: 123 + value_field: 1232 + + - do: + indices.create: + index: foobar + body: + mappings: + _doc: + properties: + timestamp: + type: date + value_field: + type: integer + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + index: + index: foobar + type: _doc + body: + timestamp: 123 + value_field: 456 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.refresh: + index: foo + + # This index pattern will match both indices, but we only have permission to read one + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo*", + "rollup_index": "rollup", + "cron": "*/1 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1s" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + + - is_true: acknowledged + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.start_job: + id: foo + - is_true: started + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.refresh: + index: rollup + + # this is a hacky way to sleep for 5s, since we will never have 10 nodes + - do: + catch: request_timeout + cluster.health: + wait_for_nodes: 10 + timeout: "5s" + - match: + timed_out: true + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.get_jobs: + id: foo + - match: + jobs.0.stats.documents_processed: 1 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: foo + body: + query: + match_all: {} + + - match: + hits.total: 1 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: rollup + body: + query: + match_all: {} + + - match: + hits.total: 1 + - match: + hits.hits.0._id: "foo$VxMkzTqILshClbtbFi4-rQ" + - match: + hits.hits.0._source: + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram.timestamp: 0 + value_field.max.value: 1232.0 + _rollup.version: 2 + timestamp.date_histogram.interval: "1s" + value_field.sum.value: 1232.0 + value_field.min.value: 1232.0 + timestamp.date_histogram._count: 1 + _rollup.id: "foo" + + +--- +"Attribute-based access": + + - do: + xpack.security.put_role: + name: "foo_only_access" + body: > + { + "cluster": [ "all" ], + "indices": [ + { + "names": ["foo"], + "privileges": ["all"], + "query": { + "template": { + "source": "{\"bool\":{\"filter\":[{\"term\":{\"visibility\":\"public\"}}]}}" + } + } + }, + { "names": ["rollup"], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "foo_only_access" ], + "full_name" : "foo only" + } + + - do: + indices.create: + index: foo + body: + mappings: + _doc: + properties: + timestamp: + type: date + value_field: + type: integer + visibility: + type: keyword + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: foo + type: _doc + body: + timestamp: 123 + value_field: 1232 + visibility: "public" + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + index: + index: foobar + type: _doc + body: + timestamp: 123 + value_field: 456 + visibility: "private" + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.refresh: + index: foo + + # Index contains two docs, but we should only be able to see one of them + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "rollup", + "cron": "*/1 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1s" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + - is_true: acknowledged + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.start_job: + id: foo + - is_true: started + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.refresh: + index: rollup + + # this is a hacky way to sleep for 5s, since we will never have 10 nodes + - do: + catch: request_timeout + cluster.health: + wait_for_nodes: 10 + timeout: "5s" + - match: + timed_out: true + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.get_jobs: + id: foo + - match: + jobs.0.stats.documents_processed: 1 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: foo + body: + query: + match_all: {} + + - match: + hits.total: 1 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: rollup + body: + query: + match_all: {} + + - match: + hits.total: 1 + - match: + hits.hits.0._id: "foo$VxMkzTqILshClbtbFi4-rQ" + - match: + hits.hits.0._source: + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram.timestamp: 0 + value_field.max.value: 1232.0 + _rollup.version: 2 + timestamp.date_histogram.interval: "1s" + value_field.sum.value: 1232.0 + value_field.min.value: 1232.0 + timestamp.date_histogram._count: 1 + _rollup.id: "foo" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml index a33fcdb5297..7a22ad322bf 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml @@ -1,6 +1,8 @@ --- "Test watcher usage stats output": - + - skip: + version: "all" + reason: AwaitsFix at https://github.com/elastic/elasticsearch/issues/33326 - do: catch: missing xpack.watcher.delete_watch: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml index 1e3fc840799..2aea0126e9e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml @@ -10,6 +10,7 @@ - do: nodes.info: {} + - contains: { nodes.$master.modules: { name: x-pack-ccr } } - contains: { nodes.$master.modules: { name: x-pack-core } } - contains: { nodes.$master.modules: { name: x-pack-deprecation } } - contains: { nodes.$master.modules: { name: x-pack-graph } } diff --git a/x-pack/plugin/upgrade/build.gradle b/x-pack/plugin/upgrade/build.gradle 
index f95cde7134c..56ce274dd11 100644 --- a/x-pack/plugin/upgrade/build.gradle +++ b/x-pack/plugin/upgrade/build.gradle @@ -14,7 +14,8 @@ esplugin { archivesBaseName = 'x-pack-upgrade' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java index 07017e6fc00..ad0ebd6815f 100644 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java @@ -79,7 +79,7 @@ public class IndexUpgradeService extends AbstractComponent { } } // Catch all check for all indices that didn't match the specific checks - if (indexMetaData.getCreationVersion().before(Version.V_5_0_0)) { + if (indexMetaData.getCreationVersion().before(Version.V_6_0_0)) { return UpgradeActionRequired.REINDEX; } else { return null; diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java index 82208f1f5ce..d7be0a389e3 100644 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java @@ -6,8 +6,6 @@ package org.elasticsearch.xpack.upgrade; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.ParentTaskAssigningClient; @@ -116,10 +114,10 @@ public class InternalIndexReindexer<T> { private void reindex(ParentTaskAssigningClient parentAwareClient, String index, String newIndex, ActionListener<BulkByScrollResponse> listener) { - SearchRequest sourceRequest = new SearchRequest(index); - sourceRequest.types(types); - IndexRequest destinationRequest = new IndexRequest(newIndex); - ReindexRequest reindexRequest = new ReindexRequest(sourceRequest, destinationRequest); + ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices(index); + reindexRequest.setSourceDocTypes(types); + reindexRequest.setDestIndex(newIndex); reindexRequest.setRefresh(true); reindexRequest.setScript(transformScript); parentAwareClient.execute(ReindexAction.INSTANCE, reindexRequest, listener); diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java index 568397e3739..e454ac4a014 100644 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java @@ -44,7 +44,7 @@ import java.util.function.Supplier; public class Upgrade extends Plugin implements ActionPlugin { - public static final Version UPGRADE_INTRODUCED = Version.V_5_6_0; + public static final Version UPGRADE_INTRODUCED = 
Version.CURRENT.minimumCompatibilityVersion(); private final Settings settings; private final List<BiFunction<Client, ClusterService, IndexUpgradeCheck>> upgradeCheckFactories; diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java index 5939777572b..f980450c07f 100644 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java @@ -166,7 +166,7 @@ public class IndexUpgradeServiceTests extends ESTestCase { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_CREATION_DATE, 1) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_5_0_0_beta1) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_6_0_0) .put(indexSettings) .build(); IndexMetaData.Builder builder = IndexMetaData.builder(name).settings(build); diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java index cd83803d188..71e3348b058 100644 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java @@ -206,9 +206,9 @@ public class InternalIndexReindexerIT extends IndexUpgradeIntegTestCase { DiscoveryNode node = discoveryNodes.get(nodeId); DiscoveryNode newNode = new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), node.getHostAddress(), node.getAddress(), node.getAttributes(), node.getRoles(), - randomVersionBetween(random(), Version.V_5_0_0, Version.V_5_4_0)); + randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0)); return ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(discoveryNodes).remove(node).add(newNode)).build(); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle index a0feab67463..3412cafc4f4 100644 --- a/x-pack/plugin/watcher/build.gradle +++ b/x-pack/plugin/watcher/build.gradle @@ -25,7 +25,8 @@ dependencyLicenses { dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') compileOnly project(path: ':modules:transport-netty4', configuration: 'runtime') compileOnly project(path: ':plugins:transport-nio', configuration: 'runtime') @@ -67,7 +68,7 @@ thirdPartyAudit.excludes = [ ] // pulled in as external dependency to work on java 9 -if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += [ 'com.sun.activation.registries.MailcapParseException', 'javax.activation.ActivationDataFlavor', diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 96237b6e3de..33b79c38cca 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ 
b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -300,7 +300,7 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin, Reloa actionFactoryMap.put(EmailAction.TYPE, new EmailActionFactory(settings, emailService, templateEngine, emailAttachmentsParser)); actionFactoryMap.put(WebhookAction.TYPE, new WebhookActionFactory(settings, httpClient, templateEngine)); actionFactoryMap.put(IndexAction.TYPE, new IndexActionFactory(settings, client)); - actionFactoryMap.put(LoggingAction.TYPE, new LoggingActionFactory(settings, templateEngine)); + actionFactoryMap.put(LoggingAction.TYPE, new LoggingActionFactory(templateEngine)); actionFactoryMap.put(HipChatAction.TYPE, new HipChatActionFactory(settings, templateEngine, hipChatService)); actionFactoryMap.put(JiraAction.TYPE, new JiraActionFactory(settings, templateEngine, jiraService)); actionFactoryMap.put(SlackAction.TYPE, new SlackActionFactory(settings, templateEngine, slackService)); @@ -535,7 +535,7 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin, Reloa String errorMessage = LoggerMessageFormat.format("the [action.auto_create_index] setting value [{}] is too" + " restrictive. disable [action.auto_create_index] or set it to " + - "[{}, {}, {}*]", (Object) value, Watch.INDEX, TriggeredWatchStoreField.INDEX_NAME, HistoryStoreField.INDEX_PREFIX); + "[{},{},{}*]", (Object) value, Watch.INDEX, TriggeredWatchStoreField.INDEX_NAME, HistoryStoreField.INDEX_PREFIX); if (Booleans.isFalse(value)) { throw new IllegalArgumentException(errorMessage); } @@ -602,7 +602,7 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin, Reloa } @Override - public List getContexts() { + public List> getContexts() { return Arrays.asList(Watcher.SCRIPT_SEARCH_CONTEXT, Watcher.SCRIPT_EXECUTABLE_CONTEXT, Watcher.SCRIPT_TEMPLATE_CONTEXT); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index 620d575fc80..279d768fde8 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; @@ -22,13 +21,16 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; import java.util.Collections; +import java.util.Comparator; import java.util.List; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -45,7 +47,7 @@ public class 
WatcherLifeCycleService extends AbstractComponent implements Cluste Setting.boolSetting("xpack.watcher.require_manual_start", false, Property.NodeScope); private final AtomicReference<WatcherState> state = new AtomicReference<>(WatcherState.STARTED); - private final AtomicReference<List<String>> previousAllocationIds = new AtomicReference<>(Collections.emptyList()); + private final AtomicReference<List<ShardRouting>> previousShardRoutings = new AtomicReference<>(Collections.emptyList()); private final boolean requireManualStart; private volatile boolean shutDown = false; // indicates that the node has been shutdown and we should never start watcher after this. private volatile WatcherService watcherService; @@ -110,6 +112,7 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste // if this is not a data node, we need to start it ourselves possibly if (event.state().nodes().getLocalNode().isDataNode() == false && isWatcherStoppedManually == false && this.state.get() == WatcherState.STOPPED) { + this.state.set(WatcherState.STARTING); watcherService.start(event.state(), () -> this.state.set(WatcherState.STARTED)); return; } @@ -144,15 +147,20 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste return; } - List<String> currentAllocationIds = localShards.stream() - .map(ShardRouting::allocationId) - .map(AllocationId::getId) - .sorted() + // also check if non-local shards have changed, as losing a shard on a + // remote node or adding a replica on a remote node needs to trigger a reload too + Set<ShardId> localShardIds = localShards.stream().map(ShardRouting::shardId).collect(Collectors.toSet()); + List<ShardRouting> allShards = event.state().routingTable().index(watchIndex).shardsWithState(STARTED); + allShards.addAll(event.state().routingTable().index(watchIndex).shardsWithState(RELOCATING)); + List<ShardRouting> localAffectedShardRoutings = allShards.stream() + .filter(shardRouting -> localShardIds.contains(shardRouting.shardId())) + // ShardRouting is not Comparable, so we need some ordering mechanism + .sorted(Comparator.comparing(ShardRouting::hashCode)) + .collect(Collectors.toList()); - if (previousAllocationIds.get().equals(currentAllocationIds) == false) { + if (previousShardRoutings.get().equals(localAffectedShardRoutings) == false) { if (watcherService.validate(event.state())) { - previousAllocationIds.set(Collections.unmodifiableList(currentAllocationIds)); + previousShardRoutings.set(localAffectedShardRoutings); if (state.get() == WatcherState.STARTED) { watcherService.reload(event.state(), "new local watcher shard allocation ids"); } else if (state.get() == WatcherState.STOPPED) { @@ -187,13 +195,13 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste * @return true, if existing allocation ids were cleaned out, false otherwise */ private boolean clearAllocationIds() { - List<String> previousIds = previousAllocationIds.getAndSet(Collections.emptyList()); - return previousIds.equals(Collections.emptyList()) == false; + List<ShardRouting> previousIds = previousShardRoutings.getAndSet(Collections.emptyList()); + return previousIds.isEmpty() == false; } // for testing purposes only - List<String> allocationIds() { - return previousAllocationIds.get(); + List<ShardRouting> shardRoutings() { + return previousShardRoutings.get(); } public WatcherState getState() { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index 49915674fe9..599287bb50a 100644 ---
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -183,9 +183,6 @@ public class WatcherService extends AbstractComponent { // by checking the cluster state version before and after loading the watches we can potentially just exit without applying the // changes processedClusterStateVersion.set(state.getVersion()); - triggerService.pauseExecution(); - int cancelledTaskCount = executionService.clearExecutionsAndQueue(); - logger.info("reloading watcher, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); executor.execute(wrapWatcherService(() -> reloadInner(state, reason, false), e -> logger.error("error reloading watcher", e))); @@ -221,6 +218,7 @@ public class WatcherService extends AbstractComponent { if (processedClusterStateVersion.get() != state.getVersion()) { logger.debug("watch service has not been reloaded for state [{}], another reload for state [{}] in progress", state.getVersion(), processedClusterStateVersion.get()); + return false; } Collection watches = loadWatches(state); @@ -231,7 +229,13 @@ public class WatcherService extends AbstractComponent { // if we had another state coming in the meantime, we will not start the trigger engines with these watches, but wait // until the others are loaded + // also this is the place where we pause the trigger service execution and clear the current execution service, so that we make sure + // that existing executions finish, but no new ones are executed if (processedClusterStateVersion.get() == state.getVersion()) { + triggerService.pauseExecution(); + int cancelledTaskCount = executionService.clearExecutionsAndQueue(); + logger.info("reloading watcher, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); + executionService.unPause(); triggerService.start(watches); if (triggeredWatches.isEmpty() == false) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/ExecutableLoggingAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/ExecutableLoggingAction.java index 37f242ca499..b1cb723949d 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/ExecutableLoggingAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/ExecutableLoggingAction.java @@ -6,8 +6,7 @@ package org.elasticsearch.xpack.watcher.actions.logging; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.xpack.core.watcher.actions.Action; import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; @@ -22,9 +21,9 @@ public class ExecutableLoggingAction extends ExecutableAction { private final Logger textLogger; private final TextTemplateEngine templateEngine; - public ExecutableLoggingAction(LoggingAction action, Logger logger, Settings settings, TextTemplateEngine templateEngine) { + public ExecutableLoggingAction(LoggingAction action, Logger logger, TextTemplateEngine templateEngine) { super(action, logger); - this.textLogger = action.category != null ? Loggers.getLogger(action.category, settings) : logger; + this.textLogger = action.category != null ? 
LogManager.getLogger(action.category) : logger; this.templateEngine = templateEngine; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionFactory.java index 44a8ace89e9..44bbbd4675a 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionFactory.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionFactory.java @@ -5,8 +5,7 @@ */ package org.elasticsearch.xpack.watcher.actions.logging; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; @@ -15,18 +14,16 @@ import java.io.IOException; public class LoggingActionFactory extends ActionFactory { - private final Settings settings; private final TextTemplateEngine templateEngine; - public LoggingActionFactory(Settings settings, TextTemplateEngine templateEngine) { - super(Loggers.getLogger(ExecutableLoggingAction.class, settings)); - this.settings = settings; + public LoggingActionFactory(TextTemplateEngine templateEngine) { + super(LogManager.getLogger(ExecutableLoggingAction.class)); this.templateEngine = templateEngine; } @Override public ExecutableLoggingAction parseExecutable(String watchId, String actionId, XContentParser parser) throws IOException { LoggingAction action = LoggingAction.parse(watchId, actionId, parser); - return new ExecutableLoggingAction(action, actionLogger, settings, templateEngine); + return new ExecutableLoggingAction(action, actionLogger, templateEngine); } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java index ec309c69476..1e6285f71d7 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java @@ -13,20 +13,15 @@ import java.util.Objects; public abstract class CronnableSchedule implements Schedule { - private static final Comparator CRON_COMPARATOR = new Comparator() { - @Override - public int compare(Cron c1, Cron c2) { - return c1.expression().compareTo(c2.expression()); - } - }; + private static final Comparator CRON_COMPARATOR = Comparator.comparing(Cron::expression); protected final Cron[] crons; - public CronnableSchedule(String... expressions) { + CronnableSchedule(String... expressions) { this(crons(expressions)); } - public CronnableSchedule(Cron... crons) { + private CronnableSchedule(Cron... 
crons) { assert crons.length > 0; this.crons = crons; Arrays.sort(crons, CRON_COMPARATOR); @@ -35,11 +30,14 @@ public abstract class CronnableSchedule implements Schedule { @Override public long nextScheduledTimeAfter(long startTime, long time) { assert time >= startTime; - long nextTime = Long.MAX_VALUE; - for (Cron cron : crons) { - nextTime = Math.min(nextTime, cron.getNextValidTimeAfter(time)); - } - return nextTime; + return Arrays.stream(crons) + .map(cron -> cron.getNextValidTimeAfter(time)) + // filter out expired dates before sorting + .filter(nextValidTime -> nextValidTime > -1) + .sorted() + .findFirst() + // no date in the future found, return -1 to the caller + .orElse(-1L); } public Cron[] crons() { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java index 05aa7cf3028..4c10f794880 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java @@ -56,7 +56,7 @@ public class TickerScheduleTriggerEngine extends ScheduleTriggerEngine { schedules.put(job.id(), new ActiveSchedule(job.id(), trigger.getSchedule(), startTime)); } } - this.schedules.putAll(schedules); + this.schedules = schedules; } @Override diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java index 33b1217895d..d22d402aa15 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java @@ -61,14 +61,18 @@ public class CronEvalTool extends LoggingAwareCommand { Cron cron = new Cron(expression); long time = date.getMillis(); + for (int i = 0; i < count; i++) { long prevTime = time; time = cron.getNextValidTimeAfter(time); if (time < 0) { - throw new UserException(ExitCodes.OK, (i + 1) + ".\t Could not compute future times since [" - + formatter.print(prevTime) + "] " + "(perhaps the cron expression only points to times in the past?)"); + if (i == 0) { + throw new UserException(ExitCodes.OK, "Could not compute future times since [" + + formatter.print(prevTime) + "] " + "(perhaps the cron expression only points to times in the past?)"); + } + break; } - terminal.println((i+1) + ".\t" + formatter.print(time)); + terminal.println((i + 1) + ".\t" + formatter.print(time)); } } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java index 700901753d4..384338af5a2 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -254,9 +254,12 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { .add(newNode("node_2")) .build(); + ShardRouting firstShardOnSecondNode = TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED); + 
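The nextScheduledTimeAfter rewrite above is a behavior fix, not just a style change: Cron#getNextValidTimeAfter returns -1 once an expression can never fire again, and the old Math.min loop let that -1 shadow valid future times from the remaining expressions. A minimal sketch of the corrected selection logic, assuming the per-cron next times have already been computed (the helper class below is hypothetical, not part of this change):

    import java.util.Arrays;

    class NextTimeSelection {
        // Mirrors the stream pipeline above: drop expired (-1) entries first,
        // then take the earliest remaining time, or -1 if nothing is left.
        static long earliestFutureTime(long[] nextTimesPerCron) {
            return Arrays.stream(nextTimesPerCron)
                    .filter(next -> next > -1)
                    .min()
                    .orElse(-1L);
        }

        public static void main(String[] args) {
            // An expired expression (-1) must not win over a real future time.
            System.out.println(earliestFutureTime(new long[] { -1L, 1_600_000_000_000L })); // 1600000000000
            System.out.println(earliestFutureTime(new long[] { -1L, -1L }));                // -1
        }
    }

This is exactly the case exercised by testMultipleCronsNextScheduledAfter further down, which mixes a 2017-only expression with 2019 and 2020 ones.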
ShardRouting secondShardOnFirstNode = TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED); + IndexRoutingTable previousWatchRoutingTable = IndexRoutingTable.builder(watchIndex) - .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) - .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) + .addShard(secondShardOnFirstNode) + .addShard(firstShardOnSecondNode) .build(); IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) @@ -273,10 +276,19 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { .metaData(MetaData.builder().put(indexMetaData, false)) .build(); + // add a replica in the local node + boolean addShardOnLocalNode = randomBoolean(); + final ShardRouting addedShardRouting; + if (addShardOnLocalNode) { + addedShardRouting = TestShardRouting.newShardRouting(shardId, "node_1", false, STARTED); + } else { + addedShardRouting = TestShardRouting.newShardRouting(secondShardId, "node_2", false, STARTED); + } + IndexRoutingTable currentWatchRoutingTable = IndexRoutingTable.builder(watchIndex) - .addShard(TestShardRouting.newShardRouting(shardId, "node_1", false, STARTED)) - .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) - .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) + .addShard(secondShardOnFirstNode) + .addShard(firstShardOnSecondNode) + .addShard(addedShardRouting) .build(); ClusterState stateWithReplicaAdded = ClusterState.builder(new ClusterName("my-cluster")) @@ -477,7 +489,67 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { assertThat(lifeCycleService.getState(), is(WatcherState.STARTED)); } - private ClusterState startWatcher() { + // this emulates a node outage somewhere in the cluster that carried a watcher shard + // the number of shards remains the same, but we need to ensure that watcher properly reloads + // previously we only checked the local shard allocations, but we also need to check if shards in the cluster have changed + public void testWatcherReloadsOnNodeOutageWithWatcherShard() { + Index watchIndex = new Index(Watch.INDEX, "foo"); + ShardId shardId = new ShardId(watchIndex, 0); + String localNodeId = randomFrom("node_1", "node_2"); + String outageNodeId = localNodeId.equals("node_1") ? 
"node_2" : "node_1"; + DiscoveryNodes previousDiscoveryNodes = new DiscoveryNodes.Builder().masterNodeId(localNodeId).localNodeId(localNodeId) + .add(newNode(localNodeId)) + .add(newNode(outageNodeId)) + .build(); + + ShardRouting replicaShardRouting = TestShardRouting.newShardRouting(shardId, localNodeId, false, STARTED); + ShardRouting primartShardRouting = TestShardRouting.newShardRouting(shardId, outageNodeId, true, STARTED); + IndexRoutingTable previousWatchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(replicaShardRouting) + .addShard(primartShardRouting) + .build(); + + IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6) + ).build(); + + ClusterState previousState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(previousDiscoveryNodes) + .routingTable(RoutingTable.builder().add(previousWatchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); + + ShardRouting nowPrimaryShardRouting = replicaShardRouting.moveActiveReplicaToPrimary(); + IndexRoutingTable currentWatchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(nowPrimaryShardRouting) + .build(); + + DiscoveryNodes currentDiscoveryNodes = new DiscoveryNodes.Builder().masterNodeId(localNodeId).localNodeId(localNodeId) + .add(newNode(localNodeId)) + .build(); + + ClusterState currentState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(currentDiscoveryNodes) + .routingTable(RoutingTable.builder().add(currentWatchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); + + // initialize the previous state, so all the allocation ids are loaded + when(watcherService.validate(anyObject())).thenReturn(true); + lifeCycleService.clusterChanged(new ClusterChangedEvent("whatever", previousState, currentState)); + + reset(watcherService); + when(watcherService.validate(anyObject())).thenReturn(true); + ClusterChangedEvent event = new ClusterChangedEvent("whatever", currentState, previousState); + lifeCycleService.clusterChanged(event); + verify(watcherService).reload(eq(event.state()), anyString()); + } + + private void startWatcher() { Index index = new Index(Watch.INDEX, "uuid"); IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); indexRoutingTableBuilder.addShard( @@ -506,12 +578,10 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", state, emptyState)); assertThat(lifeCycleService.getState(), is(WatcherState.STARTED)); verify(watcherService, times(1)).reload(eq(state), anyString()); - assertThat(lifeCycleService.allocationIds(), hasSize(1)); + assertThat(lifeCycleService.shardRoutings(), hasSize(1)); // reset the mock, the user has to mock everything themselves again reset(watcherService); - - return state; } private List randomIndexPatterns() { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java index e345e890db1..b13b035304d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java +++ 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java @@ -37,7 +37,7 @@ public class WatcherPluginTests extends ESTestCase { IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", false).build(), logger)); - assertThat(exception.getMessage(), containsString("[.watches, .triggered_watches, .watcher-history-*]")); + assertThat(exception.getMessage(), containsString("[.watches,.triggered_watches,.watcher-history-*]")); Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".watches,.triggered_watches,.watcher-history*").build(), logger); @@ -46,16 +46,16 @@ public class WatcherPluginTests extends ESTestCase { exception = expectThrows(IllegalArgumentException.class, () -> Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".watches").build(), logger)); - assertThat(exception.getMessage(), containsString("[.watches, .triggered_watches, .watcher-history-*]")); + assertThat(exception.getMessage(), containsString("[.watches,.triggered_watches,.watcher-history-*]")); exception = expectThrows(IllegalArgumentException.class, () -> Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".triggered_watch").build(), logger)); - assertThat(exception.getMessage(), containsString("[.watches, .triggered_watches, .watcher-history-*]")); + assertThat(exception.getMessage(), containsString("[.watches,.triggered_watches,.watcher-history-*]")); exception = expectThrows(IllegalArgumentException.class, () -> Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".watcher-history-*").build(), logger)); - assertThat(exception.getMessage(), containsString("[.watches, .triggered_watches, .watcher-history-*]")); + assertThat(exception.getMessage(), containsString("[.watches,.triggered_watches,.watcher-history-*]")); } public void testWatcherDisabledTests() throws Exception { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionTests.java index fdfd8ae0745..6ed6f524738 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.watcher.actions.logging; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.SuppressLoggerChecks; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.ESTestCase; @@ -92,8 +91,7 @@ public class LoggingActionTests extends ESTestCase { } public void testParser() throws Exception { - Settings settings = Settings.EMPTY; - LoggingActionFactory parser = new LoggingActionFactory(settings, engine); + LoggingActionFactory parser = new LoggingActionFactory(engine); String text = randomAlphaOfLength(10); TextTemplate template = new TextTemplate(text); @@ -126,14 +124,13 @@ public class LoggingActionTests extends ESTestCase { } public void testParserSelfGenerated() throws Exception { - Settings settings = Settings.EMPTY; - LoggingActionFactory parser = new 
LoggingActionFactory(settings, engine); + LoggingActionFactory parser = new LoggingActionFactory(engine); String text = randomAlphaOfLength(10); TextTemplate template = new TextTemplate(text); String category = randomAlphaOfLength(10); LoggingAction action = new LoggingAction(template, level, category); - ExecutableLoggingAction executable = new ExecutableLoggingAction(action, logger, settings, engine); + ExecutableLoggingAction executable = new ExecutableLoggingAction(action, logger, engine); XContentBuilder builder = jsonBuilder(); executable.toXContent(builder, Attachment.XContent.EMPTY_PARAMS); @@ -146,8 +143,7 @@ public class LoggingActionTests extends ESTestCase { } public void testParserBuilder() throws Exception { - Settings settings = Settings.EMPTY; - LoggingActionFactory parser = new LoggingActionFactory(settings, engine); + LoggingActionFactory parser = new LoggingActionFactory(engine); String text = randomAlphaOfLength(10); TextTemplate template = new TextTemplate(text); @@ -172,8 +168,7 @@ public class LoggingActionTests extends ESTestCase { } public void testParserFailure() throws Exception { - Settings settings = Settings.EMPTY; - LoggingActionFactory parser = new LoggingActionFactory(settings, engine); + LoggingActionFactory parser = new LoggingActionFactory(engine); XContentBuilder builder = jsonBuilder() .startObject().endObject(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java index bb593bcb67a..d3f46d3d452 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -33,7 +33,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.watcher.actions.Action; import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java index 7cf29632538..0f6fd33497b 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java @@ -24,7 +24,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; +import org.elasticsearch.search.aggregations.metrics.Percentiles; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.threadpool.ThreadPoolStats; import org.elasticsearch.xpack.core.watcher.WatcherState; @@ -94,7 +94,12 @@ public class 
WatcherScheduleEngineBenchmark { // First clean everything and index the watcher (but not via put alert api!) - try (Node node = new Node(Settings.builder().put(SETTINGS).put("node.data", false).build()).start()) { + try (Node node = new Node(Settings.builder().put(SETTINGS).put("node.data", false).build()) { + @Override + protected void registerDerivedNodeNameWithLogger(String nodeName) { + // Nothing to do because test uses the thread name + } + }.start()) { try (Client client = node.client()) { ClusterHealthResponse response = client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); if (response.getNumberOfNodes() != 2 && response.getNumberOfDataNodes() != 1) { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java index 1ade767410b..6cbdf6e1226 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java @@ -11,11 +11,15 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; + import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.hasItemInArray; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; public class CronScheduleTests extends ScheduleTestCase { public void testInvalid() throws Exception { @@ -54,18 +58,25 @@ public class CronScheduleTests extends ScheduleTestCase { assertThat(crons, hasItemInArray("0 0/3 * * * ?")); } + public void testMultipleCronsNextScheduledAfter() { + CronSchedule schedule = new CronSchedule("0 5 9 1 1 ? 2019", "0 5 9 1 1 ? 2020", "0 5 9 1 1 ? 
2017"); + ZonedDateTime start2019 = ZonedDateTime.of(2019, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + ZonedDateTime start2020 = ZonedDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + long firstSchedule = schedule.nextScheduledTimeAfter(0, start2019.toInstant().toEpochMilli()); + long secondSchedule = schedule.nextScheduledTimeAfter(0, start2020.toInstant().toEpochMilli()); + + assertThat(firstSchedule, is(not(-1L))); + assertThat(secondSchedule, is(not(-1L))); + assertThat(firstSchedule, is(not(secondSchedule))); + } + public void testParseInvalidBadExpression() throws Exception { XContentBuilder builder = jsonBuilder().value("0 0/5 * * ?"); BytesReference bytes = BytesReference.bytes(builder); XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); parser.nextToken(); - try { - new CronSchedule.Parser().parse(parser); - fail("expected cron parsing to fail when using invalid cron expression"); - } catch (ElasticsearchParseException pe) { - // expected - assertThat(pe.getCause(), instanceOf(IllegalArgumentException.class)); - } + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> new CronSchedule.Parser().parse(parser)); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } public void testParseInvalidEmpty() throws Exception { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java index 7949998867b..6680b38ab94 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java @@ -35,7 +35,9 @@ import java.util.function.Consumer; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.daily; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.weekly; +import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; import static org.joda.time.DateTimeZone.UTC; import static org.mockito.Mockito.mock; @@ -50,8 +52,12 @@ public class TickerScheduleEngineTests extends ESTestCase { } private TriggerEngine createEngine() { - return new TickerScheduleTriggerEngine(Settings.EMPTY, - mock(ScheduleRegistry.class), clock); + Settings settings = Settings.EMPTY; + // having a low value here speeds up the tests tremendously, we still want to run with the defaults every now and then + if (usually()) { + settings = Settings.builder().put(TickerScheduleTriggerEngine.TICKER_INTERVAL_SETTING.getKey(), "10ms").build(); + } + return new TickerScheduleTriggerEngine(settings, mock(ScheduleRegistry.class), clock); } private void advanceClockIfNeeded(DateTime newCurrentDateTime) { @@ -104,6 +110,40 @@ public class TickerScheduleEngineTests extends ESTestCase { assertThat(bits.cardinality(), is(count)); } + public void testStartClearsExistingSchedules() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + List firedWatchIds = new ArrayList<>(); + engine.register(new Consumer>() { + @Override + public void accept(Iterable events) { + for (TriggerEvent event : events) { + firedWatchIds.add(event.jobName()); + } + latch.countDown(); + } + }); 
+ + int count = randomIntBetween(2, 5); + List watches = new ArrayList<>(); + for (int i = 0; i < count; i++) { + watches.add(createWatch(String.valueOf(i), interval("1s"))); + } + engine.start(watches); + + watches.clear(); + for (int i = 0; i < count; i++) { + watches.add(createWatch("another_id" + i, interval("1s"))); + } + engine.start(watches); + + advanceClockIfNeeded(new DateTime(clock.millis(), UTC).plusMillis(1100)); + if (!latch.await(3 * count, TimeUnit.SECONDS)) { + fail("waiting too long for all watches to be triggered"); + } + + assertThat(firedWatchIds, everyItem(startsWith("another_id"))); + } + public void testAddHourly() throws Exception { final String name = "job_name"; final CountDownLatch latch = new CountDownLatch(1); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java index 22388424948..f1e864d547c 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java @@ -8,6 +8,13 @@ package org.elasticsearch.xpack.watcher.trigger.schedule.tool; import org.elasticsearch.cli.Command; import org.elasticsearch.cli.CommandTestCase; +import java.util.Calendar; +import java.util.Locale; +import java.util.TimeZone; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; + public class CronEvalToolTests extends CommandTestCase { @Override protected Command newCommand() { @@ -18,6 +25,27 @@ public class CronEvalToolTests extends CommandTestCase { String countOption = randomBoolean() ? "-c" : "--count"; int count = randomIntBetween(1, 100); String output = execute(countOption, Integer.toString(count), "0 0 0 1-6 * ?"); - assertTrue(output, output.contains("Here are the next " + count + " times this cron expression will trigger")); + assertThat(output, containsString("Here are the next " + count + " times this cron expression will trigger")); + } + + public void testGetNextValidTimes() throws Exception { + final int year = Calendar.getInstance(TimeZone.getTimeZone("UTC"), Locale.ROOT).get(Calendar.YEAR) + 1; + { + String output = execute("0 3 23 8 9 ? " + year); + assertThat(output, containsString("Here are the next 10 times this cron expression will trigger:")); + assertThat(output, not(containsString("ERROR"))); + assertThat(output, not(containsString("2.\t"))); + } + { + String output = execute("0 3 23 */4 9 ? " + year); + assertThat(output, containsString("Here are the next 10 times this cron expression will trigger:")); + assertThat(output, not(containsString("ERROR"))); + } + { + Exception expectThrows = expectThrows(Exception.class, () -> execute("0 3 23 */4 9 ? 
2017")); + String message = expectThrows.getMessage(); + assertThat(message, containsString("Could not compute future times since")); + assertThat(message, containsString("(perhaps the cron expression only points to times in the past?)")); + } } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java index 8029b1c7a49..ae3066a3ee6 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java @@ -438,7 +438,7 @@ public class WatchTests extends ESTestCase { private WatchParser createWatchparser() throws Exception { LoggingAction loggingAction = new LoggingAction(new TextTemplate("foo"), null, null); List actions = Collections.singletonList(new ActionWrapper("_logging_", randomThrottler(), null, null, - new ExecutableLoggingAction(loggingAction, logger, settings, new MockTextTemplateEngine()))); + new ExecutableLoggingAction(loggingAction, logger, new MockTextTemplateEngine()))); ScheduleRegistry scheduleRegistry = registry(new IntervalSchedule(new IntervalSchedule.Interval(1, IntervalSchedule.Interval.Unit.SECONDS))); @@ -622,7 +622,7 @@ public class WatchTests extends ESTestCase { parsers.put(WebhookAction.TYPE, new WebhookActionFactory(settings, httpClient, templateEngine)); break; case LoggingAction.TYPE: - parsers.put(LoggingAction.TYPE, new LoggingActionFactory(settings, new MockTextTemplateEngine())); + parsers.put(LoggingAction.TYPE, new LoggingActionFactory(new MockTextTemplateEngine())); break; } } diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java index 3b9032f0921..1d3e51c11e0 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.protocol.xpack; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -412,8 +411,7 @@ public class XPackInfoResponse extends ActionResponse implements ToXContentObjec } public FeatureSet(StreamInput in) throws IOException { - this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), - in.getVersion().onOrAfter(Version.V_5_4_0) ? 
in.readMap() : null); + this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), in.readMap()); } @Override @@ -422,9 +420,7 @@ public class XPackInfoResponse extends ActionResponse implements ToXContentObjec out.writeOptionalString(description); out.writeBoolean(available); out.writeBoolean(enabled); - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeMap(nativeCodeInfo); - } + out.writeMap(nativeCodeInfo); } public String name() { diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java new file mode 100644 index 00000000000..455434f7ac4 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java @@ -0,0 +1,229 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import com.carrotsearch.hppc.ObjectIntHashMap; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * A Connection links exactly two {@link Vertex} objects. The basis of a + * connection is that one or more documents have been found that contain + * this pair of terms, and the strength of the connection is recorded + * as a weight.
+ */ +public class Connection { + private Vertex from; + private Vertex to; + private double weight; + private long docCount; + + public Connection(Vertex from, Vertex to, double weight, long docCount) { + this.from = from; + this.to = to; + this.weight = weight; + this.docCount = docCount; + } + + public Connection(StreamInput in, Map vertices) throws IOException { + from = vertices.get(new VertexId(in.readString(), in.readString())); + to = vertices.get(new VertexId(in.readString(), in.readString())); + weight = in.readDouble(); + docCount = in.readVLong(); + } + + Connection() { + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(from.getField()); + out.writeString(from.getTerm()); + out.writeString(to.getField()); + out.writeString(to.getTerm()); + out.writeDouble(weight); + out.writeVLong(docCount); + } + + public ConnectionId getId() { + return new ConnectionId(from.getId(), to.getId()); + } + + public Vertex getFrom() { + return from; + } + + public Vertex getTo() { + return to; + } + + /** + * @return a measure of the relative connectedness between a pair of {@link Vertex} objects + */ + public double getWeight() { + return weight; + } + + /** + * @return the number of documents in the sampled set that contained this + * pair of {@link Vertex} objects. + */ + public long getDocCount() { + return docCount; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Connection other = (Connection) obj; + return docCount == other.docCount && + weight == other.weight && + Objects.equals(to, other.to) && + Objects.equals(from, other.from); + } + + @Override + public int hashCode() { + return Objects.hash(docCount, weight, from, to); + } + + + private static final ParseField SOURCE = new ParseField("source"); + private static final ParseField TARGET = new ParseField("target"); + private static final ParseField WEIGHT = new ParseField("weight"); + private static final ParseField DOC_COUNT = new ParseField("doc_count"); + + + void toXContent(XContentBuilder builder, Params params, ObjectIntHashMap vertexNumbers) throws IOException { + builder.field(SOURCE.getPreferredName(), vertexNumbers.get(from)); + builder.field(TARGET.getPreferredName(), vertexNumbers.get(to)); + builder.field(WEIGHT.getPreferredName(), weight); + builder.field(DOC_COUNT.getPreferredName(), docCount); + } + + //When deserializing from XContent we need to wait for all vertices to be loaded before + // Connection objects can be created that reference them. This class provides the interim + // state for connections. 
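The comment above describes a two-phase parse: connections arrive from XContent as vertex indices and can only be turned into object references once every vertex has been read. A stripped-down sketch of that pattern, using stand-in types rather than the shipped classes:

    import java.util.Arrays;
    import java.util.List;

    class TwoPhaseResolve {
        static class Vertex {
            final String term;
            Vertex(String term) { this.term = term; }
        }

        static class Connection {
            final Vertex from;
            final Vertex to;
            Connection(Vertex from, Vertex to) { this.from = from; this.to = to; }
        }

        // Interim form holding vertex indices, as parsed from the JSON response.
        static class Unresolved {
            final int fromIndex;
            final int toIndex;
            Unresolved(int fromIndex, int toIndex) { this.fromIndex = fromIndex; this.toIndex = toIndex; }
            Connection resolve(List<Vertex> vertices) {
                return new Connection(vertices.get(fromIndex), vertices.get(toIndex));
            }
        }

        public static void main(String[] args) {
            List<Vertex> vertices = Arrays.asList(new Vertex("midi"), new Vertex("synth"));
            Connection c = new Unresolved(0, 1).resolve(vertices); // safe only after all vertices are parsed
            System.out.println(c.from.term + " -> " + c.to.term);  // midi -> synth
        }
    }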
+ static class UnresolvedConnection { + int fromIndex; + int toIndex; + double weight; + long docCount; + UnresolvedConnection(int fromIndex, int toIndex, double weight, long docCount) { + super(); + this.fromIndex = fromIndex; + this.toIndex = toIndex; + this.weight = weight; + this.docCount = docCount; + } + public Connection resolve(List vertices) { + return new Connection(vertices.get(fromIndex), vertices.get(toIndex), weight, docCount); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "ConnectionParser", true, + args -> { + int source = (Integer) args[0]; + int target = (Integer) args[1]; + double weight = (Double) args[2]; + long docCount = (Long) args[3]; + return new UnresolvedConnection(source, target, weight, docCount); + }); + + static { + PARSER.declareInt(constructorArg(), SOURCE); + PARSER.declareInt(constructorArg(), TARGET); + PARSER.declareDouble(constructorArg(), WEIGHT); + PARSER.declareLong(constructorArg(), DOC_COUNT); + } + static UnresolvedConnection fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + } + + + /** + * An identifier (implements hashcode and equals) that represents a + * unique key for a {@link Connection} + */ + public static class ConnectionId { + private final VertexId source; + private final VertexId target; + + public ConnectionId(VertexId source, VertexId target) { + this.source = source; + this.target = target; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + ConnectionId vertexId = (ConnectionId) o; + + if (source != null ? !source.equals(vertexId.source) : vertexId.source != null) + return false; + if (target != null ? !target.equals(vertexId.target) : vertexId.target != null) + return false; + + return true; + } + + @Override + public int hashCode() { + int result = source != null ? source.hashCode() : 0; + result = 31 * result + (target != null ? target.hashCode() : 0); + return result; + } + + public VertexId getSource() { + return source; + } + + public VertexId getTarget() { + return target; + } + + @Override + public String toString() { + return getSource() + "->" + getTarget(); + } + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java new file mode 100644 index 00000000000..495ea5fd28a --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -0,0 +1,401 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; + +/** + * Holds the criteria required to guide the exploration of connected terms which + * can be returned as a graph. + */ +public class GraphExploreRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContentObject { + + public static final String NO_HOPS_ERROR_MESSAGE = "Graph explore request must have at least one hop"; + public static final String NO_VERTICES_ERROR_MESSAGE = "Graph explore hop must have at least one VertexRequest"; + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); + private String[] types = Strings.EMPTY_ARRAY; + private String routing; + private TimeValue timeout; + + private int sampleSize = SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE; + private String sampleDiversityField; + private int maxDocsPerDiversityValue; + private boolean useSignificance = true; + private boolean returnDetailedInfo; + + private List hops = new ArrayList<>(); + + public GraphExploreRequest() { + } + + /** + * Constructs a new graph request to run against the provided indices. No + * indices means it will run against all indices. + */ + public GraphExploreRequest(String... indices) { + this.indices = indices; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (hops.size() == 0) { + validationException = ValidateActions.addValidationError(NO_HOPS_ERROR_MESSAGE, validationException); + } + for (Hop hop : hops) { + validationException = hop.validate(validationException); + } + return validationException; + } + + @Override + public String[] indices() { + return this.indices; + } + + @Override + public GraphExploreRequest indices(String... indices) { + this.indices = indices; + return this; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public GraphExploreRequest indicesOptions(IndicesOptions indicesOptions) { + if (indicesOptions == null) { + throw new IllegalArgumentException("IndicesOptions must not be null"); + } + this.indicesOptions = indicesOptions; + return this; + } + + public String[] types() { + return this.types; + } + + public GraphExploreRequest types(String... 
types) { + this.types = types; + return this; + } + + public String routing() { + return this.routing; + } + + public GraphExploreRequest routing(String routing) { + this.routing = routing; + return this; + } + + public GraphExploreRequest routing(String... routings) { + this.routing = Strings.arrayToCommaDelimitedString(routings); + return this; + } + + public TimeValue timeout() { + return timeout; + } + + /** + * Graph exploration can be set to timeout after the given period. Search + * operations involved in each hop are limited to the remaining time + * available but can still overrun due to the nature of their "best efforts" + * timeout support. When a timeout occurs partial results are returned. + * + * @param timeout + * a {@link TimeValue} object which determines the maximum length + * of time to spend exploring + */ + public GraphExploreRequest timeout(TimeValue timeout) { + if (timeout == null) { + throw new IllegalArgumentException("timeout must not be null"); + } + this.timeout = timeout; + return this; + } + + public GraphExploreRequest timeout(String timeout) { + timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout")); + return this; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + types = in.readStringArray(); + routing = in.readOptionalString(); + timeout = in.readOptionalTimeValue(); + sampleSize = in.readInt(); + sampleDiversityField = in.readOptionalString(); + maxDocsPerDiversityValue = in.readInt(); + + useSignificance = in.readBoolean(); + returnDetailedInfo = in.readBoolean(); + + int numHops = in.readInt(); + Hop parentHop = null; + for (int i = 0; i < numHops; i++) { + Hop hop = new Hop(parentHop); + hop.readFrom(in); + hops.add(hop); + parentHop = hop; + } + + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + out.writeStringArray(types); + out.writeOptionalString(routing); + out.writeOptionalTimeValue(timeout); + + out.writeInt(sampleSize); + out.writeOptionalString(sampleDiversityField); + out.writeInt(maxDocsPerDiversityValue); + + out.writeBoolean(useSignificance); + out.writeBoolean(returnDetailedInfo); + out.writeInt(hops.size()); + for (Iterator iterator = hops.iterator(); iterator.hasNext();) { + Hop hop = iterator.next(); + hop.writeTo(out); + } + } + + @Override + public String toString() { + return "graph explore [" + Arrays.toString(indices) + "][" + Arrays.toString(types) + "]"; + } + + /** + * The number of top-matching documents that are considered during each hop + * (default is {@link SamplerAggregationBuilder#DEFAULT_SHARD_SAMPLE_SIZE} + * Very small values (less than 50) may not provide sufficient + * weight-of-evidence to identify significant connections between terms. + *

+ * Very large values (many thousands) are not recommended with loosely + * defined queries (fuzzy queries or those with many OR clauses). This is + * because any useful signals in the best documents are diluted with + * irrelevant noise from low-quality matches. Performance is also typically + * better with smaller samples as there are fewer look-ups required for + * background frequencies of terms found in the documents. + *

+ * + * @param maxNumberOfDocsPerHop + * shard-level sample size in documents + */ + public void sampleSize(int maxNumberOfDocsPerHop) { + sampleSize = maxNumberOfDocsPerHop; + } + + public int sampleSize() { + return sampleSize; + } + + /** + * Optional choice of single-value field on which to diversify sampled + * search results + */ + public void sampleDiversityField(String name) { + sampleDiversityField = name; + } + + public String sampleDiversityField() { + return sampleDiversityField; + } + + /** + * Optional number of permitted docs with same value in sampled search + * results. Must also declare which field using sampleDiversityField + */ + public void maxDocsPerDiversityValue(int maxDocs) { + this.maxDocsPerDiversityValue = maxDocs; + } + + public int maxDocsPerDiversityValue() { + return maxDocsPerDiversityValue; + } + + /** + * Controls the choice of algorithm used to select interesting terms. The + * default value is true which means terms are selected based on + * significance (see the {@link SignificantTerms} aggregation) rather than + * popularity (using the {@link TermsAggregator}). + * + * @param value + * true if the significant_terms algorithm should be used. + */ + public void useSignificance(boolean value) { + this.useSignificance = value; + } + + public boolean useSignificance() { + return useSignificance; + } + + /** + * Return detailed information about vertex frequencies as part of JSON + * results - defaults to false + * + * @param value + * true if detailed information is required in JSON responses + */ + public void returnDetailedInfo(boolean value) { + this.returnDetailedInfo = value; + } + + public boolean returnDetailedInfo() { + return returnDetailedInfo; + } + + /** + * Add a stage in the graph exploration. Each hop represents a stage of + * querying Elasticsearch to identify terms which can then be connected to + * other terms in a subsequent hop.
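Taken together, the hop, sampling, and significance controls above compose into a small builder-style API. For illustration, here is a minimal sketch of a two-hop exploration using only methods defined in this class; the index and field names ("weblogs", "ip_address", "url") are hypothetical:

```java
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.protocol.xpack.graph.Hop;

class GraphRequestExample {
    // Builds a two-hop exploration: admin-related IP addresses first, then
    // the urls those addresses were connected to.
    static GraphExploreRequest twoHopExploration() {
        GraphExploreRequest request = new GraphExploreRequest("weblogs");
        request.sampleSize(200);                        // shard-level sample per hop
        request.timeout(TimeValue.timeValueSeconds(5)); // best-effort overall budget
        request.useSignificance(true);                  // significant_terms, not raw popularity

        // Hop 1: seed the graph from documents matching the guiding query.
        Hop hop1 = request.createNextHop(QueryBuilders.matchQuery("url", "admin"));
        hop1.addVertexRequest("ip_address").size(10);

        // Hop 2: a null guiding query is treated as match_all.
        Hop hop2 = request.createNextHop(null);
        hop2.addVertexRequest("url").minDocCount(3);

        return request;
    }
}
```

Because each hop declares at least one vertex request, `validate()` on the resulting request returns null.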
+ * + * @param guidingQuery + * optional choice of query which influences which documents are + * considered in this stage + * @return a {@link Hop} object that holds settings for a stage in the graph + * exploration + */ + public Hop createNextHop(QueryBuilder guidingQuery) { + Hop parent = null; + if (hops.size() > 0) { + parent = hops.get(hops.size() - 1); + } + Hop newHop = new Hop(parent); + newHop.guidingQuery = guidingQuery; + hops.add(newHop); + return newHop; + } + + public int getHopNumbers() { + return hops.size(); + } + + public Hop getHop(int hopNumber) { + return hops.get(hopNumber); + } + + public static class TermBoost { + String term; + float boost; + + public TermBoost(String term, float boost) { + super(); + this.term = term; + if (boost <= 0) { + throw new IllegalArgumentException("Boosts must be a positive non-zero number"); + } + this.boost = boost; + } + + TermBoost() { + } + + public String getTerm() { + return term; + } + + public float getBoost() { + return boost; + } + + void readFrom(StreamInput in) throws IOException { + this.term = in.readString(); + this.boost = in.readFloat(); + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(term); + out.writeFloat(boost); + } + + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.startObject("controls"); + { + if (sampleSize != SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE) { + builder.field("sample_size", sampleSize); + } + if (sampleDiversityField != null) { + builder.startObject("sample_diversity"); + builder.field("field", sampleDiversityField); + builder.field("max_docs_per_value", maxDocsPerDiversityValue); + builder.endObject(); + } + builder.field("use_significance", useSignificance); + if (returnDetailedInfo) { + builder.field("return_detailed_stats", returnDetailedInfo); + } + } + builder.endObject(); + + for (Hop hop : hops) { + if (hop.parentHop != null) { + builder.startObject("connections"); + } + hop.toXContent(builder, params); + } + for (Hop hop : hops) { + if (hop.parentHop != null) { + builder.endObject(); + } + } + builder.endObject(); + + return builder; + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java new file mode 100644 index 00000000000..baaaedf0163 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java @@ -0,0 +1,261 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.protocol.xpack.graph; + +import com.carrotsearch.hppc.ObjectIntHashMap; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; +import org.elasticsearch.protocol.xpack.graph.Connection.UnresolvedConnection; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Graph explore response holds a graph of {@link Vertex} and {@link Connection} objects + * (nodes and edges in common graph parlance). + * + * @see GraphExploreRequest + */ +public class GraphExploreResponse extends ActionResponse implements ToXContentObject { + + private long tookInMillis; + private boolean timedOut = false; + private ShardOperationFailedException[] shardFailures = ShardSearchFailure.EMPTY_ARRAY; + private Map vertices; + private Map connections; + private boolean returnDetailedInfo; + static final String RETURN_DETAILED_INFO_PARAM = "returnDetailedInfo"; + + public GraphExploreResponse() { + } + + public GraphExploreResponse(long tookInMillis, boolean timedOut, ShardOperationFailedException[] shardFailures, + Map vertices, Map connections, boolean returnDetailedInfo) { + this.tookInMillis = tookInMillis; + this.timedOut = timedOut; + this.shardFailures = shardFailures; + this.vertices = vertices; + this.connections = connections; + this.returnDetailedInfo = returnDetailedInfo; + } + + + public TimeValue getTook() { + return new TimeValue(tookInMillis); + } + + public long getTookInMillis() { + return tookInMillis; + } + + /** + * @return true if the time stated in {@link GraphExploreRequest#timeout(TimeValue)} was exceeded + * (not all hops may have been completed in this case) + */ + public boolean isTimedOut() { + return this.timedOut; + } + public ShardOperationFailedException[] getShardFailures() { + return shardFailures; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + tookInMillis = in.readVLong(); + timedOut = in.readBoolean(); + + int size = in.readVInt(); + if (size == 0) { + shardFailures = ShardSearchFailure.EMPTY_ARRAY; + } else { + shardFailures = new ShardSearchFailure[size]; + for (int i = 0; i < shardFailures.length; i++) { + shardFailures[i] = readShardSearchFailure(in); + } + } + // read vertices + size = in.readVInt(); + vertices = new HashMap<>(); + for (int i = 0; i < size; i++) { + Vertex n = Vertex.readFrom(in); + vertices.put(n.getId(), n); + } + + size = in.readVInt(); + + connections = new HashMap<>(); + for (int i = 0; i < size; i++) { + 
Connection e = new Connection(in, vertices); + connections.put(e.getId(), e); + } + + returnDetailedInfo = in.readBoolean(); + + } + + public Collection getConnections() { + return connections.values(); + } + + public Collection getVertices() { + return vertices.values(); + } + + public Vertex getVertex(VertexId id) { + return vertices.get(id); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(tookInMillis); + out.writeBoolean(timedOut); + + out.writeVInt(shardFailures.length); + for (ShardOperationFailedException shardSearchFailure : shardFailures) { + shardSearchFailure.writeTo(out); + } + + out.writeVInt(vertices.size()); + for (Vertex vertex : vertices.values()) { + vertex.writeTo(out); + } + + out.writeVInt(connections.size()); + for (Connection connection : connections.values()) { + connection.writeTo(out); + } + + out.writeBoolean(returnDetailedInfo); + + } + + private static final ParseField TOOK = new ParseField("took"); + private static final ParseField TIMED_OUT = new ParseField("timed_out"); + private static final ParseField VERTICES = new ParseField("vertices"); + private static final ParseField CONNECTIONS = new ParseField("connections"); + private static final ParseField FAILURES = new ParseField("failures"); + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(TOOK.getPreferredName(), tookInMillis); + builder.field(TIMED_OUT.getPreferredName(), timedOut); + + builder.startArray(FAILURES.getPreferredName()); + if (shardFailures != null) { + for (ShardOperationFailedException shardFailure : shardFailures) { + builder.startObject(); + shardFailure.toXContent(builder, params); + builder.endObject(); + } + } + builder.endArray(); + + ObjectIntHashMap vertexNumbers = new ObjectIntHashMap<>(vertices.size()); + + Map extraParams = new HashMap<>(); + extraParams.put(RETURN_DETAILED_INFO_PARAM, Boolean.toString(returnDetailedInfo)); + Params extendedParams = new DelegatingMapParams(extraParams, params); + + builder.startArray(VERTICES.getPreferredName()); + for (Vertex vertex : vertices.values()) { + builder.startObject(); + vertexNumbers.put(vertex, vertexNumbers.size()); + vertex.toXContent(builder, extendedParams); + builder.endObject(); + } + builder.endArray(); + + builder.startArray(CONNECTIONS.getPreferredName()); + for (Connection connection : connections.values()) { + builder.startObject(); + connection.toXContent(builder, extendedParams, vertexNumbers); + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "GraphExploreResponsenParser", true, + args -> { + GraphExploreResponse result = new GraphExploreResponse(); + result.vertices = new HashMap<>(); + result.connections = new HashMap<>(); + + result.tookInMillis = (Long) args[0]; + result.timedOut = (Boolean) args[1]; + + @SuppressWarnings("unchecked") + List vertices = (List) args[2]; + @SuppressWarnings("unchecked") + List unresolvedConnections = (List) args[3]; + @SuppressWarnings("unchecked") + List failures = (List) args[4]; + for (Vertex vertex : vertices) { + // reverse-engineer if detailed stats were requested - + // mainly here for testing framework's equality tests + result.returnDetailedInfo = result.returnDetailedInfo || vertex.getFg() > 0; + result.vertices.put(vertex.getId(), vertex); + } + for (UnresolvedConnection 
unresolvedConnection : unresolvedConnections) { + Connection resolvedConnection = unresolvedConnection.resolve(vertices); + result.connections.put(resolvedConnection.getId(), resolvedConnection); + } + if (failures.size() > 0) { + result.shardFailures = failures.toArray(new ShardSearchFailure[failures.size()]); + } + return result; + }); + + static { + PARSER.declareLong(constructorArg(), TOOK); + PARSER.declareBoolean(constructorArg(), TIMED_OUT); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> Vertex.fromXContent(p), VERTICES); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> UnresolvedConnection.fromXContent(p), CONNECTIONS); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.fromXContent(p), FAILURES); + } + + public static GraphExploreResponse fromXContext(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java new file mode 100644 index 00000000000..70ec61067f5 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * A Hop represents one of potentially many stages in a graph exploration. + * Each Hop identifies one or more fields in which it will attempt to find + * terms that are significantly connected to the previous Hop. Each field is identified + * using a {@link VertexRequest} + * + *

+ * <p>An example series of Hops on webserver logs would be:
+ * <ol>
+ * <li>an initial Hop to find
+ * the top ten IPAddresses trying to access urls containing the word "admin"</li>
+ * <li>a secondary Hop to see which other URLs those IPAddresses were trying to access</li>
+ * </ol>
+ *
+ * <p>
+ * Optionally, each hop can contain a "guiding query" that further limits the set of documents considered.
+ * In our weblog example above we might choose to constrain the second hop to only look at log records that
+ * had a response code of 404.
+ * </p>
+ * <p>
+ * If absent, the list of {@link VertexRequest}s is inherited from the prior Hop's list to avoid repeating
+ * the fields that will be examined at each stage.
+ * </p>
    + * + */ +public class Hop implements ToXContentFragment{ + final Hop parentHop; + List vertices = null; + QueryBuilder guidingQuery = null; + + public Hop(Hop parent) { + this.parentHop = parent; + } + + public ActionRequestValidationException validate(ActionRequestValidationException validationException) { + + if (getEffectiveVertexRequests().size() == 0) { + validationException = ValidateActions.addValidationError(GraphExploreRequest.NO_VERTICES_ERROR_MESSAGE, validationException); + } + return validationException; + + } + + public Hop getParentHop() { + return parentHop; + } + + void writeTo(StreamOutput out) throws IOException { + out.writeOptionalNamedWriteable(guidingQuery); + if (vertices == null) { + out.writeVInt(0); + } else { + out.writeVInt(vertices.size()); + for (VertexRequest vr : vertices) { + vr.writeTo(out); + } + } + } + + void readFrom(StreamInput in) throws IOException { + guidingQuery = in.readOptionalNamedWriteable(QueryBuilder.class); + int size = in.readVInt(); + if (size > 0) { + vertices = new ArrayList<>(); + for (int i = 0; i < size; i++) { + VertexRequest vr = new VertexRequest(); + vr.readFrom(in); + vertices.add(vr); + } + } + } + + public QueryBuilder guidingQuery() { + if (guidingQuery != null) { + return guidingQuery; + } + return QueryBuilders.matchAllQuery(); + } + + /** + * Add a field in which this {@link Hop} will look for terms that are highly linked to + * previous hops and optionally the guiding query. + * + * @param fieldName a field in the chosen index + */ + public VertexRequest addVertexRequest(String fieldName) { + if (vertices == null) { + vertices = new ArrayList<>(); + } + VertexRequest vr = new VertexRequest(); + vr.fieldName(fieldName); + vertices.add(vr); + return vr; + } + + /** + * An optional parameter that focuses the exploration on documents that + * match the given query. + * + * @param queryBuilder any query + */ + public void guidingQuery(QueryBuilder queryBuilder) { + guidingQuery = queryBuilder; + } + + protected List getEffectiveVertexRequests() { + if (vertices != null) { + return vertices; + } + if (parentHop == null) { + return Collections.emptyList(); + } + // otherwise inherit settings from parent + return parentHop.getEffectiveVertexRequests(); + } + + public int getNumberVertexRequests() { + return getEffectiveVertexRequests().size(); + } + + public VertexRequest getVertexRequest(int requestNumber) { + return getEffectiveVertexRequests().get(requestNumber); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (guidingQuery != null) { + builder.field("query"); + guidingQuery.toXContent(builder, params); + } + if(vertices != null && vertices.size()>0) { + builder.startArray("vertices"); + for (VertexRequest vertexRequest : vertices) { + vertexRequest.toXContent(builder, params); + } + builder.endArray(); + } + return builder; + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java new file mode 100644 index 00000000000..cfc26f44fac --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java @@ -0,0 +1,268 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * A vertex in a graph response represents a single term (a field and value pair) + * which appears in one or more documents found as part of the graph exploration. + * + * A vertex term could be a bank account number, an email address, a hashtag or any + * other term that appears in documents and is interesting to represent in a network. + */ +public class Vertex implements ToXContentFragment { + + private final String field; + private final String term; + private double weight; + private final int depth; + private final long bg; + private long fg; + private static final ParseField FIELD = new ParseField("field"); + private static final ParseField TERM = new ParseField("term"); + private static final ParseField WEIGHT = new ParseField("weight"); + private static final ParseField DEPTH = new ParseField("depth"); + private static final ParseField FG = new ParseField("fg"); + private static final ParseField BG = new ParseField("bg"); + + + public Vertex(String field, String term, double weight, int depth, long bg, long fg) { + super(); + this.field = field; + this.term = term; + this.weight = weight; + this.depth = depth; + this.bg = bg; + this.fg = fg; + } + + static Vertex readFrom(StreamInput in) throws IOException { + return new Vertex(in.readString(), in.readString(), in.readDouble(), in.readVInt(), in.readVLong(), in.readVLong()); + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(field); + out.writeString(term); + out.writeDouble(weight); + out.writeVInt(depth); + out.writeVLong(bg); + out.writeVLong(fg); + } + + @Override + public int hashCode() { + return Objects.hash(field, term, weight, depth, bg, fg); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Vertex other = (Vertex) obj; + return depth == other.depth && + weight == other.weight && + bg == other.bg && + fg == other.fg && + Objects.equals(field, other.field) && + Objects.equals(term, other.term); + + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + boolean returnDetailedInfo = 
params.paramAsBoolean(GraphExploreResponse.RETURN_DETAILED_INFO_PARAM, false); + builder.field(FIELD.getPreferredName(), field); + builder.field(TERM.getPreferredName(), term); + builder.field(WEIGHT.getPreferredName(), weight); + builder.field(DEPTH.getPreferredName(), depth); + if (returnDetailedInfo) { + builder.field(FG.getPreferredName(), fg); + builder.field(BG.getPreferredName(), bg); + } + return builder; + } + + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "VertexParser", true, + args -> { + String field = (String) args[0]; + String term = (String) args[1]; + double weight = (Double) args[2]; + int depth = (Integer) args[3]; + Long optionalBg = (Long) args[4]; + Long optionalFg = (Long) args[5]; + long bg = optionalBg == null ? 0 : optionalBg; + long fg = optionalFg == null ? 0 : optionalFg; + return new Vertex(field, term, weight, depth, bg, fg); + }); + + static { + PARSER.declareString(constructorArg(), FIELD); + PARSER.declareString(constructorArg(), TERM); + PARSER.declareDouble(constructorArg(), WEIGHT); + PARSER.declareInt(constructorArg(), DEPTH); + PARSER.declareLong(optionalConstructorArg(), BG); + PARSER.declareLong(optionalConstructorArg(), FG); + } + + static Vertex fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + + + /** + * @return a {@link VertexId} object that uniquely identifies this Vertex + */ + public VertexId getId() { + return createId(field, term); + } + + /** + * A convenience method for creating a {@link VertexId} + * @param field the field + * @param term the term + * @return a {@link VertexId} that can be used for looking up vertices + */ + public static VertexId createId(String field, String term) { + return new VertexId(field,term); + } + + @Override + public String toString() { + return getId().toString(); + } + + public String getField() { + return field; + } + + public String getTerm() { + return term; + } + + /** + * The weight of a vertex is an accumulation of all of the {@link Connection}s + * that are linked to this {@link Vertex} as part of a graph exploration. + * It is used internally to identify the most interesting vertices to be returned. + * @return a measure of the {@link Vertex}'s relative importance. + */ + public double getWeight() { + return weight; + } + + public void setWeight(final double weight) { + this.weight = weight; + } + + /** + * If the {@link GraphExploreRequest#useSignificance(boolean)} is true (the default) + * this statistic is available. + * @return the number of documents in the index that contain this term (see bg_count in + * + * the significant_terms aggregation) + */ + public long getBg() { + return bg; + } + + /** + * If the {@link GraphExploreRequest#useSignificance(boolean)} is true (the default) + * this statistic is available. + * Together with {@link #getBg()} these numbers are used to derive the significance of a term. 
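As a quick illustration of reading these statistics back, here is a hedged sketch that walks a response's vertices using only the accessors defined in this class:

```java
import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse;
import org.elasticsearch.protocol.xpack.graph.Vertex;

class GraphResponseExample {
    static void printVertices(GraphExploreResponse response) {
        for (Vertex v : response.getVertices()) {
            // weight is always present; fg/bg are only populated when the
            // request asked for detailed info (they default to 0 otherwise)
            System.out.printf("%s:%s hop=%d weight=%.3f fg=%d bg=%d%n",
                    v.getField(), v.getTerm(), v.getHopDepth(),
                    v.getWeight(), v.getFg(), v.getBg());
        }
    }
}
```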
+ * @return the number of documents in the sample of best matching documents that contain this term (see fg_count in + * + * the significant_terms aggregation) + */ + public long getFg() { + return fg; + } + + public void setFg(final long fg) { + this.fg = fg; + } + + /** + * @return the sequence number in the series of hops where this Vertex term was first encountered + */ + public int getHopDepth() { + return depth; + } + + /** + * An identifier (implements hashcode and equals) that represents a + * unique key for a {@link Vertex} + */ + public static class VertexId { + private final String field; + private final String term; + + public VertexId(String field, String term) { + this.field = field; + this.term = term; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + VertexId vertexId = (VertexId) o; + + if (field != null ? !field.equals(vertexId.field) : vertexId.field != null) + return false; + if (term != null ? !term.equals(vertexId.term) : vertexId.term != null) + return false; + + return true; + } + + @Override + public int hashCode() { + int result = field != null ? field.hashCode() : 0; + result = 31 * result + (term != null ? term.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return field + ":" + term; + } + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java new file mode 100644 index 00000000000..116497fe230 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java @@ -0,0 +1,248 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * A request to identify terms from a choice of field as part of a {@link Hop}. + * Optionally, a set of terms can be provided that are used as an exclusion or + * inclusion list to filter which terms are considered. 
+ * + */ +public class VertexRequest implements ToXContentObject { + private String fieldName; + private int size = DEFAULT_SIZE; + public static final int DEFAULT_SIZE = 5; + private Map includes; + private Set excludes; + public static final int DEFAULT_MIN_DOC_COUNT = 3; + private int minDocCount = DEFAULT_MIN_DOC_COUNT; + public static final int DEFAULT_SHARD_MIN_DOC_COUNT = 2; + private int shardMinDocCount = DEFAULT_SHARD_MIN_DOC_COUNT; + + + public VertexRequest() { + + } + + void readFrom(StreamInput in) throws IOException { + fieldName = in.readString(); + size = in.readVInt(); + minDocCount = in.readVInt(); + shardMinDocCount = in.readVInt(); + + int numIncludes = in.readVInt(); + if (numIncludes > 0) { + includes = new HashMap<>(); + for (int i = 0; i < numIncludes; i++) { + TermBoost tb = new TermBoost(); + tb.readFrom(in); + includes.put(tb.term, tb); + } + } + + int numExcludes = in.readVInt(); + if (numExcludes > 0) { + excludes = new HashSet<>(); + for (int i = 0; i < numExcludes; i++) { + excludes.add(in.readString()); + } + } + + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(fieldName); + out.writeVInt(size); + out.writeVInt(minDocCount); + out.writeVInt(shardMinDocCount); + + if (includes != null) { + out.writeVInt(includes.size()); + for (TermBoost tb : includes.values()) { + tb.writeTo(out); + } + } else { + out.writeVInt(0); + } + + if (excludes != null) { + out.writeVInt(excludes.size()); + for (String term : excludes) { + out.writeString(term); + } + } else { + out.writeVInt(0); + } + } + + public String fieldName() { + return fieldName; + } + + public VertexRequest fieldName(String fieldName) { + this.fieldName = fieldName; + return this; + } + + public int size() { + return size; + } + + /** + * @param size The maximum number of terms that should be returned from this field as part of this {@link Hop} + */ + public VertexRequest size(int size) { + this.size = size; + return this; + } + + public boolean hasIncludeClauses() { + return includes != null && includes.size() > 0; + } + + public boolean hasExcludeClauses() { + return excludes != null && excludes.size() > 0; + } + + /** + * Adds a term that should be excluded from results + * @param term A term to be excluded + */ + public void addExclude(String term) { + if (includes != null) { + throw new IllegalArgumentException("Cannot have both include and exclude clauses"); + } + if (excludes == null) { + excludes = new HashSet<>(); + } + excludes.add(term); + } + + /** + * Adds a term to the set of allowed values - the boost defines the relative + * importance when pursuing connections in subsequent {@link Hop}s. The boost value + * appears as part of the query. 
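A small hypothetical sketch of the include/exclude contract described above; "author" and "alice" are invented names:

```java
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.protocol.xpack.graph.Hop;
import org.elasticsearch.protocol.xpack.graph.VertexRequest;

class VertexRequestExample {
    static VertexRequest restrictedVertex(GraphExploreRequest request) {
        Hop hop = request.createNextHop(null); // null guiding query = match_all
        VertexRequest authors = hop.addVertexRequest("author").size(10);
        // Whitelist a single term; its boost raises the weight of any
        // connections found through it in later hops.
        authors.addInclude("alice", 2.0f);
        // Calling authors.addExclude("anything") now would throw an
        // IllegalArgumentException: include and exclude clauses are
        // mutually exclusive, as enforced by the guards in this class.
        return authors;
    }
}
```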
+ * @param term a required term + * @param boost an optional boost + */ + public void addInclude(String term, float boost) { + if (excludes != null) { + throw new IllegalArgumentException("Cannot have both include and exclude clauses"); + } + if (includes == null) { + includes = new HashMap<>(); + } + includes.put(term, new TermBoost(term, boost)); + } + + public TermBoost[] includeValues() { + return includes.values().toArray(new TermBoost[includes.size()]); + } + + public String[] includeValuesAsStringArray() { + String[] result = new String[includes.size()]; + int i = 0; + for (TermBoost tb : includes.values()) { + result[i++] = tb.term; + } + return result; + } + + public String[] excludesAsArray() { + return excludes.toArray(new String[excludes.size()]); + } + + public int minDocCount() { + return minDocCount; + } + + /** + * A "certainty" threshold which defines the weight-of-evidence required before + * a term found in this field is identified as a useful connection + * + * @param value The minimum number of documents that contain this term found in the samples used across all shards + */ + public VertexRequest minDocCount(int value) { + minDocCount = value; + return this; + } + + + public int shardMinDocCount() { + return Math.min(shardMinDocCount, minDocCount); + } + + /** + * A "certainty" threshold which defines the weight-of-evidence required before + * a term found in this field is identified as a useful connection + * + * @param value The minimum number of documents that contain this term found in the samples used across all shards + */ + public VertexRequest shardMinDocCount(int value) { + shardMinDocCount = value; + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("field", fieldName); + if (size != DEFAULT_SIZE) { + builder.field("size", size); + } + if (minDocCount != DEFAULT_MIN_DOC_COUNT) { + builder.field("min_doc_count", minDocCount); + } + if (shardMinDocCount != DEFAULT_SHARD_MIN_DOC_COUNT) { + builder.field("shard_min_doc_count", shardMinDocCount); + } + if(includes!=null) { + builder.startArray("include"); + for (TermBoost tb : includes.values()) { + builder.startObject(); + builder.field("term", tb.term); + builder.field("boost", tb.boost); + builder.endObject(); + } + builder.endArray(); + } + if(excludes!=null) { + builder.startArray("exclude"); + for (String value : excludes) { + builder.value(value); + } + builder.endArray(); + } + builder.endObject(); + return builder; + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java similarity index 86% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java rename to x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java index 216990d9f0e..f4f666074a1 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java @@ -18,7 +18,7 @@ */ /** - * Request and Response objects for the default distribution's Security + * Request and Response objects for the default distribution's Graph * APIs. 
*/ -package org.elasticsearch.protocol.xpack.security; +package org.elasticsearch.protocol.xpack.graph; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java new file mode 100644 index 00000000000..9c4a76fdcec --- /dev/null +++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java @@ -0,0 +1,136 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class GraphExploreResponseTests extends AbstractXContentTestCase< GraphExploreResponse> { + + @Override + protected GraphExploreResponse createTestInstance() { + return createInstance(0); + } + private static GraphExploreResponse createInstance(int numFailures) { + int numItems = randomIntBetween(4, 128); + boolean timedOut = randomBoolean(); + boolean showDetails = randomBoolean(); + long overallTookInMillis = randomNonNegativeLong(); + Map vertices = new HashMap<>(); + Map connections = new HashMap<>(); + ShardOperationFailedException [] failures = new ShardOperationFailedException [numFailures]; + for (int i = 0; i < failures.length; i++) { + failures[i] = new ShardSearchFailure(new ElasticsearchException("an error")); + } + + //Create random set of vertices + for (int i = 0; i < numItems; i++) { + Vertex v = new Vertex("field1", randomAlphaOfLength(5), randomDouble(), 0, + showDetails?randomIntBetween(100, 200):0, + showDetails?randomIntBetween(1, 100):0); + vertices.put(v.getId(), v); + } + + //Wire up half the vertices randomly + Vertex[] vs = vertices.values().toArray(new Vertex[vertices.size()]); + for (int i = 0; i < numItems/2; i++) { + Vertex v1 = vs[randomIntBetween(0, vs.length-1)]; + Vertex v2 = vs[randomIntBetween(0, vs.length-1)]; + if(v1 != v2) { + Connection conn = new Connection(v1, v2, randomDouble(), randomLongBetween(1, 10)); + connections.put(conn.getId(), conn); + } + } + return new GraphExploreResponse(overallTookInMillis, timedOut, failures, vertices, connections, showDetails); + } + + + private static GraphExploreResponse createTestInstanceWithFailures() { + return 
createInstance(randomIntBetween(1, 128)); + } + + @Override + protected GraphExploreResponse doParseInstance(XContentParser parser) throws IOException { + return GraphExploreResponse.fromXContext(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected String[] getShuffleFieldsExceptions() { + return new String[]{"vertices", "connections"}; + } + + protected Predicate getRandomFieldsExcludeFilterWhenResultHasErrors() { + return field -> field.startsWith("responses"); + } + + @Override + protected void assertEqualInstances( GraphExploreResponse expectedInstance, GraphExploreResponse newInstance) { + assertThat(newInstance.getTook(), equalTo(expectedInstance.getTook())); + assertThat(newInstance.isTimedOut(), equalTo(expectedInstance.isTimedOut())); + + Connection[] newConns = newInstance.getConnections().toArray(new Connection[0]); + Connection[] expectedConns = expectedInstance.getConnections().toArray(new Connection[0]); + assertArrayEquals(expectedConns, newConns); + + Vertex[] newVertices = newInstance.getVertices().toArray(new Vertex[0]); + Vertex[] expectedVertices = expectedInstance.getVertices().toArray(new Vertex[0]); + assertArrayEquals(expectedVertices, newVertices); + + ShardOperationFailedException[] newFailures = newInstance.getShardFailures(); + ShardOperationFailedException[] expectedFailures = expectedInstance.getShardFailures(); + assertEquals(expectedFailures.length, newFailures.length); + + } + + /** + * Test parsing {@link GraphExploreResponse} with inner failures as they don't support asserting on xcontent equivalence, given + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier< GraphExploreResponse> instanceSupplier = GraphExploreResponseTests::createTestInstanceWithFailures; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. 
+ boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent( + NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, getShuffleFieldsExceptions(), + getRandomFieldsExcludeFilterWhenResultHasErrors(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS); + } + +} diff --git a/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java b/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java index c0111e57c74..d1ee4f2d9e1 100644 --- a/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java +++ b/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.security.audit; import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.apache.http.message.BasicHeader; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -111,10 +110,12 @@ public class IndexAuditIT extends ESIntegTestCase { } public void testIndexAuditTrailWorking() throws Exception { - Response response = getRestClient().performRequest("GET", "/", - new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, - UsernamePasswordToken.basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())))); - assertThat(response.getStatusLine().getStatusCode(), is(200)); + Request request = new Request("GET", "/"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray()))); + request.setOptions(options); + Response response = getRestClient().performRequest(request); final AtomicReference lastClusterState = new AtomicReference<>(); final boolean found = awaitSecurityAuditIndex(lastClusterState, QueryBuilders.matchQuery("principal", USER)); diff --git a/x-pack/qa/build.gradle b/x-pack/qa/build.gradle index 24b6618b7d8..ae77c8f8935 100644 --- a/x-pack/qa/build.gradle +++ b/x-pack/qa/build.gradle @@ -25,12 +25,11 @@ gradle.projectsEvaluated { subprojects { Task assemble = project.tasks.findByName('assemble') if (assemble) { - project.tasks.remove(assemble) - project.build.dependsOn.remove('assemble') + assemble.enabled = false } Task dependenciesInfo = project.tasks.findByName('dependenciesInfo') if (dependenciesInfo) { - project.precommit.dependsOn.remove('dependenciesInfo') + dependenciesInfo.enabled = false } } } diff --git a/x-pack/qa/evil-tests/build.gradle b/x-pack/qa/evil-tests/build.gradle new file mode 100644 index 00000000000..03f2a569873 --- /dev/null +++ b/x-pack/qa/evil-tests/build.gradle @@ -0,0 +1,9 @@ +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" +} + +test { + systemProperty 'tests.security.manager', 'false' +} diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/core/scheduler/EvilSchedulerEngineTests.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/core/scheduler/EvilSchedulerEngineTests.java new file mode 100644 index 00000000000..2dfd314ffb0 --- /dev/null +++ 
b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/core/scheduler/EvilSchedulerEngineTests.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.scheduler; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.time.Clock; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyNoMoreInteractions; + +public class EvilSchedulerEngineTests extends ESTestCase { + + public void testOutOfMemoryErrorWhileTriggeredIsRethrownAndIsUncaught() throws InterruptedException { + final AtomicReference maybeFatal = new AtomicReference<>(); + final CountDownLatch uncaughtLatuch = new CountDownLatch(1); + final Thread.UncaughtExceptionHandler uncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler(); + try { + /* + * We want to test that the out of memory error thrown from the scheduler engine goes uncaught on another thread; this gives us + * confidence that an error thrown during a triggered event will lead to the node being torn down. + */ + final AtomicReference maybeThread = new AtomicReference<>(); + Thread.setDefaultUncaughtExceptionHandler((t, e) -> { + maybeFatal.set(e); + maybeThread.set(Thread.currentThread()); + uncaughtLatuch.countDown(); + }); + final Logger mockLogger = mock(Logger.class); + final SchedulerEngine engine = new SchedulerEngine(Settings.EMPTY, Clock.systemUTC(), mockLogger); + try { + final AtomicBoolean trigger = new AtomicBoolean(); + engine.register(event -> { + if (trigger.compareAndSet(false, true)) { + throw new OutOfMemoryError("640K ought to be enough for anybody"); + } else { + fail("listener invoked twice"); + } + }); + final CountDownLatch schedulerLatch = new CountDownLatch(1); + engine.add(new SchedulerEngine.Job( + getTestName(), + (startTime, now) -> { + if (schedulerLatch.getCount() == 1) { + schedulerLatch.countDown(); + return 0; + } else { + throw new AssertionError("nextScheduledTimeAfter invoked more than the expected number of times"); + } + })); + + uncaughtLatuch.await(); + assertTrue(trigger.get()); + assertNotNull(maybeFatal.get()); + assertThat(maybeFatal.get(), instanceOf(OutOfMemoryError.class)); + assertThat(maybeFatal.get(), hasToString(containsString("640K ought to be enough for anybody"))); + assertNotNull(maybeThread.get()); + assertThat(maybeThread.get(), not(equalTo(Thread.currentThread()))); // the error should be rethrown on another thread + schedulerLatch.await(); + verifyNoMoreInteractions(mockLogger); // we never logged anything + } finally { + engine.stop(); + } + } finally { + // restore the uncaught exception handler + Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler); + } + } + +} diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 3cf29701206..ab8f9172b69 100644 --- 
a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -11,7 +11,8 @@ apply plugin: 'elasticsearch.build' test.enabled = false dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile (project(path: xpackModule('security'), configuration: 'runtime')) { // Need to drop the guava dependency here or we get a conflict with watcher's guava dependency. // This is total #$%, but the solution is to get the SAML realm (which uses guava) out of security proper @@ -249,7 +250,8 @@ subprojects { check.dependsOn(integTest) dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('watcher'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 6ead87aba61..8a6944fb870 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.Booleans; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; @@ -18,6 +17,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -54,35 +54,13 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; -public class FullClusterRestartIT extends ESRestTestCase { - private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); - private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); +public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { @Before public void waitForMlTemplates() throws Exception { XPackRestTestHelper.waitForMlTemplates(client()); } - @Override - protected boolean preserveIndicesUponCompletion() { - return true; - } - - @Override - protected boolean preserveSnapshotsUponCompletion() { - return true; - } - - @Override - protected boolean 
preserveReposUponCompletion() { - return true; - } - - @Override - protected boolean preserveTemplatesUponCompletion() { - return true; - } - @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); @@ -103,7 +81,7 @@ public class FullClusterRestartIT extends ESRestTestCase { String docLocation = "/testsingledoc/doc/1"; String doc = "{\"test\": \"test\"}"; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Request createDoc = new Request("PUT", docLocation); createDoc.addParameter("refresh", "true"); createDoc.setJsonEntity(doc); @@ -115,7 +93,7 @@ public class FullClusterRestartIT extends ESRestTestCase { @SuppressWarnings("unchecked") public void testSecurityNativeRealm() throws Exception { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { createUser("preupgrade_user"); createRole("preupgrade_role"); } else { @@ -165,15 +143,15 @@ public class FullClusterRestartIT extends ESRestTestCase { assertUserInfo("preupgrade_user"); assertRoleInfo("preupgrade_role"); - if (!runningAgainstOldCluster) { + if (isRunningAgainstOldCluster() == false) { assertUserInfo("postupgrade_user"); assertRoleInfo("postupgrade_role"); } } public void testWatcher() throws Exception { - if (runningAgainstOldCluster) { - logger.info("Adding a watch on old cluster {}", oldClusterVersion); + if (isRunningAgainstOldCluster()) { + logger.info("Adding a watch on old cluster {}", getOldClusterVersion()); Request createBwcWatch = new Request("PUT", "_xpack/watcher/watch/bwc_watch"); createBwcWatch.setJsonEntity(loadWatch("simple-watch.json")); client().performRequest(createBwcWatch); @@ -194,7 +172,7 @@ public class FullClusterRestartIT extends ESRestTestCase { waitForHits(".watcher-history*", 2); logger.info("Done creating watcher-related indices"); } else { - logger.info("testing against {}", oldClusterVersion); + logger.info("testing against {}", getOldClusterVersion()); waitForYellow(".watches,bwc_watch_index,.watcher-history*"); logger.info("checking if the upgrade procedure on the new cluster is required"); @@ -264,8 +242,8 @@ public class FullClusterRestartIT extends ESRestTestCase { * Tests that a RollUp job created on a old cluster is correctly restarted after the upgrade. 
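The changes in this file swap the per-class `runningAgainstOldCluster`/`oldClusterVersion` fields for accessors inherited from the new `AbstractFullClusterRestartTestCase`. A hedged sketch of the resulting test shape (the class and test names here are invented for illustration):

```java
import org.elasticsearch.Version;
import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase;

public class MyFeatureRestartIT extends AbstractFullClusterRestartTestCase {
    public void testFeatureSurvivesRestart() throws Exception {
        if (isRunningAgainstOldCluster()) {
            // Phase 1: seed state while the old cluster is running.
        } else {
            // Phase 2: verify on the upgraded cluster, guarding any
            // version-dependent behaviour on the old cluster's version.
            if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) {
                // e.g. request parameters that only exist from 6.2.0 on
            }
        }
    }
}
```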
*/ public void testRollupAfterRestart() throws Exception { - assumeTrue("Rollup can be tested with 6.3.0 and onwards", oldClusterVersion.onOrAfter(Version.V_6_3_0)); - if (runningAgainstOldCluster) { + assumeTrue("Rollup can be tested with 6.3.0 and onwards", getOldClusterVersion().onOrAfter(Version.V_6_3_0)); + if (isRunningAgainstOldCluster()) { final int numDocs = 59; final int year = randomIntBetween(1970, 2018); @@ -315,7 +293,7 @@ public class FullClusterRestartIT extends ESRestTestCase { final Request clusterHealthRequest = new Request("GET", "/_cluster/health"); clusterHealthRequest.addParameter("wait_for_status", "yellow"); clusterHealthRequest.addParameter("wait_for_no_relocating_shards", "true"); - if (oldClusterVersion.onOrAfter(Version.V_6_2_0)) { + if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) { clusterHealthRequest.addParameter("wait_for_no_initializing_shards", "true"); } Map clusterHealthResponse = entityAsMap(client().performRequest(clusterHealthRequest)); @@ -325,11 +303,10 @@ public class FullClusterRestartIT extends ESRestTestCase { } } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/32773") public void testRollupIDSchemeAfterRestart() throws Exception { - assumeTrue("Rollup can be tested with 6.3.0 and onwards", oldClusterVersion.onOrAfter(Version.V_6_3_0)); - assumeTrue("Rollup ID scheme changed in 6.4", oldClusterVersion.before(Version.V_6_4_0)); - if (runningAgainstOldCluster) { + assumeTrue("Rollup can be tested with 6.3.0 and onwards", getOldClusterVersion().onOrAfter(Version.V_6_3_0)); + assumeTrue("Rollup ID scheme changed in 6.4", getOldClusterVersion().before(Version.V_6_4_0)); + if (isRunningAgainstOldCluster()) { final Request indexRequest = new Request("POST", "/id-test-rollup/_doc/1"); indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-01T00:00:01\",\"value\":123}"); @@ -393,6 +370,8 @@ public class FullClusterRestartIT extends ESRestTestCase { indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-02T00:00:01\",\"value\":345}"); client().performRequest(indexRequest); + assertRollUpJob("rollup-id-test"); + // stop the rollup job to force a state save, which will upgrade the ID final Request stopRollupJobRequest = new Request("POST", "_xpack/rollup/job/rollup-id-test/_stop"); Map stopRollupJobResponse = entityAsMap(client().performRequest(stopRollupJobRequest)); @@ -438,8 +417,8 @@ public class FullClusterRestartIT extends ESRestTestCase { public void testSqlFailsOnIndexWithTwoTypes() throws IOException { // TODO this isn't going to trigger until we backport to 6.1 assumeTrue("It is only possible to build an index that sql doesn't like before 6.0.0", - oldClusterVersion.before(Version.V_6_0_0_alpha1)); - if (runningAgainstOldCluster) { + getOldClusterVersion().before(Version.V_6_0_0_alpha1)); + if (isRunningAgainstOldCluster()) { Request doc1 = new Request("POST", "/testsqlfailsonindexwithtwotypes/type1"); doc1.setJsonEntity("{}"); client().performRequest(doc1); @@ -549,7 +528,7 @@ public class FullClusterRestartIT extends ESRestTestCase { request.addParameter("wait_for_status", "yellow"); request.addParameter("timeout", "30s"); request.addParameter("wait_for_no_relocating_shards", "true"); - if (oldClusterVersion.onOrAfter(Version.V_6_2_0)) { + if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) { request.addParameter("wait_for_no_initializing_shards", "true"); } Map response = entityAsMap(client().performRequest(request)); @@ -667,7 +646,7 @@ public class FullClusterRestartIT extends ESRestTestCase { // Persistent task 
state field has been renamed in 6.4.0 from "status" to "state" final String stateFieldName - = (runningAgainstOldCluster && oldClusterVersion.before(Version.V_6_4_0)) ? "status" : "state"; + = (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_4_0)) ? "status" : "state"; final String jobStateField = "task.xpack/rollup/job." + stateFieldName + ".job_state"; assertThat("Expected field [" + jobStateField + "] to be started or indexing in " + task.get("id"), diff --git a/x-pack/qa/kerberos-tests/build.gradle b/x-pack/qa/kerberos-tests/build.gradle index 59667d9ee78..f680a45bd7f 100644 --- a/x-pack/qa/kerberos-tests/build.gradle +++ b/x-pack/qa/kerberos-tests/build.gradle @@ -7,7 +7,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') } diff --git a/x-pack/qa/ml-basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java b/x-pack/qa/ml-basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java deleted file mode 100644 index e7381050260..00000000000 --- a/x-pack/qa/ml-basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml.integration; - -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.ml.MachineLearning; - -import java.io.IOException; -import java.net.URLEncoder; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.common.xcontent.XContentType.JSON; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -public class MlBasicMultiNodeIT extends ESRestTestCase { - - @SuppressWarnings("unchecked") - public void testMachineLearningInstalled() throws Exception { - Response response = client().performRequest("get", "/_xpack"); - assertEquals(200, response.getStatusLine().getStatusCode()); - Map features = (Map) responseEntityToMap(response).get("features"); - Map ml = (Map) features.get("ml"); - assertNotNull(ml); - assertTrue((Boolean) ml.get("available")); - assertTrue((Boolean) ml.get("enabled")); - } - - public void testInvalidJob() throws Exception { - // The job name is invalid because it contains a space - String jobId = "invalid job"; - ResponseException e = expectThrows(ResponseException.class, () -> createFarequoteJob(jobId)); - assertTrue(e.getMessage(), e.getMessage().contains("can contain lowercase alphanumeric (a-z and 0-9), hyphens or underscores")); - // If validation of the invalid job is not done until after transportation to the master node then the - // root cause gets reported as a remote_transport_exception. The code in PubJobAction is supposed to - // validate before transportation to avoid this. This test must be done in a multi-node cluster to have - // a chance of catching a problem, hence it is here rather than in the single node integration tests. 
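The comment above, from the removed MlBasicMultiNodeIT, is the rationale for running the invalid-job test on a multi-node cluster: if PUT-job validation only ran on the master node, the failure would reach the client wrapped in a remote_transport_exception instead of as the raw validation message. The assertFalse that follows is the payoff. As a standalone sketch inside an ESRestTestCase subclass, using the Request API the rest of this change migrates tests to (the endpoint path and error message follow the removed test; the job id and minimal job body are illustrative):

```java
// Submit a job id containing a space and assert the validation message
// surfaced directly, not wrapped by the transport layer.
Request putJob = new Request("PUT", "/_xpack/ml/anomaly_detectors/invalid%20job");
putJob.setJsonEntity("{"
        + "\"analysis_config\": {\"detectors\": [{\"function\": \"count\"}]},"
        + "\"data_description\": {}"
        + "}");
ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(putJob));
assertTrue(e.getMessage(),
        e.getMessage().contains("can contain lowercase alphanumeric (a-z and 0-9), hyphens or underscores"));
// If validation had happened only on the master, the root cause would be
// reported as a remote_transport_exception.
assertFalse(e.getMessage(), e.getMessage().contains("remote_transport_exception"));
```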
- assertFalse(e.getMessage(), e.getMessage().contains("remote_transport_exception")); - } - - public void testMiniFarequote() throws Exception { - String jobId = "mini-farequote-job"; - createFarequoteJob(jobId); - - Response response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); - - String postData = - "{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + - "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}"; - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data", - Collections.emptyMap(), - new StringEntity(postData, randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); - assertEquals(202, response.getStatusLine().getStatusCode()); - Map responseBody = responseEntityToMap(response); - assertEquals(2, responseBody.get("processed_record_count")); - assertEquals(4, responseBody.get("processed_field_count")); - assertEquals(177, responseBody.get("input_bytes")); - assertEquals(6, responseBody.get("input_field_count")); - assertEquals(0, responseBody.get("invalid_date_count")); - assertEquals(0, responseBody.get("missing_field_count")); - assertEquals(0, responseBody.get("out_of_order_timestamp_count")); - assertEquals(0, responseBody.get("bucket_count")); - assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); - assertEquals(1403481700000L, responseBody.get("latest_record_timestamp")); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertFlushResponse(response, true, 1403481600000L); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); - - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertEquals(200, response.getStatusLine().getStatusCode()); - @SuppressWarnings("unchecked") - Map dataCountsDoc = (Map) - ((Map)((List) responseEntityToMap(response).get("jobs")).get(0)).get("data_counts"); - assertEquals(2, dataCountsDoc.get("processed_record_count")); - assertEquals(4, dataCountsDoc.get("processed_field_count")); - assertEquals(177, dataCountsDoc.get("input_bytes")); - assertEquals(6, dataCountsDoc.get("input_field_count")); - assertEquals(0, dataCountsDoc.get("invalid_date_count")); - assertEquals(0, dataCountsDoc.get("missing_field_count")); - assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); - assertEquals(0, dataCountsDoc.get("bucket_count")); - assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); - assertEquals(1403481700000L, dataCountsDoc.get("latest_record_timestamp")); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertEquals(200, response.getStatusLine().getStatusCode()); - } - - public void testMiniFarequoteWithDatafeeder() throws Exception { - String mappings = "{" - + " \"mappings\": 
{" - + " \"response\": {" - + " \"properties\": {" - + " \"time\": { \"type\":\"date\"}," - + " \"airline\": { \"type\":\"keyword\"}," - + " \"responsetime\": { \"type\":\"float\"}" - + " }" - + " }" - + " }" - + "}"; - client().performRequest("put", "airline-data", Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data/response/1", Collections.emptyMap(), - new StringEntity("{\"time\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data/response/2", Collections.emptyMap(), - new StringEntity("{\"time\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", - ContentType.APPLICATION_JSON)); - - // Ensure all data is searchable - client().performRequest("post", "_refresh"); - - String jobId = "mini-farequote-with-data-feeder-job"; - createFarequoteJob(jobId); - String datafeedId = "bar"; - createDatafeed(datafeedId, jobId); - - Response response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start", - Collections.singletonMap("start", "0")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("started", true), responseEntityToMap(response)); - - assertBusy(() -> { - try { - Response statsResponse = - client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertEquals(200, statsResponse.getStatusLine().getStatusCode()); - @SuppressWarnings("unchecked") - Map dataCountsDoc = (Map) - ((Map)((List) responseEntityToMap(statsResponse).get("jobs")).get(0)).get("data_counts"); - assertEquals(2, dataCountsDoc.get("input_record_count")); - assertEquals(2, dataCountsDoc.get("processed_record_count")); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("stopped", true), responseEntityToMap(response)); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); - assertEquals(200, response.getStatusLine().getStatusCode()); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertEquals(200, response.getStatusLine().getStatusCode()); - } - - public void testMiniFarequoteReopen() throws Exception { - String jobId = "mini-farequote-reopen"; - createFarequoteJob(jobId); - - Response response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); - - String postData = - 
"{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + - "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}\n" + - "{\"airline\":\"JBU\",\"responsetime\":\"877.5927\",\"sourcetype\":\"farequote\",\"time\":\"1403481800\"}\n" + - "{\"airline\":\"KLM\",\"responsetime\":\"1355.4812\",\"sourcetype\":\"farequote\",\"time\":\"1403481900\"}\n" + - "{\"airline\":\"NKS\",\"responsetime\":\"9991.3981\",\"sourcetype\":\"farequote\",\"time\":\"1403482000\"}"; - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data", - Collections.emptyMap(), - new StringEntity(postData, randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); - assertEquals(202, response.getStatusLine().getStatusCode()); - Map responseBody = responseEntityToMap(response); - assertEquals(5, responseBody.get("processed_record_count")); - assertEquals(10, responseBody.get("processed_field_count")); - assertEquals(446, responseBody.get("input_bytes")); - assertEquals(15, responseBody.get("input_field_count")); - assertEquals(0, responseBody.get("invalid_date_count")); - assertEquals(0, responseBody.get("missing_field_count")); - assertEquals(0, responseBody.get("out_of_order_timestamp_count")); - assertEquals(0, responseBody.get("bucket_count")); - assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); - assertEquals(1403482000000L, responseBody.get("latest_record_timestamp")); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertFlushResponse(response, true, 1403481600000L); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); - - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertEquals(200, response.getStatusLine().getStatusCode()); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); - - // feed some more data points - postData = - "{\"airline\":\"AAL\",\"responsetime\":\"136.2361\",\"sourcetype\":\"farequote\",\"time\":\"1407081600\"}\n" + - "{\"airline\":\"VRD\",\"responsetime\":\"282.9847\",\"sourcetype\":\"farequote\",\"time\":\"1407081700\"}\n" + - "{\"airline\":\"JAL\",\"responsetime\":\"493.0338\",\"sourcetype\":\"farequote\",\"time\":\"1407081800\"}\n" + - "{\"airline\":\"UAL\",\"responsetime\":\"8.4275\",\"sourcetype\":\"farequote\",\"time\":\"1407081900\"}\n" + - "{\"airline\":\"FFT\",\"responsetime\":\"221.8693\",\"sourcetype\":\"farequote\",\"time\":\"1407082000\"}"; - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data", - Collections.emptyMap(), - new StringEntity(postData, randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); - assertEquals(202, response.getStatusLine().getStatusCode()); - 
responseBody = responseEntityToMap(response); - assertEquals(5, responseBody.get("processed_record_count")); - assertEquals(10, responseBody.get("processed_field_count")); - assertEquals(442, responseBody.get("input_bytes")); - assertEquals(15, responseBody.get("input_field_count")); - assertEquals(0, responseBody.get("invalid_date_count")); - assertEquals(0, responseBody.get("missing_field_count")); - assertEquals(0, responseBody.get("out_of_order_timestamp_count")); - assertEquals(1000, responseBody.get("bucket_count")); - - // unintuitive: should return the earliest record timestamp of this feed??? - assertEquals(null, responseBody.get("earliest_record_timestamp")); - assertEquals(1407082000000L, responseBody.get("latest_record_timestamp")); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); - - // counts should be summed up - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertEquals(200, response.getStatusLine().getStatusCode()); - - @SuppressWarnings("unchecked") - Map dataCountsDoc = (Map) - ((Map)((List) responseEntityToMap(response).get("jobs")).get(0)).get("data_counts"); - assertEquals(10, dataCountsDoc.get("processed_record_count")); - assertEquals(20, dataCountsDoc.get("processed_field_count")); - assertEquals(888, dataCountsDoc.get("input_bytes")); - assertEquals(30, dataCountsDoc.get("input_field_count")); - assertEquals(0, dataCountsDoc.get("invalid_date_count")); - assertEquals(0, dataCountsDoc.get("missing_field_count")); - assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); - assertEquals(1000, dataCountsDoc.get("bucket_count")); - assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); - assertEquals(1407082000000L, dataCountsDoc.get("latest_record_timestamp")); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertEquals(200, response.getStatusLine().getStatusCode()); - } - - private Response createDatafeed(String datafeedId, String jobId) throws Exception { - XContentBuilder xContentBuilder = jsonBuilder(); - xContentBuilder.startObject(); - xContentBuilder.field("job_id", jobId); - xContentBuilder.array("indexes", "airline-data"); - xContentBuilder.array("types", "response"); - xContentBuilder.field("_source", true); - xContentBuilder.endObject(); - return client().performRequest("put", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId, - Collections.emptyMap(), new StringEntity(Strings.toString(xContentBuilder), ContentType.APPLICATION_JSON)); - } - - private Response createFarequoteJob(String jobId) throws Exception { - XContentBuilder xContentBuilder = jsonBuilder(); - xContentBuilder.startObject(); - xContentBuilder.field("job_id", jobId); - xContentBuilder.field("description", "Analysis of response time by airline"); - - xContentBuilder.startObject("analysis_config"); - xContentBuilder.field("bucket_span", "3600s"); - xContentBuilder.startArray("detectors"); - xContentBuilder.startObject(); - xContentBuilder.field("function", "metric"); - xContentBuilder.field("field_name", "responsetime"); - xContentBuilder.field("by_field_name", "airline"); - xContentBuilder.endObject(); - xContentBuilder.endArray(); - xContentBuilder.endObject(); 
- - xContentBuilder.startObject("data_description"); - xContentBuilder.field("format", "xcontent"); - xContentBuilder.field("time_field", "time"); - xContentBuilder.field("time_format", "epoch"); - xContentBuilder.endObject(); - xContentBuilder.endObject(); - - return client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + URLEncoder.encode(jobId, "UTF-8"), - Collections.emptyMap(), new StringEntity(Strings.toString(xContentBuilder), ContentType.APPLICATION_JSON)); - } - - private static Map responseEntityToMap(Response response) throws IOException { - return XContentHelper.convertToMap(JSON.xContent(), response.getEntity().getContent(), false); - } - - private static void assertFlushResponse(Response response, boolean expectedFlushed, long expectedLastFinalizedBucketEnd) - throws IOException { - Map asMap = responseEntityToMap(response); - assertThat(asMap.size(), equalTo(2)); - assertThat(asMap.get("flushed"), is(true)); - assertThat(asMap.get("last_finalized_bucket_end"), equalTo(expectedLastFinalizedBucketEnd)); - } -} diff --git a/x-pack/qa/ml-disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java b/x-pack/qa/ml-disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java deleted file mode 100644 index 3bb9566e5bf..00000000000 --- a/x-pack/qa/ml-disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.integration; - -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.ml.MachineLearning; - -import java.util.Collections; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.containsString; - -public class MlPluginDisabledIT extends ESRestTestCase { - - /** - * Check that when the ml plugin is disabled, you cannot create a job as the - * rest handler is not registered - */ - public void testActionsFail() throws Exception { - XContentBuilder xContentBuilder = jsonBuilder(); - xContentBuilder.startObject(); - xContentBuilder.field("actions-fail-job", "foo"); - xContentBuilder.field("description", "Analysis of response time by airline"); - - xContentBuilder.startObject("analysis_config"); - xContentBuilder.field("bucket_span", "3600s"); - xContentBuilder.startArray("detectors"); - xContentBuilder.startObject(); - xContentBuilder.field("function", "metric"); - xContentBuilder.field("field_name", "responsetime"); - xContentBuilder.field("by_field_name", "airline"); - xContentBuilder.endObject(); - xContentBuilder.endArray(); - xContentBuilder.endObject(); - - xContentBuilder.startObject("data_description"); - xContentBuilder.field("format", "xcontent"); - xContentBuilder.field("time_field", "time"); - xContentBuilder.field("time_format", "epoch"); - xContentBuilder.endObject(); - xContentBuilder.endObject(); - - ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("put", - MachineLearning.BASE_PATH + 
"anomaly_detectors/foo", Collections.emptyMap(), - new StringEntity(Strings.toString(xContentBuilder), ContentType.APPLICATION_JSON))); - assertThat(exception.getMessage(), containsString("no handler found for uri [/_xpack/ml/anomaly_detectors/foo] and method [PUT]")); - } -} diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java b/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java deleted file mode 100644 index 07529acdb88..00000000000 --- a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java +++ /dev/null @@ -1,701 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.integration; - -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.core.ml.integration.MlRestTestStateCleaner; -import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; -import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; -import org.junit.After; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.Locale; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; - -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; - -public class MlJobIT extends ESRestTestCase { - - private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("x_pack_rest_user", - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); - - @Override - protected Settings restClientSettings() { - return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build(); - } - - @Override - protected boolean preserveTemplatesUponCompletion() { - return true; - } - - public void testPutJob_GivenFarequoteConfig() throws Exception { - Response response = createFarequoteJob("given-farequote-config-job"); - - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"job_id\":\"given-farequote-config-job\"")); - } - - public void testGetJob_GivenNoSuchJob() throws Exception { - ResponseException e = expectThrows(ResponseException.class, - () -> 
client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/non-existing-job/_stats")); - - assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); - assertThat(e.getMessage(), containsString("No known job with id 'non-existing-job'")); - } - - public void testGetJob_GivenJobExists() throws Exception { - createFarequoteJob("get-job_given-job-exists-job"); - - Response response = client().performRequest("get", - MachineLearning.BASE_PATH + "anomaly_detectors/get-job_given-job-exists-job/_stats"); - - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":1")); - assertThat(responseAsString, containsString("\"job_id\":\"get-job_given-job-exists-job\"")); - } - - public void testGetJobs_GivenSingleJob() throws Exception { - String jobId = "get-jobs_given-single-job-job"; - createFarequoteJob(jobId); - - // Explicit _all - Response response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/_all"); - - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":1")); - assertThat(responseAsString, containsString("\"job_id\":\"" + jobId + "\"")); - - // Implicit _all - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors"); - - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":1")); - assertThat(responseAsString, containsString("\"job_id\":\"" + jobId + "\"")); - } - - public void testGetJobs_GivenMultipleJobs() throws Exception { - createFarequoteJob("given-multiple-jobs-job-1"); - createFarequoteJob("given-multiple-jobs-job-2"); - createFarequoteJob("given-multiple-jobs-job-3"); - - // Explicit _all - Response response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/_all"); - - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":3")); - assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-1\"")); - assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-2\"")); - assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-3\"")); - - // Implicit _all - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors"); - - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":3")); - assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-1\"")); - assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-2\"")); - assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-3\"")); - } - - private Response createFarequoteJob(String jobId) throws IOException { - String job = "{\n" + " \"description\":\"Analysis of response time by airline\",\n" - + " \"analysis_config\" : {\n" + " \"bucket_span\": \"3600s\",\n" - + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]\n" - + " },\n" + " 
\"data_description\" : {\n" + " \"field_delimiter\":\",\",\n" + " " + - "\"time_field\":\"time\",\n" - + " \"time_format\":\"yyyy-MM-dd HH:mm:ssX\"\n" + " }\n" + "}"; - - return client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, - Collections.emptyMap(), new StringEntity(job, ContentType.APPLICATION_JSON)); - } - - public void testCantCreateJobWithSameID() throws Exception { - String jobTemplate = "{\n" + - " \"analysis_config\" : {\n" + - " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\"}]\n" + - " },\n" + - " \"data_description\": {},\n" + - " \"results_index_name\" : \"%s\"}"; - - String jobConfig = String.format(Locale.ROOT, jobTemplate, "index-1"); - - String jobId = "cant-create-job-with-same-id-job"; - Response response = client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId , - Collections.emptyMap(), - new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - final String jobConfig2 = String.format(Locale.ROOT, jobTemplate, "index-2"); - ResponseException e = expectThrows(ResponseException.class, - () ->client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, - Collections.emptyMap(), new StringEntity(jobConfig2, ContentType.APPLICATION_JSON))); - - assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); - assertThat(e.getMessage(), containsString("The job cannot be created with the Id '" + jobId + "'. The Id is already used.")); - } - - public void testCreateJobsWithIndexNameOption() throws Exception { - String jobTemplate = "{\n" + - " \"analysis_config\" : {\n" + - " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\"}]\n" + - " },\n" + - " \"data_description\": {},\n" + - " \"results_index_name\" : \"%s\"}"; - - String jobId1 = "create-jobs-with-index-name-option-job-1"; - String indexName = "non-default-index"; - String jobConfig = String.format(Locale.ROOT, jobTemplate, indexName); - - Response response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId1, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - String jobId2 = "create-jobs-with-index-name-option-job-2"; - response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId2, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - // With security enabled GET _aliases throws an index_not_found_exception - // if no aliases have been created. In multi-node tests the alias may not - // appear immediately so wait here. 
- assertBusy(() -> { - try { - Response aliasesResponse = client().performRequest("get", "_aliases"); - assertEquals(200, aliasesResponse.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(aliasesResponse); - assertThat(responseAsString, - containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName("custom-" + indexName) + "\":{\"aliases\":{")); - assertThat(responseAsString, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) - + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId1 + "\",\"boost\":1.0}}}}")); - assertThat(responseAsString, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId1) + "\":{}")); - assertThat(responseAsString, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId2) - + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId2 + "\",\"boost\":1.0}}}}")); - assertThat(responseAsString, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId2) + "\":{}")); - } catch (ResponseException e) { - throw new AssertionError(e); - } - }); - - Response indicesResponse = client().performRequest("get", "_cat/indices"); - assertEquals(200, indicesResponse.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(indicesResponse); - assertThat(responseAsString, - containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName)); - assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1)))); - assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2)))); - - String bucketResult = String.format(Locale.ROOT, - "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"result_type\":\"bucket\", \"bucket_span\": \"%s\"}", - jobId1, "1234", 1); - String id = String.format(Locale.ROOT, "%s_bucket_%s_%s", jobId1, "1234", 300); - response = client().performRequest("put", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/doc/" + id, - Collections.emptyMap(), new StringEntity(bucketResult, ContentType.APPLICATION_JSON)); - assertEquals(201, response.getStatusLine().getStatusCode()); - - bucketResult = String.format(Locale.ROOT, - "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"result_type\":\"bucket\", \"bucket_span\": \"%s\"}", - jobId1, "1236", 1); - id = String.format(Locale.ROOT, "%s_bucket_%s_%s", jobId1, "1236", 300); - response = client().performRequest("put", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/doc/" + id, - Collections.emptyMap(), new StringEntity(bucketResult, ContentType.APPLICATION_JSON)); - assertEquals(201, response.getStatusLine().getStatusCode()); - - client().performRequest("post", "_refresh"); - - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1 + "/results/buckets"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":2")); - - response = client().performRequest("get", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/_search"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"total\":2")); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - - // check that indices still 
exist, but are empty and aliases are gone - response = client().performRequest("get", "_aliases"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1)))); - assertThat(responseAsString, containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2))); //job2 still exists - - response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName)); - - client().performRequest("post", "_refresh"); - - response = client().performRequest("get", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName + "/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":0")); - } - - public void testCreateJobInSharedIndexUpdatesMapping() throws Exception { - String jobTemplate = "{\n" + - " \"analysis_config\" : {\n" + - " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + - " },\n" + - " \"data_description\": {}\n" + - "}"; - - String jobId1 = "create-job-in-shared-index-updates-mapping-job-1"; - String byFieldName1 = "responsetime"; - String jobId2 = "create-job-in-shared-index-updates-mapping-job-2"; - String byFieldName2 = "cpu-usage"; - String jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName1); - - Response response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId1, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - // Check the index mapping contains the first by_field_name - response = client().performRequest("get", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX - + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT + "/_mapping?pretty"); - assertEquals(200, response.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(byFieldName1)); - assertThat(responseAsString, not(containsString(byFieldName2))); - - jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName2); - response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId2, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - // Check the index mapping now contains both fields - response = client().performRequest("get", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX - + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT + "/_mapping?pretty"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(byFieldName1)); - assertThat(responseAsString, containsString(byFieldName2)); - } - - public void testCreateJobInCustomSharedIndexUpdatesMapping() throws Exception { - String jobTemplate = "{\n" + - " \"analysis_config\" : {\n" + - " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + - " },\n" + - " \"data_description\": {},\n" + - 
" \"results_index_name\" : \"shared-index\"}"; - - String jobId1 = "create-job-in-custom-shared-index-updates-mapping-job-1"; - String byFieldName1 = "responsetime"; - String jobId2 = "create-job-in-custom-shared-index-updates-mapping-job-2"; - String byFieldName2 = "cpu-usage"; - String jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName1); - - Response response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId1, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - // Check the index mapping contains the first by_field_name - response = client().performRequest("get", - AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-shared-index" + "/_mapping?pretty"); - assertEquals(200, response.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(byFieldName1)); - assertThat(responseAsString, not(containsString(byFieldName2))); - - jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName2); - response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId2, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - // Check the index mapping now contains both fields - response = client().performRequest("get", - AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-shared-index" + "/_mapping?pretty"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(byFieldName1)); - assertThat(responseAsString, containsString(byFieldName2)); - } - - public void testCreateJob_WithClashingFieldMappingsFails() throws Exception { - String jobTemplate = "{\n" + - " \"analysis_config\" : {\n" + - " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + - " },\n" + - " \"data_description\": {}\n" + - "}"; - - String jobId1 = "job-with-response-field"; - String byFieldName1; - String jobId2 = "job-will-fail-with-mapping-error-on-response-field"; - String byFieldName2; - // we should get the friendly advice nomatter which way around the clashing fields are seen - if (randomBoolean()) { - byFieldName1 = "response"; - byFieldName2 = "response.time"; - } else { - byFieldName1 = "response.time"; - byFieldName2 = "response"; - } - String jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName1); - - Response response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId1, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - final String failingJobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName2); - ResponseException e = expectThrows(ResponseException.class, - () -> client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2, - Collections.emptyMap(), new StringEntity(failingJobConfig, ContentType.APPLICATION_JSON))); - - assertThat(e.getMessage(), - containsString("This job would cause a mapping clash with existing field [response] - " + - "avoid the clash by assigning a dedicated results index")); - } - - public void testDeleteJob() throws Exception { - String jobId = "delete-job-job"; - String indexName = 
AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; - createFarequoteJob(jobId); - - Response response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(indexName)); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - - // check that the index still exists (it's shared by default) - response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(indexName)); - - assertBusy(() -> { - try { - Response r = client().performRequest("get", indexName + "/_count"); - assertEquals(200, r.getStatusLine().getStatusCode()); - String responseString = responseEntityToString(r); - assertThat(responseString, containsString("\"count\":0")); - } catch (Exception e) { - fail(e.getMessage()); - } - - }); - - // check that the job itself is gone - expectThrows(ResponseException.class, () -> - client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); - } - - public void testDeleteJobAfterMissingIndex() throws Exception { - String jobId = "delete-job-after-missing-index-job"; - String aliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); - String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; - createFarequoteJob(jobId); - - Response response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(indexName)); - - // Manually delete the index so that we can test that deletion proceeds - // normally anyway - response = client().performRequest("delete", indexName); - assertEquals(200, response.getStatusLine().getStatusCode()); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - - // check index was deleted - response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, not(containsString(aliasName))); - assertThat(responseAsString, not(containsString(indexName))); - - expectThrows(ResponseException.class, () -> - client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); - } - - public void testDeleteJobAfterMissingAliases() throws Exception { - String jobId = "delete-job-after-missing-alias-job"; - String readAliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); - String writeAliasName = AnomalyDetectorsIndex.resultsWriteAlias(jobId); - String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; - createFarequoteJob(jobId); - - // With security enabled cat aliases throws an index_not_found_exception - // if no aliases have been created. In multi-node tests the alias may not - // appear immediately so wait here. 
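The removed tests in this stretch (testDeleteJobAfterMissingIndex and, after the retry below, testDeleteJobAfterMissingAliases) share one idea: remove the job's backing index or aliases by hand, then verify that deleting the job still returns 200 rather than failing on the missing state. The missing-index variant, sketched (index and job names are illustrative):

```java
// Pull the results index out from under the job, then delete the job anyway.
Response deleteIndex = client().performRequest(new Request("DELETE", "/.ml-anomalies-shared"));
assertEquals(200, deleteIndex.getStatusLine().getStatusCode());

Response deleteJob = client().performRequest(
        new Request("DELETE", "/_xpack/ml/anomaly_detectors/job-with-missing-index"));
assertEquals(200, deleteJob.getStatusLine().getStatusCode());

// The job's stats endpoint should now 404.
expectThrows(ResponseException.class, () -> client().performRequest(
        new Request("GET", "/_xpack/ml/anomaly_detectors/job-with-missing-index/_stats")));
```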
- assertBusy(() -> { - try { - Response aliasesResponse = client().performRequest(new Request("get", "_cat/aliases")); - assertEquals(200, aliasesResponse.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(aliasesResponse); - assertThat(responseAsString, containsString(readAliasName)); - assertThat(responseAsString, containsString(writeAliasName)); - } catch (ResponseException e) { - throw new AssertionError(e); - } - }); - - // Manually delete the aliases so that we can test that deletion proceeds - // normally anyway - Response response = client().performRequest("delete", indexName + "/_alias/" + readAliasName); - assertEquals(200, response.getStatusLine().getStatusCode()); - response = client().performRequest("delete", indexName + "/_alias/" + writeAliasName); - assertEquals(200, response.getStatusLine().getStatusCode()); - - // check aliases were deleted - expectThrows(ResponseException.class, () -> client().performRequest("get", indexName + "/_alias/" + readAliasName)); - expectThrows(ResponseException.class, () -> client().performRequest("get", indexName + "/_alias/" + writeAliasName)); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - } - - public void testMultiIndexDelete() throws Exception { - String jobId = "multi-index-delete-job"; - String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; - createFarequoteJob(jobId); - - Response response = client().performRequest("put", indexName + "-001"); - assertEquals(200, response.getStatusLine().getStatusCode()); - - response = client().performRequest("put", indexName + "-002"); - assertEquals(200, response.getStatusLine().getStatusCode()); - - response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(indexName)); - assertThat(responseAsString, containsString(indexName + "-001")); - assertThat(responseAsString, containsString(indexName + "-002")); - - // Add some documents to each index to make sure the DBQ clears them out - String recordResult = - String.format(Locale.ROOT, - "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"bucket_span\":%d, \"result_type\":\"record\"}", - jobId, 123, 1); - client().performRequest("put", indexName + "/doc/" + 123, - Collections.singletonMap("refresh", "true"), new StringEntity(recordResult, ContentType.APPLICATION_JSON)); - client().performRequest("put", indexName + "-001/doc/" + 123, - Collections.singletonMap("refresh", "true"), new StringEntity(recordResult, ContentType.APPLICATION_JSON)); - client().performRequest("put", indexName + "-002/doc/" + 123, - Collections.singletonMap("refresh", "true"), new StringEntity(recordResult, ContentType.APPLICATION_JSON)); - - // Also index a few through the alias for the first job - client().performRequest("put", indexName + "/doc/" + 456, - Collections.singletonMap("refresh", "true"), new StringEntity(recordResult, ContentType.APPLICATION_JSON)); - - - client().performRequest("post", "_refresh"); - - // check for the documents - response = client().performRequest("get", indexName+ "/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":2")); - 
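Before and after the job delete, the removed testMultiIndexDelete verifies document counts with _count: after deletion, the delete-by-query should have emptied every index while leaving the indices themselves in place. A sketch of the post-delete check (index names are illustrative, patterned on the surrounding code):

```java
// Make the deletions visible to search, then confirm each surviving index is empty.
client().performRequest(new Request("POST", "/_refresh"));
for (String index : new String[] {
        ".ml-anomalies-shared", ".ml-anomalies-shared-001", ".ml-anomalies-shared-002"}) {
    Map<String, Object> count = entityAsMap(
            client().performRequest(new Request("GET", "/" + index + "/_count")));
    assertEquals(0, count.get("count"));
}
```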
- response = client().performRequest("get", indexName + "-001/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":1")); - - response = client().performRequest("get", indexName + "-002/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":1")); - - // Delete - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - - client().performRequest("post", "_refresh"); - - // check that the indices still exist but are empty - response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(indexName)); - assertThat(responseAsString, containsString(indexName + "-001")); - assertThat(responseAsString, containsString(indexName + "-002")); - - response = client().performRequest("get", indexName + "/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":0")); - - response = client().performRequest("get", indexName + "-001/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":0")); - - response = client().performRequest("get", indexName + "-002/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":0")); - - - expectThrows(ResponseException.class, () -> - client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); - } - - public void testDelete_multipleRequest() throws Exception { - String jobId = "delete-job-mulitple-times"; - createFarequoteJob(jobId); - - ConcurrentMapLong responses = ConcurrentCollections.newConcurrentMapLong(); - ConcurrentMapLong responseExceptions = ConcurrentCollections.newConcurrentMapLong(); - AtomicReference ioe = new AtomicReference<>(); - AtomicInteger recreationGuard = new AtomicInteger(0); - AtomicReference recreationResponse = new AtomicReference<>(); - AtomicReference recreationException = new AtomicReference<>(); - - Runnable deleteJob = () -> { - try { - boolean forceDelete = randomBoolean(); - String url = MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId; - if (forceDelete) { - url += "?force=true"; - } - Response response = client().performRequest("delete", url); - responses.put(Thread.currentThread().getId(), response); - } catch (ResponseException re) { - responseExceptions.put(Thread.currentThread().getId(), re); - } catch (IOException e) { - ioe.set(e); - } - - // Immediately after the first deletion finishes, recreate the job. This should pick up - // race conditions where another delete request deletes part of the newly created job. 
- if (recreationGuard.getAndIncrement() == 0) { - try { - recreationResponse.set(createFarequoteJob(jobId)); - } catch (ResponseException re) { - recreationException.set(re); - } catch (IOException e) { - ioe.set(e); - } - } - }; - - // The idea is to hit the situation where one request waits for - // the other to complete. This is difficult to schedule but - // hopefully it will happen in CI - int numThreads = 5; - Thread [] threads = new Thread[numThreads]; - for (int i=0; i remoteClusterTest.nodes.get(0).transportUri()}\"" - setting 'search.remote.connections_per_cluster', 1 - setting 'search.remote.connect', true + setting 'cluster.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\"" + setting 'cluster.remote.connections_per_cluster', 1 + setting 'cluster.remote.connect', true } mixedClusterTestRunner { diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index dc18ecd8a70..35c6212451c 100644 --- a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -160,16 +160,16 @@ teardown: cluster.get_settings: include_defaults: true - - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip } + - set: { defaults.cluster.remote.my_remote_cluster.seeds.0: remote_ip } - do: cluster.put_settings: flat_settings: true body: transient: - search.remote.test_remote_cluster.seeds: $remote_ip + cluster.remote.test_remote_cluster.seeds: $remote_ip - - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}} + - match: {transient: {cluster.remote.test_remote_cluster.seeds: $remote_ip}} - do: headers: { Authorization: "Basic am9lOnMza3JpdA==" } diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml index 5ff92df69b8..490edf794f6 100644 --- a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml @@ -48,16 +48,16 @@ teardown: cluster.get_settings: include_defaults: true - - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip } + - set: { defaults.cluster.remote.my_remote_cluster.seeds.0: remote_ip } - do: cluster.put_settings: flat_settings: true body: transient: - search.remote.test_remote_cluster.seeds: $remote_ip + cluster.remote.test_remote_cluster.seeds: $remote_ip - - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}} + - match: {transient: {cluster.remote.test_remote_cluster.seeds: $remote_ip}} # we do another search here since this will enforce the connection to be established # otherwise the cluster might not have been connected yet. 
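The build.gradle and YAML hunks above track the rename of the remote-cluster settings namespace from search.remote.* to cluster.remote.*. Issued from Java test code instead of a YAML test, the same transient settings update might look like this sketch (the seed address is illustrative):

```java
// Register a remote cluster under the renamed namespace. Before this change
// the key was "search.remote.test_remote_cluster.seeds".
Request putSettings = new Request("PUT", "/_cluster/settings");
putSettings.setJsonEntity(
        "{\"transient\": {\"cluster.remote.test_remote_cluster.seeds\": \"127.0.0.1:9300\"}}");
Response response = client().performRequest(putSettings);
assertEquals(200, response.getStatusLine().getStatusCode());
```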
diff --git a/x-pack/qa/multi-node/build.gradle b/x-pack/qa/multi-node/build.gradle
index 19729cf367e..4369287caba 100644
--- a/x-pack/qa/multi-node/build.gradle
+++ b/x-pack/qa/multi-node/build.gradle
@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
 }
 
 integTestCluster {
diff --git a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java
index abc784b4cb2..18cd67ff271 100644
--- a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java
+++ b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java
@@ -5,8 +5,7 @@
  */
 package org.elasticsearch.multi_node;
 
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.SecureString;
@@ -16,10 +15,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.test.rest.yaml.ObjectPath;
 
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
 import static org.hamcrest.Matchers.equalTo;
@@ -59,12 +54,15 @@ public class GlobalCheckpointSyncActionIT extends ESRestTestCase {
                 builder.endObject();
             }
             builder.endObject();
-            final StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
-            client().performRequest("PUT", "test-index", Collections.emptyMap(), entity);
+            Request createIndexRequest = new Request("PUT", "/test-index");
+            createIndexRequest.setJsonEntity(Strings.toString(builder));
+            client().performRequest(createIndexRequest);
         }
 
         // wait for the replica to recover
-        client().performRequest("GET", "/_cluster/health", Collections.singletonMap("wait_for_status", "green"));
+        Request healthRequest = new Request("GET", "/_cluster/health");
+        healthRequest.addParameter("wait_for_status", "green");
+        client().performRequest(healthRequest);
 
         // index some documents
         final int numberOfDocuments = randomIntBetween(0, 128);
@@ -75,17 +73,18 @@
                     builder.field("foo", i);
                 }
                 builder.endObject();
-                final StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
-                client().performRequest("PUT", "/test-index/test-type/" + i, Collections.emptyMap(), entity);
+                Request indexRequest = new Request("PUT", "/test-index/test-type/" + i);
+                indexRequest.setJsonEntity(Strings.toString(builder));
+                client().performRequest(indexRequest);
             }
         }
 
         // we have to wait for the post-operation global checkpoint sync to propagate to the replica
         assertBusy(() -> {
-            final Map<String, String> params = new HashMap<>(2);
-            params.put("level", "shards");
-            params.put("filter_path", "**.seq_no");
-            final Response response = client().performRequest("GET", "/test-index/_stats", params);
+            final Request request = new Request("GET", "/test-index/_stats");
+            request.addParameter("level", "shards");
+            request.addParameter("filter_path", "**.seq_no");
+            final Response response = client().performRequest(request);
             final ObjectPath path = ObjectPath.createFromResponse(response);
             // int looks funny here since global checkpoints are longs but the response parser does not know enough to treat them as long
             final int shard0GlobalCheckpoint = path.evaluate("indices.test-index.shards.0.0.seq_no.global_checkpoint");
diff --git a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java
index 43ad4dc0a45..fb9c665b2bf 100644
--- a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java
+++ b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java
@@ -173,7 +173,7 @@ public class RollupIT extends ESRestTestCase {
                 "        \"date_histo\": {\n" +
                 "          \"date_histogram\": {\n" +
                 "            \"field\": \"timestamp\",\n" +
-                "            \"interval\": \"1h\",\n" +
+                "            \"interval\": \"60m\",\n" +
                 "            \"format\": \"date_time\"\n" +
                 "          },\n" +
                 "          \"aggs\": {\n" +
diff --git a/x-pack/qa/openldap-tests/build.gradle b/x-pack/qa/openldap-tests/build.gradle
index 24cd6184afa..bb9a9799289 100644
--- a/x-pack/qa/openldap-tests/build.gradle
+++ b/x-pack/qa/openldap-tests/build.gradle
@@ -5,7 +5,8 @@ apply plugin: 'elasticsearch.standalone-test'
 apply plugin: 'elasticsearch.vagrantsupport'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+  testCompile project(path: xpackModule('core'), configuration: 'default')
   testCompile project(path: xpackModule('security'), configuration: 'testArtifacts')
   testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
 }
diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle
index 097d343b279..97c0e8e17fe 100644
--- a/x-pack/qa/reindex-tests-with-security/build.gradle
+++ b/x-pack/qa/reindex-tests-with-security/build.gradle
@@ -2,7 +2,8 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+  testCompile project(path: xpackModule('core'), configuration: 'default')
   testCompile project(path: xpackModule('security'), configuration: 'testArtifacts')
   testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
   testCompile project(path: ':modules:reindex')
diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle
index 21ac4414d86..5774e5d7856 100644
--- a/x-pack/qa/rolling-upgrade-basic/build.gradle
+++ b/x-pack/qa/rolling-upgrade-basic/build.gradle
@@ -7,7 +7,8 @@ import java.nio.charset.StandardCharsets
 apply plugin: 'elasticsearch.standalone-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+  testCompile project(path: xpackModule('core'), configuration: 'default')
   testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit
 }
diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle
index b983caa8669..90da6cf4e58 100644
--- a/x-pack/qa/rolling-upgrade/build.gradle
+++ b/x-pack/qa/rolling-upgrade/build.gradle
@@ -10,7 +10,8 @@ apply plugin: 'elasticsearch.build'
 test.enabled = false
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+  testCompile project(path: xpackModule('core'), configuration: 'default')
   testCompile project(path: xpackModule('security'), configuration: 'runtime')
   testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit
 }
@@ -157,6 +158,7 @@ subprojects {
         } else {
           String systemKeyFile = version.before('6.3.0') ? 'x-pack/system_key' : 'system_key'
           extraConfigFile systemKeyFile, "${mainProject.projectDir}/src/test/resources/system_key"
+          keystoreSetting 'xpack.security.authc.token.passphrase', 'token passphrase'
         }
         setting 'xpack.watcher.encrypt_sensitive_data', 'true'
       }
@@ -198,6 +200,9 @@ subprojects {
         setting 'xpack.watcher.encrypt_sensitive_data', 'true'
         keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key"
       }
+      if (version.before('6.0.0')) {
+        keystoreSetting 'xpack.security.authc.token.passphrase', 'token passphrase'
+      }
     }
   }
@@ -284,7 +289,8 @@ subprojects {
   check.dependsOn(integTest)
 
   dependencies {
-    testCompile project(path: xpackModule('core'), configuration: 'shadow')
+    // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+    testCompile project(path: xpackModule('core'), configuration: 'default')
    testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
    testCompile project(path: xpackModule('watcher'))
  }
diff --git a/x-pack/qa/saml-idp-tests/build.gradle b/x-pack/qa/saml-idp-tests/build.gradle
index 752ec6fb307..11e89d93c8e 100644
--- a/x-pack/qa/saml-idp-tests/build.gradle
+++ b/x-pack/qa/saml-idp-tests/build.gradle
@@ -6,7 +6,8 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+  testCompile project(path: xpackModule('core'), configuration: 'default')
   testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
   testCompile project(path: xpackModule('security'), configuration: 'testArtifacts')
   testCompile 'com.google.jimfs:jimfs:1.1'
@@ -36,17 +37,30 @@ integTestCluster {
   setting 'xpack.security.authc.token.enabled', 'true'
   setting 'xpack.security.authc.realms.file.type', 'file'
   setting 'xpack.security.authc.realms.file.order', '0'
+  // SAML realm 1 (no authorization_realms)
   setting 'xpack.security.authc.realms.shibboleth.type', 'saml'
   setting 'xpack.security.authc.realms.shibboleth.order', '1'
   setting 'xpack.security.authc.realms.shibboleth.idp.entity_id', 'https://test.shibboleth.elastic.local/'
   setting 'xpack.security.authc.realms.shibboleth.idp.metadata.path', 'idp-metadata.xml'
-  setting 'xpack.security.authc.realms.shibboleth.sp.entity_id', 'http://mock.http.elastic.local/'
+  setting 'xpack.security.authc.realms.shibboleth.sp.entity_id', 'http://mock1.http.elastic.local/'
   // The port in the ACS URL is fake - the test will bind the mock webserver
   // to a random port and then whenever it needs to connect to a URL on the
   // mock webserver it will replace 54321 with the real port
-  setting 'xpack.security.authc.realms.shibboleth.sp.acs', 'http://localhost:54321/saml/acs'
+  setting 'xpack.security.authc.realms.shibboleth.sp.acs', 'http://localhost:54321/saml/acs1'
   setting 'xpack.security.authc.realms.shibboleth.attributes.principal', 'uid'
   setting 'xpack.security.authc.realms.shibboleth.attributes.name', 'urn:oid:2.5.4.3'
+  // SAML realm 2 (uses authorization_realms)
+  setting 'xpack.security.authc.realms.shibboleth_native.type', 'saml'
+  setting 'xpack.security.authc.realms.shibboleth_native.order', '2'
+  setting 'xpack.security.authc.realms.shibboleth_native.idp.entity_id', 'https://test.shibboleth.elastic.local/'
+  setting 'xpack.security.authc.realms.shibboleth_native.idp.metadata.path', 'idp-metadata.xml'
+  setting 'xpack.security.authc.realms.shibboleth_native.sp.entity_id', 'http://mock2.http.elastic.local/'
+  setting 'xpack.security.authc.realms.shibboleth_native.sp.acs', 'http://localhost:54321/saml/acs2'
+  setting 'xpack.security.authc.realms.shibboleth_native.attributes.principal', 'uid'
+  setting 'xpack.security.authc.realms.shibboleth_native.authorization_realms', 'native'
+  setting 'xpack.security.authc.realms.native.type', 'native'
+  setting 'xpack.security.authc.realms.native.order', '3'
+
   setting 'xpack.ml.enabled', 'false'
   extraConfigFile 'idp-metadata.xml', idpFixtureProject.file("src/main/resources/provision/generated/idp-metadata.xml")
diff --git a/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java b/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java
index bf4ad79c59d..b3fc7dd0c2f 100644
--- a/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java
+++ b/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java
@@ -24,8 +24,6 @@ import org.apache.http.client.utils.URLEncodedUtils;
 import org.apache.http.cookie.Cookie;
 import org.apache.http.cookie.CookieOrigin;
 import org.apache.http.cookie.MalformedCookieException;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
 import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.HttpClients;
 import org.apache.http.impl.cookie.DefaultCookieSpec;
@@ -39,9 +37,13 @@ import org.apache.http.util.CharArrayBuffer;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cli.SuppressForbidden;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.settings.SecureString;
@@ -49,6 +51,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.mocksocket.MockHttpServer;
 import org.elasticsearch.test.rest.ESRestTestCase;
@@ -65,7 +68,6 @@ import javax.net.ssl.KeyManager;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.TrustManager;
 import javax.net.ssl.X509ExtendedTrustManager;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
@@ -85,7 +87,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import static java.util.Collections.emptyMap;
 import static org.elasticsearch.common.xcontent.XContentHelper.convertToMap;
 import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
 import static org.hamcrest.Matchers.contains;
@@ -103,13 +104,16 @@ import static org.hamcrest.Matchers.startsWith;
 public class SamlAuthenticationIT extends ESRestTestCase {
 
     private static final String SP_LOGIN_PATH = "/saml/login";
-    private static final String SP_ACS_PATH = "/saml/acs";
+    private static final String SP_ACS_PATH_1 = "/saml/acs1";
+    private static final String SP_ACS_PATH_2 = "/saml/acs2";
     private static final String SAML_RESPONSE_FIELD = "SAMLResponse";
     private static final String REQUEST_ID_COOKIE = "saml-request-id";
 
     private static final String KIBANA_PASSWORD = "K1b@na K1b@na K1b@na";
     private static HttpServer httpServer;
 
+    private URI acs;
+
     @BeforeClass
     public static void setupHttpServer() throws IOException {
         InetSocketAddress address = new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0);
@@ -134,7 +138,8 @@ public class SamlAuthenticationIT extends ESRestTestCase {
     @Before
     public void setupHttpContext() {
         httpServer.createContext(SP_LOGIN_PATH, wrapFailures(this::httpLogin));
-        httpServer.createContext(SP_ACS_PATH, wrapFailures(this::httpAcs));
+        httpServer.createContext(SP_ACS_PATH_1, wrapFailures(this::httpAcs));
+        httpServer.createContext(SP_ACS_PATH_2, wrapFailures(this::httpAcs));
     }
 
     /**
@@ -158,7 +163,8 @@ public class SamlAuthenticationIT extends ESRestTestCase {
     @After
     public void clearHttpContext() {
         httpServer.removeContext(SP_LOGIN_PATH);
-        httpServer.removeContext(SP_ACS_PATH);
+        httpServer.removeContext(SP_ACS_PATH_1);
+        httpServer.removeContext(SP_ACS_PATH_2);
     }
 
     @Override
@@ -176,9 +182,9 @@ public class SamlAuthenticationIT extends ESRestTestCase {
      */
     @Before
     public void setKibanaPassword() throws IOException {
-        final HttpEntity json = new StringEntity("{ \"password\" : \"" + KIBANA_PASSWORD + "\" }", ContentType.APPLICATION_JSON);
-        final Response response = adminClient().performRequest("PUT", "/_xpack/security/user/kibana/_password", emptyMap(), json);
-        assertOK(response);
+        Request request = new Request("PUT", "/_xpack/security/user/kibana/_password");
+        request.setJsonEntity("{ \"password\" : \"" + KIBANA_PASSWORD + "\" }");
+        adminClient().performRequest(request);
     }
 
     /**
@@ -188,20 +194,33 @@ public class SamlAuthenticationIT extends ESRestTestCase {
      */
     @Before
     public void setupRoleMapping() throws IOException {
-        final StringEntity json = new StringEntity(Strings // top-level
-                .toString(XContentBuilder.builder(XContentType.JSON.xContent())
-                        .startObject()
-                        .array("roles", new String[] { "kibana_user"} )
-                        .field("enabled", true)
-                        .startObject("rules")
+        Request request = new Request("PUT", "/_xpack/security/role_mapping/thor-kibana");
+        request.setJsonEntity(Strings.toString(XContentBuilder.builder(XContentType.JSON.xContent())
+                .startObject()
+                .array("roles", new String[] { "kibana_user"} )
+                .field("enabled", true)
+                .startObject("rules")
                 .startArray("all")
-                        .startObject().startObject("field").field("username", "thor").endObject().endObject()
-                        .startObject().startObject("field").field("realm.name", "shibboleth").endObject().endObject()
+                .startObject().startObject("field").field("username", "thor").endObject().endObject()
+                .startObject().startObject("field").field("realm.name", "shibboleth").endObject().endObject()
                 .endArray() // "all"
-                .endObject() // "rules"
-                .endObject()), ContentType.APPLICATION_JSON);
+                .endObject() // "rules"
+                .endObject()));
+        adminClient().performRequest(request);
+    }
 
-        final Response response = adminClient().performRequest("PUT", "/_xpack/security/role_mapping/thor-kibana", emptyMap(), json);
+    /**
+     * Create a native user for "thor" that is used for user-lookup (authorizing realms)
+     */
+    @Before
+    public void setupNativeUser() throws IOException {
+        final Map<String, Object> body = MapBuilder.<String, Object>newMapBuilder()
+                .put("roles", Collections.singletonList("kibana_dashboard_only_user"))
+                .put("full_name", "Thor Son of Odin")
+                .put("password", randomAlphaOfLengthBetween(8, 16))
+                .put("metadata", Collections.singletonMap("is_native", true))
+                .map();
+        final Response response = adminClient().performRequest(buildRequest("PUT", "/_xpack/security/user/thor", body));
         assertOK(response);
     }
 
@@ -221,7 +240,24 @@
      *
      * <li>Uses that token to verify the user details</li>
     * </ol>
     */
-    public void testLoginUser() throws Exception {
+    public void testLoginUserWithSamlRoleMapping() throws Exception {
+        // this ACS comes from the config in build.gradle
+        final Tuple<String, String> authTokens = loginViaSaml("http://localhost:54321" + SP_ACS_PATH_1);
+        verifyElasticsearchAccessTokenForRoleMapping(authTokens.v1());
+        final String accessToken = verifyElasticsearchRefreshToken(authTokens.v2());
+        verifyElasticsearchAccessTokenForRoleMapping(accessToken);
+    }
+
+    public void testLoginUserWithAuthorizingRealm() throws Exception {
+        // this ACS comes from the config in build.gradle
+        final Tuple<String, String> authTokens = loginViaSaml("http://localhost:54321" + SP_ACS_PATH_2);
+        verifyElasticsearchAccessTokenForAuthorizingRealms(authTokens.v1());
+        final String accessToken = verifyElasticsearchRefreshToken(authTokens.v2());
+        verifyElasticsearchAccessTokenForAuthorizingRealms(accessToken);
+    }
+
+    private Tuple<String, String> loginViaSaml(String acs) throws Exception {
+        this.acs = new URI(acs);
         final BasicHttpContext context = new BasicHttpContext();
         try (CloseableHttpClient client = getHttpClient()) {
             final URI loginUri = goToLoginPage(client, context);
@@ -237,24 +273,21 @@
             final Object accessToken = result.get("access_token");
             assertThat(accessToken, notNullValue());
             assertThat(accessToken, instanceOf(String.class));
-            verifyElasticsearchAccessToken((String) accessToken);
 
             final Object refreshToken = result.get("refresh_token");
             assertThat(refreshToken, notNullValue());
             assertThat(refreshToken, instanceOf(String.class));
-            verifyElasticsearchRefreshToken((String) refreshToken);
+
+            return new Tuple<>((String) accessToken, (String) refreshToken);
         }
     }
 
     /**
      * Verifies that the provided "Access Token" (see {@link org.elasticsearch.xpack.security.authc.TokenService})
-     * is for the expected user with the expected name and roles.
+     * is for the expected user with the expected name and roles if the user was created from Role-Mapping
      */
-    private void verifyElasticsearchAccessToken(String accessToken) throws IOException {
-        final BasicHeader authorization = new BasicHeader("Authorization", "Bearer " + accessToken);
-        final Response response = client().performRequest("GET", "/_xpack/security/_authenticate", authorization);
-        assertOK(response);
-        final Map<String, Object> map = parseResponseAsMap(response.getEntity());
+    private void verifyElasticsearchAccessTokenForRoleMapping(String accessToken) throws IOException {
+        final Map<String, Object> map = callAuthenticateApiUsingAccessToken(accessToken);
         assertThat(map.get("username"), equalTo("thor"));
         assertThat(map.get("full_name"), equalTo("Thor Odinson"));
         assertSingletonList(map.get("roles"), "kibana_user");
@@ -268,16 +301,37 @@
     }
 
     /**
-     * Verifies that the provided "Refresh Token" (see {@link org.elasticsearch.xpack.security.authc.TokenService})
-     * can be used to get a new valid access token and refresh token.
+     * Verifies that the provided "Access Token" (see {@link org.elasticsearch.xpack.security.authc.TokenService})
+     * is for the expected user with the expected name and roles if the user was retrieved from the native realm
      */
-    private void verifyElasticsearchRefreshToken(String refreshToken) throws IOException {
-        final String body = "{ \"grant_type\":\"refresh_token\", \"refresh_token\":\"" + refreshToken + "\" }";
-        final Response response = client().performRequest("POST", "/_xpack/security/oauth2/token",
-                emptyMap(), new StringEntity(body, ContentType.APPLICATION_JSON), kibanaAuth());
+    private void verifyElasticsearchAccessTokenForAuthorizingRealms(String accessToken) throws IOException {
+        final Map<String, Object> map = callAuthenticateApiUsingAccessToken(accessToken);
+        assertThat(map.get("username"), equalTo("thor"));
+        assertThat(map.get("full_name"), equalTo("Thor Son of Odin"));
+        assertSingletonList(map.get("roles"), "kibana_dashboard_only_user");
+
+        assertThat(map.get("metadata"), instanceOf(Map.class));
+        final Map<?, ?> metadata = (Map<?, ?>) map.get("metadata");
+        assertThat(metadata.get("is_native"), equalTo(true));
+    }
+
+    private Map<String, Object> callAuthenticateApiUsingAccessToken(String accessToken) throws IOException {
+        Request request = new Request("GET", "/_xpack/security/_authenticate");
+        RequestOptions.Builder options = request.getOptions().toBuilder();
+        options.addHeader("Authorization", "Bearer " + accessToken);
+        request.setOptions(options);
+        return entityAsMap(client().performRequest(request));
+    }
+
+    private String verifyElasticsearchRefreshToken(String refreshToken) throws IOException {
+        final Map<String, String> body = MapBuilder.<String, String>newMapBuilder()
+                .put("grant_type", "refresh_token")
+                .put("refresh_token", refreshToken)
+                .map();
+        final Response response = client().performRequest(buildRequest("POST", "/_xpack/security/oauth2/token", body, kibanaAuth()));
         assertOK(response);
 
-        final Map<String, Object> result = parseResponseAsMap(response.getEntity());
+        final Map<String, Object> result = entityAsMap(response);
         final Object newRefreshToken = result.get("refresh_token");
         assertThat(newRefreshToken, notNullValue());
         assertThat(newRefreshToken, instanceOf(String.class));
@@ -285,7 +339,7 @@
         final Object accessToken = result.get("access_token");
         assertThat(accessToken, notNullValue());
         assertThat(accessToken, instanceOf(String.class));
-        verifyElasticsearchAccessToken((String) accessToken);
+        return (String) accessToken;
     }
 
     /**
@@ -351,7 +405,7 @@
         form.setEntity(new UrlEncodedFormEntity(params));
 
         return execute(client, form, context,
-                response -> parseSamlSubmissionForm(response.getEntity().getContent()));
+            response -> parseSamlSubmissionForm(response.getEntity().getContent()));
     }
 
     /**
@@ -361,14 +415,14 @@
      * @param saml The (deflated + base64 encoded) {@code SAMLResponse} parameter to post the ACS
      */
     private Map<String, Object> submitSamlResponse(BasicHttpContext context, CloseableHttpClient client, URI acs, String saml)
-        throws IOException {
+            throws IOException {
         assertThat("SAML submission target", acs, notNullValue());
-        assertThat(acs.getPath(), equalTo(SP_ACS_PATH));
+        assertThat(acs, equalTo(this.acs));
         assertThat("SAML submission content", saml, notNullValue());
 
         // The ACS url provided from the SP is going to be wrong because the gradle
         // build doesn't know what the web server's port is, so it uses a fake one.
-        final HttpPost form = new HttpPost(getUrl(SP_ACS_PATH));
+        final HttpPost form = new HttpPost(getUrl(this.acs.getPath()));
         List<NameValuePair> params = new ArrayList<>();
         params.add(new BasicNameValuePair(SAML_RESPONSE_FIELD, saml));
         form.setEntity(new UrlEncodedFormEntity(params));
@@ -463,13 +517,14 @@
      * sends a redirect to that page.
      */
     private void httpLogin(HttpExchange http) throws IOException {
-        final Response prepare = client().performRequest("POST", "/_xpack/security/saml/prepare",
-                emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON), kibanaAuth());
+        final Map<String, String> body = Collections.singletonMap("acs", this.acs.toString());
+        Request request = buildRequest("POST", "/_xpack/security/saml/prepare", body, kibanaAuth());
+        final Response prepare = client().performRequest(request);
         assertOK(prepare);
-        final Map<String, Object> body = parseResponseAsMap(prepare.getEntity());
-        logger.info("Created SAML authentication request {}", body);
-        http.getResponseHeaders().add("Set-Cookie", REQUEST_ID_COOKIE + "=" + body.get("id"));
-        http.getResponseHeaders().add("Location", (String) body.get("redirect"));
+        final Map<String, Object> responseBody = parseResponseAsMap(prepare.getEntity());
+        logger.info("Created SAML authentication request {}", responseBody);
+        http.getResponseHeaders().add("Set-Cookie", REQUEST_ID_COOKIE + "=" + responseBody.get("id"));
+        http.getResponseHeaders().add("Location", (String) responseBody.get("redirect"));
         http.sendResponseHeaders(302, 0);
         http.close();
     }
@@ -504,9 +559,11 @@
         final String id = getCookie(REQUEST_ID_COOKIE, http);
         assertThat(id, notNullValue());
 
-        final String body = "{ \"content\" : \"" + saml + "\", \"ids\": [\"" + id + "\"] }";
-        return client().performRequest("POST", "/_xpack/security/saml/authenticate",
-                emptyMap(), new StringEntity(body, ContentType.APPLICATION_JSON), kibanaAuth());
+        final Map<String, Object> body = MapBuilder.<String, Object>newMapBuilder()
+                .put("content", saml)
+                .put("ids", Collections.singletonList(id))
+                .map();
+        return client().performRequest(buildRequest("POST", "/_xpack/security/saml/authenticate", body, kibanaAuth()));
     }
 
     private List<NameValuePair> parseRequestForm(HttpExchange http) throws IOException {
@@ -520,6 +577,7 @@
         try {
             final String cookies = http.getRequestHeaders().getFirst("Cookie");
             if (cookies == null) {
+                logger.warn("No cookies in: {}", http.getResponseHeaders());
                 return null;
             }
             Header header = new BasicHeader("Cookie", cookies);
@@ -542,6 +600,20 @@
         assertThat(((List<?>) value), contains(expectedElement));
     }
 
+    private Request buildRequest(String method, String endpoint, Map<String, ?> body, Header... headers) throws IOException {
+        Request request = new Request(method, endpoint);
+        XContentBuilder builder = XContentFactory.jsonBuilder().map(body);
+        if (body != null) {
+            request.setJsonEntity(BytesReference.bytes(builder).utf8ToString());
+        }
+        final RequestOptions.Builder options = request.getOptions().toBuilder();
+        for (Header header : headers) {
+            options.addHeader(header.getName(), header.getValue());
+        }
+        request.setOptions(options);
+        return request;
+    }
+
     private static BasicHeader kibanaAuth() {
         final String auth = UsernamePasswordToken.basicAuthHeaderValue("kibana", new SecureString(KIBANA_PASSWORD.toCharArray()));
         return new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, auth);
diff --git a/x-pack/qa/security-client-tests/build.gradle b/x-pack/qa/security-client-tests/build.gradle
index 97945fb00ef..e676e55a152 100644
--- a/x-pack/qa/security-client-tests/build.gradle
+++ b/x-pack/qa/security-client-tests/build.gradle
@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime')
 }
diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle
index 7aeed3ad62d..aef4fc33f6a 100644
--- a/x-pack/qa/security-example-spi-extension/build.gradle
+++ b/x-pack/qa/security-example-spi-extension/build.gradle
@@ -8,7 +8,7 @@ esplugin {
 }
 
 dependencies {
-  compileOnly project(path: xpackModule('core'), configuration: 'shadow')
+  compileOnly "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime')
 }
diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java
index c6502c05d25..dfd4a81ea21 100644
--- a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java
+++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java
@@ -14,7 +14,7 @@ import org.elasticsearch.xpack.core.security.authc.Realm;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.common.CharArrays;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 
 public class CustomRealm extends Realm {
diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java
index e206de6e392..d1435ebaa3c 100644
--- a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java
+++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java
@@ -14,7 +14,7 @@ import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.notNullValue;
diff --git a/x-pack/qa/security-migrate-tests/build.gradle b/x-pack/qa/security-migrate-tests/build.gradle
index 3a8a0cf1005..abc3564ca13 100644
--- a/x-pack/qa/security-migrate-tests/build.gradle
+++ b/x-pack/qa/security-migrate-tests/build.gradle
@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile project(path: xpackModule('security'), configuration: 'runtime')
   testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime')
 }
diff --git a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java b/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java
index e810e638f68..4ac927c6646 100644
--- a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java
+++ b/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java
@@ -25,7 +25,7 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition;
 import org.elasticsearch.xpack.core.security.client.SecurityClient;
-import org.elasticsearch.protocol.xpack.security.User;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool;
 import org.junit.Before;
diff --git a/x-pack/qa/security-setup-password-tests/build.gradle b/x-pack/qa/security-setup-password-tests/build.gradle
index adb159acf6f..c0801a38b57 100644
--- a/x-pack/qa/security-setup-password-tests/build.gradle
+++ b/x-pack/qa/security-setup-password-tests/build.gradle
@@ -2,7 +2,8 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+  testCompile project(path: xpackModule('core'), configuration: 'default')
   testCompile project(path: xpackModule('security'), configuration: 'runtime')
   testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
 }
diff --git a/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java b/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java
index 74f1223f4a6..860c30c0ddd 100644
--- a/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java
+++ b/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java
@@ -5,8 +5,9 @@
  */
 package org.elasticsearch.xpack.security.authc.esnative.tool;
 
-import org.apache.http.message.BasicHeader;
 import org.elasticsearch.cli.MockTerminal;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
@@ -52,7 +53,7 @@ public class SetupPasswordToolIT extends ESRestTestCase {
         final Path configPath = PathUtils.get(testConfigDir);
         setSystemPropsForTool(configPath);
 
-        Response nodesResponse = client().performRequest("GET", "/_nodes/http");
+        Response nodesResponse = client().performRequest(new Request("GET", "/_nodes/http"));
         Map<String, Object> nodesMap = entityAsMap(nodesResponse);
 
         Map<String, Object> nodes = (Map<String, Object>) nodesMap.get("nodes");
@@ -97,15 +98,16 @@ public class SetupPasswordToolIT extends ESRestTestCase {
             }
         });
 
-        assertEquals(4, userPasswordMap.size());
+        assertEquals(5, userPasswordMap.size());
         userPasswordMap.entrySet().forEach(entry -> {
             final String basicHeader = "Basic " +
                     Base64.getEncoder().encodeToString((entry.getKey() + ":" + entry.getValue()).getBytes(StandardCharsets.UTF_8));
             try {
-                Response authenticateResponse = client().performRequest("GET", "/_xpack/security/_authenticate",
-                        new BasicHeader("Authorization", basicHeader));
-                assertEquals(200, authenticateResponse.getStatusLine().getStatusCode());
-                Map<String, Object> userInfoMap = entityAsMap(authenticateResponse);
+                Request request = new Request("GET", "/_xpack/security/_authenticate");
+                RequestOptions.Builder options = request.getOptions().toBuilder();
+                options.addHeader("Authorization", basicHeader);
+                request.setOptions(options);
+                Map<String, Object> userInfoMap = entityAsMap(client().performRequest(request));
                 assertEquals(entry.getKey(), userInfoMap.get("username"));
             } catch (IOException e) {
                 throw new UncheckedIOException(e);
diff --git a/x-pack/qa/smoke-test-graph-with-security/build.gradle b/x-pack/qa/smoke-test-graph-with-security/build.gradle
index 9cdfaffccfb..f0f819b46d4 100644
--- a/x-pack/qa/smoke-test-graph-with-security/build.gradle
+++ b/x-pack/qa/smoke-test-graph-with-security/build.gradle
@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
 }
 
 // bring in graph rest test suite
diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle
index 8ce0cde7657..7813ff3d3d5 100644
--- a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle
+++ b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle
@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile project(path: xpackModule('watcher'))
   testCompile project(path: xpackModule('monitoring'))
 }
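The SetupPasswordToolIT hunk above also shows where per-request headers now live: they are attached to RequestOptions instead of being passed as Header varargs to performRequest. A self-contained sketch of that pattern (not from the diff; it assumes a node on localhost:9200 and the placeholder credentials elastic:changeme):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RequestOptionsHeaderExample {
    public static void main(String[] args) throws IOException {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            String basicHeader = "Basic " + Base64.getEncoder()
                    .encodeToString("elastic:changeme".getBytes(StandardCharsets.UTF_8)); // placeholder credentials
            Request request = new Request("GET", "/_xpack/security/_authenticate");
            // Headers travel on RequestOptions: rebuild the options, add the header, set them back.
            RequestOptions.Builder options = request.getOptions().toBuilder();
            options.addHeader("Authorization", basicHeader);
            request.setOptions(options);
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}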
diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java
index d89d558f02f..d3b9a974398 100644
--- a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java
+++ b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java
@@ -5,12 +5,10 @@
  */
 package org.elasticsearch.smoketest;
 
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
 import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.rest.ESRestTestCase;
@@ -23,7 +21,6 @@ import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule;
 import org.junit.After;
 
 import java.io.IOException;
-import java.util.Collections;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput;
@@ -36,25 +33,25 @@ public class MonitoringWithWatcherRestIT extends ESRestTestCase {
 
     @After
     public void cleanExporters() throws Exception {
-        String body = Strings.toString(jsonBuilder().startObject().startObject("transient")
-                .nullField("xpack.monitoring.exporters.*")
-                .endObject().endObject());
-        assertOK(adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(),
-                new StringEntity(body, ContentType.APPLICATION_JSON)));
-
-        assertOK(adminClient().performRequest("DELETE", ".watch*", Collections.emptyMap()));
+        Request request = new Request("PUT", "/_cluster/settings");
+        request.setJsonEntity(Strings.toString(jsonBuilder().startObject()
+                .startObject("transient")
+                    .nullField("xpack.monitoring.exporters.*")
+                .endObject().endObject()));
+        adminClient().performRequest(request);
+
+        adminClient().performRequest(new Request("DELETE", "/.watch*"));
     }
 
     public void testThatLocalExporterAddsWatches() throws Exception {
         String watchId = createMonitoringWatch();
 
-        String body = BytesReference.bytes(jsonBuilder().startObject().startObject("transient")
-                .field("xpack.monitoring.exporters.my_local_exporter.type", "local")
-                .field("xpack.monitoring.exporters.my_local_exporter.cluster_alerts.management.enabled", true)
-                .endObject().endObject()).utf8ToString();
-
-        adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(),
-                new StringEntity(body, ContentType.APPLICATION_JSON));
+        Request request = new Request("PUT", "/_cluster/settings");
+        request.setJsonEntity(Strings.toString(jsonBuilder().startObject()
+                .startObject("transient")
+                    .field("xpack.monitoring.exporters.my_local_exporter.type", "local")
+                    .field("xpack.monitoring.exporters.my_local_exporter.cluster_alerts.management.enabled", true)
+                .endObject().endObject()));
+        adminClient().performRequest(request);
 
         assertTotalWatchCount(ClusterAlertsUtil.WATCH_IDS.length);
 
@@ -65,14 +62,14 @@
         String watchId = createMonitoringWatch();
         String httpHost = getHttpHost();
 
-        String body = BytesReference.bytes(jsonBuilder().startObject().startObject("transient")
-                .field("xpack.monitoring.exporters.my_http_exporter.type", "http")
-                .field("xpack.monitoring.exporters.my_http_exporter.host", httpHost)
-                .field("xpack.monitoring.exporters.my_http_exporter.cluster_alerts.management.enabled", true)
-                .endObject().endObject()).utf8ToString();
-
-        adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(),
-                new StringEntity(body, ContentType.APPLICATION_JSON));
+        Request request = new Request("PUT", "/_cluster/settings");
+        request.setJsonEntity(Strings.toString(jsonBuilder().startObject()
+                .startObject("transient")
+                    .field("xpack.monitoring.exporters.my_http_exporter.type", "http")
+                    .field("xpack.monitoring.exporters.my_http_exporter.host", httpHost)
+                    .field("xpack.monitoring.exporters.my_http_exporter.cluster_alerts.management.enabled", true)
+                .endObject().endObject()));
+        adminClient().performRequest(request);
 
         assertTotalWatchCount(ClusterAlertsUtil.WATCH_IDS.length);
 
@@ -80,15 +77,15 @@
     }
 
     private void assertMonitoringWatchHasBeenOverWritten(String watchId) throws Exception {
-        ObjectPath path = ObjectPath.createFromResponse(client().performRequest("GET", "_xpack/watcher/watch/" + watchId));
+        ObjectPath path = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/_xpack/watcher/watch/" + watchId)));
         String interval = path.evaluate("watch.trigger.schedule.interval");
         assertThat(interval, is("1m"));
     }
 
     private void assertTotalWatchCount(int expectedWatches) throws Exception {
         assertBusy(() -> {
-            assertOK(client().performRequest("POST", ".watches/_refresh"));
-            ObjectPath path = ObjectPath.createFromResponse(client().performRequest("POST", ".watches/_count"));
+            assertOK(client().performRequest(new Request("POST", "/.watches/_refresh")));
+            ObjectPath path = ObjectPath.createFromResponse(client().performRequest(new Request("POST", "/.watches/_count")));
             int count = path.evaluate("count");
             assertThat(count, is(expectedWatches));
         });
@@ -97,28 +94,28 @@
     private String createMonitoringWatch() throws Exception {
         String clusterUUID = getClusterUUID();
         String watchId = clusterUUID + "_kibana_version_mismatch";
-        String sampleWatch = WatchSourceBuilders.watchBuilder()
+        Request request = new Request("PUT", "/_xpack/watcher/watch/" + watchId);
+        request.setJsonEntity(WatchSourceBuilders.watchBuilder()
                 .trigger(TriggerBuilders.schedule(new IntervalSchedule(new IntervalSchedule.Interval(1000, MINUTES))))
                 .input(simpleInput())
                 .addAction("logme", ActionBuilders.loggingAction("foo"))
-                .buildAsBytes(XContentType.JSON).utf8ToString();
-        client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, Collections.emptyMap(),
-                new StringEntity(sampleWatch, ContentType.APPLICATION_JSON));
+                .buildAsBytes(XContentType.JSON).utf8ToString());
+        client().performRequest(request);
 
         return watchId;
     }
 
     private String getClusterUUID() throws Exception {
-        Response response = client().performRequest("GET", "_cluster/state/metadata", Collections.emptyMap());
+        Response response = client().performRequest(new Request("GET", "/_cluster/state/metadata"));
         ObjectPath objectPath = ObjectPath.createFromResponse(response);
         String clusterUUID = objectPath.evaluate("metadata.cluster_uuid");
         return clusterUUID;
     }
 
     public String getHttpHost() throws IOException {
-        ObjectPath path = ObjectPath.createFromResponse(client().performRequest("GET", "_cluster/state", Collections.emptyMap()));
+        ObjectPath path = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/_cluster/state")));
         String masterNodeId = path.evaluate("master_node");
 
-        ObjectPath nodesPath = ObjectPath.createFromResponse(client().performRequest("GET", "_nodes", Collections.emptyMap()));
+        ObjectPath nodesPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/_nodes")));
         String httpHost = nodesPath.evaluate("nodes." + masterNodeId + ".http.publish_address");
         return httpHost;
     }
diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle
index 53533bd9b87..4f338d07fb5 100644
--- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle
+++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle
@@ -15,7 +15,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
 }
 
 String outputDir = "${buildDir}/generated-resources/${project.name}"
@@ -138,4 +138,4 @@ processTestResources {
   inputs.properties(expansions)
   MavenFilteringHack.filter(it, expansions)
 }
-}
\ No newline at end of file
+}
diff --git a/x-pack/qa/smoke-test-plugins/build.gradle b/x-pack/qa/smoke-test-plugins/build.gradle
index b66903af18b..3b7661eeeb0 100644
--- a/x-pack/qa/smoke-test-plugins/build.gradle
+++ b/x-pack/qa/smoke-test-plugins/build.gradle
@@ -4,7 +4,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
 }
 
 ext.pluginsCount = 0
diff --git a/x-pack/qa/smoke-test-security-with-mustache/build.gradle b/x-pack/qa/smoke-test-security-with-mustache/build.gradle
index d921c5f5b66..48b525ba3da 100644
--- a/x-pack/qa/smoke-test-security-with-mustache/build.gradle
+++ b/x-pack/qa/smoke-test-security-with-mustache/build.gradle
@@ -2,7 +2,8 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+  testCompile project(path: xpackModule('core'), configuration: 'default')
   testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
   testCompile project(path: ':modules:lang-mustache', configuration: 'runtime')
 }
diff --git a/x-pack/qa/smoke-test-watcher-with-security/build.gradle b/x-pack/qa/smoke-test-watcher-with-security/build.gradle
index a843641be80..50e217b28b2 100644
--- a/x-pack/qa/smoke-test-watcher-with-security/build.gradle
+++ b/x-pack/qa/smoke-test-watcher-with-security/build.gradle
@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
 }
 
 // bring in watcher rest test suite
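The cleanExporters() rewrite in MonitoringWithWatcherRestIT above pairs the Request object with a body assembled through XContent instead of hand-built strings or BytesReference plumbing. A compact, hedged sketch of that pattern (a node on localhost:9200 is assumed; not part of the diff):

import java.io.IOException;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Strings;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;

public class JsonBuilderEntityExample {
    public static void main(String[] args) throws IOException {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Build the settings body with XContent, then attach it to the Request as a JSON entity.
            Request request = new Request("PUT", "/_cluster/settings");
            request.setJsonEntity(Strings.toString(jsonBuilder().startObject()
                    .startObject("transient")
                        .nullField("xpack.monitoring.exporters.*")
                    .endObject().endObject()));
            client.performRequest(request);
        }
    }
}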
diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java
index a989bb47611..0c4afff509e 100644
--- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java
+++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java
@@ -7,9 +7,7 @@ package org.elasticsearch.smoketest;
 
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
-import org.elasticsearch.client.Response;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -49,9 +47,9 @@
                 emptyList(), emptyMap());
 
         // create one document in this index, so we can test in the YAML tests, that the index cannot be accessed
-        Response resp = adminClient().performRequest("PUT", "/index_not_allowed_to_read/doc/1", Collections.emptyMap(),
-                new StringEntity("{\"foo\":\"bar\"}", ContentType.APPLICATION_JSON));
-        assertThat(resp.getStatusLine().getStatusCode(), is(201));
+        Request request = new Request("PUT", "/index_not_allowed_to_read/doc/1");
+        request.setJsonEntity("{\"foo\":\"bar\"}");
+        adminClient().performRequest(request);
 
         assertBusy(() -> {
             ClientYamlTestResponse response =
@@ -129,4 +127,3 @@
                 .build();
     }
 }
-
diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java
index 1c8204aa1ec..17fbf0769fd 100644
--- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java
+++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java
@@ -5,9 +5,8 @@
  */
 package org.elasticsearch.smoketest;
 
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
 import org.apache.http.util.EntityUtils;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.SecureString;
@@ -21,7 +20,6 @@ import org.junit.After;
 import org.junit.Before;
 
 import java.io.IOException;
-import java.util.Collections;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -41,27 +39,28 @@ public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase {
 
     @Before
     public void startWatcher() throws Exception {
-        StringEntity entity = new StringEntity("{ \"value\" : \"15\" }", ContentType.APPLICATION_JSON);
-        assertOK(adminClient().performRequest("PUT", "my_test_index/doc/1", Collections.singletonMap("refresh", "true"), entity));
+        Request createAllowedDoc = new Request("PUT", "/my_test_index/doc/1");
+        createAllowedDoc.setJsonEntity("{ \"value\" : \"15\" }");
+        createAllowedDoc.addParameter("refresh", "true");
+        adminClient().performRequest(createAllowedDoc);
 
         // delete the watcher history to not clutter with entries from other test
-        adminClient().performRequest("DELETE", ".watcher-history-*", Collections.emptyMap());
+        adminClient().performRequest(new Request("DELETE", ".watcher-history-*"));
 
         // create one document in this index, so we can test in the YAML tests, that the index cannot be accessed
-        Response resp = adminClient().performRequest("PUT", "/index_not_allowed_to_read/doc/1", Collections.emptyMap(),
-                new StringEntity("{\"foo\":\"bar\"}", ContentType.APPLICATION_JSON));
-        assertThat(resp.getStatusLine().getStatusCode(), is(201));
+        Request createNotAllowedDoc = new Request("PUT", "/index_not_allowed_to_read/doc/1");
+        createNotAllowedDoc.setJsonEntity("{\"foo\":\"bar\"}");
+        adminClient().performRequest(createNotAllowedDoc);
 
         assertBusy(() -> {
             try {
-                Response statsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats");
+                Response statsResponse = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats"));
                 ObjectPath objectPath = ObjectPath.createFromResponse(statsResponse);
                 String state = objectPath.evaluate("stats.0.watcher_state");
 
                 switch (state) {
                 case "stopped":
-                    Response startResponse = adminClient().performRequest("POST", "_xpack/watcher/_start");
-                    assertOK(startResponse);
+                    Response startResponse = adminClient().performRequest(new Request("POST", "/_xpack/watcher/_start"));
                     String body = EntityUtils.toString(startResponse.getEntity());
                     assertThat(body, containsString("\"acknowledged\":true"));
                     break;
@@ -82,18 +81,18 @@
 
         assertBusy(() -> {
             for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) {
-                assertOK(adminClient().performRequest("HEAD", "_template/" + template));
+                assertOK(adminClient().performRequest(new Request("HEAD", "_template/" + template)));
             }
         });
     }
 
     @After
     public void stopWatcher() throws Exception {
-        assertOK(adminClient().performRequest("DELETE", "my_test_index"));
+        adminClient().performRequest(new Request("DELETE", "/my_test_index"));
 
         assertBusy(() -> {
             try {
-                Response statsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats");
+                Response statsResponse = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats"));
                 ObjectPath objectPath = ObjectPath.createFromResponse(statsResponse);
                 String state = objectPath.evaluate("stats.0.watcher_state");
 
@@ -106,8 +105,7 @@
                 case "starting":
                     throw new AssertionError("waiting until starting state reached started state to stop");
                 case "started":
-                    Response stopResponse = adminClient().performRequest("POST", "_xpack/watcher/_stop", Collections.emptyMap());
-                    assertOK(stopResponse);
+                    Response stopResponse = adminClient().performRequest(new Request("POST", "/_xpack/watcher/_stop"));
                     String body = EntityUtils.toString(stopResponse.getEntity());
                     assertThat(body, containsString("\"acknowledged\":true"));
                     break;
@@ -137,6 +135,7 @@
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33320")
     public void testSearchInputHasPermissions() throws Exception {
         try (XContentBuilder builder = jsonBuilder()) {
             builder.startObject();
@@ -160,6 +159,7 @@
         assertThat(conditionMet, is(true));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29893")
     public void testSearchInputWithInsufficientPrivileges() throws Exception {
         String indexName = "index_not_allowed_to_read";
         try (XContentBuilder builder = jsonBuilder()) {
@@ -186,6 +186,7 @@
         assertThat(conditionMet, is(false));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33320")
     public void testSearchTransformHasPermissions() throws Exception {
         try (XContentBuilder builder = jsonBuilder()) {
             builder.startObject();
@@ -210,11 +211,12 @@
         boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met");
         assertThat(conditionMet, is(true));
 
-        ObjectPath getObjectPath = ObjectPath.createFromResponse(client().performRequest("GET", "my_test_index/doc/my-id"));
+        ObjectPath getObjectPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/my_test_index/doc/my-id")));
         String value = getObjectPath.evaluate("_source.hits.hits.0._source.value");
         assertThat(value, is("15"));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33291")
     public void testSearchTransformInsufficientPermissions() throws Exception {
         try (XContentBuilder builder = jsonBuilder()) {
             builder.startObject();
@@ -238,11 +240,11 @@
 
         getWatchHistoryEntry(watchId);
 
-        Response response = adminClient().performRequest("GET", "my_test_index/doc/some-id",
-                Collections.singletonMap("ignore", "404"));
+        Response response = adminClient().performRequest(new Request("HEAD", "/my_test_index/doc/some-id"));
         assertThat(response.getStatusLine().getStatusCode(), is(404));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30777")
     public void testIndexActionHasPermissions() throws Exception {
         try (XContentBuilder builder = jsonBuilder()) {
             builder.startObject();
@@ -262,11 +264,12 @@
         boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met");
         assertThat(conditionMet, is(true));
 
-        ObjectPath getObjectPath = ObjectPath.createFromResponse(client().performRequest("GET", "my_test_index/doc/my-id"));
+        ObjectPath getObjectPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/my_test_index/doc/my-id")));
         String spam = getObjectPath.evaluate("_source.spam");
         assertThat(spam, is("eggs"));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33320")
     public void testIndexActionInsufficientPrivileges() throws Exception {
         try (XContentBuilder builder = jsonBuilder()) {
             builder.startObject();
@@ -286,16 +289,14 @@
         boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met");
         assertThat(conditionMet, is(true));
 
-        Response response = adminClient().performRequest("GET", "index_not_allowed_to_read/doc/my-id",
-                Collections.singletonMap("ignore", "404"));
+        Response response = adminClient().performRequest(new Request("HEAD", "/index_not_allowed_to_read/doc/my-id"));
         assertThat(response.getStatusLine().getStatusCode(), is(404));
     }
 
     private void indexWatch(String watchId, XContentBuilder builder) throws Exception {
-        StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
-
-        Response response = client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, Collections.emptyMap(), entity);
-        assertOK(response);
+        Request request = new Request("PUT", "/_xpack/watcher/watch/" + watchId);
+        request.setJsonEntity(Strings.toString(builder));
+        Response response = client().performRequest(request);
         Map<String, Object> responseMap = entityAsMap(response);
         assertThat(responseMap, hasEntry("_id", watchId));
     }
@@ -307,7 +308,7 @@
     private ObjectPath getWatchHistoryEntry(String watchId, String state) throws Exception {
         final AtomicReference<ObjectPath> objectPathReference = new AtomicReference<>();
         assertBusy(() -> {
-            client().performRequest("POST", ".watcher-history-*/_refresh");
+            client().performRequest(new Request("POST", "/.watcher-history-*/_refresh"));
 
            try (XContentBuilder builder = jsonBuilder()) {
                builder.startObject();
@@ -323,8 +324,9 @@
                     .endObject().endArray();
                 builder.endObject();
 
-                StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
-                Response response = client().performRequest("POST", ".watcher-history-*/_search", Collections.emptyMap(), entity);
+                Request searchRequest = new Request("POST", "/.watcher-history-*/_search");
+                searchRequest.setJsonEntity(Strings.toString(builder));
+                Response response = client().performRequest(searchRequest);
                 ObjectPath objectPath = ObjectPath.createFromResponse(response);
                 int totalHits = objectPath.evaluate("hits.total");
                 assertThat(totalHits, is(greaterThanOrEqualTo(1)));
diff --git a/x-pack/qa/smoke-test-watcher/build.gradle b/x-pack/qa/smoke-test-watcher/build.gradle
index dc87248df61..5923afcacad 100644
--- a/x-pack/qa/smoke-test-watcher/build.gradle
+++ b/x-pack/qa/smoke-test-watcher/build.gradle
@@ -7,7 +7,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile project(path: xpackModule('watcher'), configuration: 'runtime')
   testCompile project(path: ':modules:lang-mustache', configuration: 'runtime')
   testCompile project(path: ':modules:lang-painless', configuration: 'runtime')
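The startWatcher()/stopWatcher() fixtures above rely on assertBusy to poll the watcher stats API until the node settles into the desired state. A hedged sketch of that polling idiom, with a hypothetical helper name and assuming a subclass of ESRestTestCase (this class is illustrative and not part of the diff):

import java.util.concurrent.TimeUnit;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.yaml.ObjectPath;

public abstract class WatcherStatePollingExample extends ESRestTestCase {

    // Polls _xpack/watcher/stats until the node reports "started", nudging a
    // stopped node with a _start call, like the @Before methods in the tests above.
    protected void awaitWatcherStarted() throws Exception {
        assertBusy(() -> {
            Response stats = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats"));
            String state = ObjectPath.createFromResponse(stats).evaluate("stats.0.watcher_state");
            if ("stopped".equals(state)) {
                adminClient().performRequest(new Request("POST", "/_xpack/watcher/_start"));
            }
            assertEquals("started", state);
        }, 30, TimeUnit.SECONDS);
    }
}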
adminClient().performRequest(new Request("POST", "/_xpack/watcher/_start")); boolean isAcknowledged = ObjectPath.createFromResponse(startResponse).evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); break; @@ -65,7 +63,7 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { assertBusy(() -> { for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { - Response templateExistsResponse = adminClient().performRequest("HEAD", "_template/" + template, emptyMap()); + Response templateExistsResponse = adminClient().performRequest(new Request("HEAD", "/_template/" + template)); assertThat(templateExistsResponse.getStatusLine().getStatusCode(), is(200)); } }); @@ -74,7 +72,7 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { @After public void stopWatcher() throws Exception { assertBusy(() -> { - Response response = adminClient().performRequest("GET", "_xpack/watcher/stats", emptyMap()); + Response response = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats")); String state = ObjectPath.createFromResponse(response).evaluate("stats.0.watcher_state"); switch (state) { @@ -86,7 +84,7 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { case "starting": throw new AssertionError("waiting until starting state reached started state to stop"); case "started": - Response stopResponse = adminClient().performRequest("POST", "/_xpack/watcher/_stop", emptyMap()); + Response stopResponse = adminClient().performRequest(new Request("POST", "/_xpack/watcher/_stop")); boolean isAcknowledged = ObjectPath.createFromResponse(stopResponse).evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); break; @@ -108,16 +106,17 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32299") public void testMonitorClusterHealth() throws Exception { String watchId = "cluster_health_watch"; // get master publish address - Response clusterStateResponse = adminClient().performRequest("GET", "_cluster/state"); + Response clusterStateResponse = adminClient().performRequest(new Request("GET", "/_cluster/state")); ObjectPath clusterState = ObjectPath.createFromResponse(clusterStateResponse); String masterNode = clusterState.evaluate("master_node"); assertThat(masterNode, is(notNullValue())); - Response statsResponse = adminClient().performRequest("GET", "_nodes"); + Response statsResponse = adminClient().performRequest(new Request("GET", "/_nodes")); ObjectPath stats = ObjectPath.createFromResponse(statsResponse); String address = stats.evaluate("nodes." 
+ masterNode + ".http.publish_address"); assertThat(address, is(notNullValue())); @@ -163,16 +162,15 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { } private void indexWatch(String watchId, XContentBuilder builder) throws Exception { - StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - - Response response = client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, emptyMap(), entity); - assertOK(response); + Request request = new Request("PUT", "/_xpack/watcher/watch/" + watchId); + request.setJsonEntity(Strings.toString(builder)); + Response response = client().performRequest(request); Map responseMap = entityAsMap(response); assertThat(responseMap, hasEntry("_id", watchId)); } private void deleteWatch(String watchId) throws IOException { - Response response = client().performRequest("DELETE", "_xpack/watcher/watch/" + watchId); + Response response = client().performRequest(new Request("DELETE", "/_xpack/watcher/watch/" + watchId)); assertOK(response); ObjectPath path = ObjectPath.createFromResponse(response); boolean found = path.evaluate("found"); @@ -182,7 +180,7 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { private ObjectPath getWatchHistoryEntry(String watchId) throws Exception { final AtomicReference objectPathReference = new AtomicReference<>(); assertBusy(() -> { - client().performRequest("POST", ".watcher-history-*/_refresh"); + client().performRequest(new Request("POST", "/.watcher-history-*/_refresh")); try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -194,8 +192,9 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { .endObject().endArray(); builder.endObject(); - StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - Response response = client().performRequest("POST", ".watcher-history-*/_search", emptyMap(), entity); + Request searchRequest = new Request("POST", "/.watcher-history-*/_search"); + searchRequest.setJsonEntity(Strings.toString(builder)); + Response response = client().performRequest(searchRequest); ObjectPath objectPath = ObjectPath.createFromResponse(response); int totalHits = objectPath.evaluate("hits.total"); assertThat(totalHits, is(greaterThanOrEqualTo(1))); @@ -208,7 +207,7 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { } private void assertWatchCount(int expectedWatches) throws IOException { - Response watcherStatsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); + Response watcherStatsResponse = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats")); ObjectPath objectPath = ObjectPath.createFromResponse(watcherStatsResponse); int watchCount = objectPath.evaluate("stats.0.watch_count"); assertThat(watchCount, is(expectedWatches)); diff --git a/x-pack/qa/sql/build.gradle b/x-pack/qa/sql/build.gradle index 17a1d5acdc9..baaf0451e51 100644 --- a/x-pack/qa/sql/build.gradle +++ b/x-pack/qa/sql/build.gradle @@ -1,4 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks import org.elasticsearch.gradle.test.RunTask description = 'Integration tests for SQL' @@ -29,8 +28,7 @@ dependenciesInfo.enabled = false // the main files are actually test files, so use the appropriate forbidden api sigs forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/es-all-signatures.txt'), - PrecommitTasks.getResource('/forbidden/es-test-signatures.txt')] + replaceSignatureFiles 'es-all-signatures', 
'es-test-signatures' } thirdPartyAudit.excludes = [ diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpectIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpecIT.java similarity index 89% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpectIT.java rename to x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpecIT.java index 2c99c8b5383..017fc4b5238 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpectIT.java +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpecIT.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.client.RestClient; import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase; import org.elasticsearch.xpack.qa.sql.jdbc.DataLoader; -import org.elasticsearch.xpack.qa.sql.jdbc.JdbcTestUtils; +import org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert; import org.elasticsearch.xpack.qa.sql.jdbc.SpecBaseIntegrationTestCase; import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase; @@ -36,7 +36,7 @@ import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.specParser; * That's not to say the two cannot be merged however that felt like too much of an effort * at this stage and, to not keep things stalling, started with this approach. */ -public class JdbcDocCsvSpectIT extends SpecBaseIntegrationTestCase { +public class JdbcDocCsvSpecIT extends SpecBaseIntegrationTestCase { private final CsvTestCase testCase; @@ -56,7 +56,7 @@ public class JdbcDocCsvSpectIT extends SpecBaseIntegrationTestCase { return readScriptSpec("/docs.csv-spec", parser); } - public JdbcDocCsvSpectIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + public JdbcDocCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { super(fileName, groupName, testName, lineNumber); this.testCase = testCase; } @@ -68,8 +68,8 @@ public class JdbcDocCsvSpectIT extends SpecBaseIntegrationTestCase { // // uncomment this to printout the result set and create new CSV tests // - JdbcTestUtils.logLikeCLI(elastic, log); - //JdbcAssert.assertResultSets(expected, elastic, log, true); + //JdbcTestUtils.logLikeCLI(elastic, log); + JdbcAssert.assertResultSets(expected, elastic, log, true); } @Override diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java new file mode 100644 index 00000000000..30756a11f62 --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.ResultSetTestCase; + +/* + * Integration testing class for "no security" (cluster running without the Security plugin, + * or Security is disabled) scenario. Runs all tests in the base class.
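+ *
+ * As a rough sketch (hypothetical, not part of this change), a security-enabled
+ * counterpart would extend the same base class and differ only in the REST client
+ * settings it supplies:
+ *
+ * <pre>
+ * public class JdbcSecurityResultSetIT extends ResultSetTestCase {
+ *     // would override restClientSettings() to send basic-auth credentials,
+ *     // mirroring the watcher security suites above
+ * }
+ * </pre>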
+ */ +public class JdbcResultSetIT extends ResultSetTestCase { +} diff --git a/x-pack/qa/sql/security/build.gradle b/x-pack/qa/sql/security/build.gradle index f02886f80a1..15f7734f942 100644 --- a/x-pack/qa/sql/security/build.gradle +++ b/x-pack/qa/sql/security/build.gradle @@ -1,5 +1,5 @@ dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } Project mainProject = project @@ -20,7 +20,7 @@ subprojects { } dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } integTestCluster { diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java index abf84dc7311..856629f8d91 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java @@ -113,18 +113,18 @@ public final class CsvTestUtils { } private static Tuple extractColumnTypesFromHeader(String header) { - String[] columnTypes = Strings.delimitedListToStringArray(header, "|", " \t"); + String[] columnTypes = Strings.tokenizeToStringArray(header, "|"); StringBuilder types = new StringBuilder(); StringBuilder columns = new StringBuilder(); for (String column : columnTypes) { - String[] nameType = Strings.delimitedListToStringArray(column, ":"); + String[] nameType = Strings.delimitedListToStringArray(column.trim(), ":"); assertThat("If at least one column has a type associated with it, all columns should have types", nameType, arrayWithSize(2)); if (types.length() > 0) { types.append(","); columns.append("|"); } - columns.append(nameType[0]); - types.append(resolveColumnType(nameType[1])); + columns.append(nameType[0].trim()); + types.append(resolveColumnType(nameType[1].trim())); } return new Tuple<>(columns.toString(), types.toString()); } @@ -160,17 +160,28 @@ public final class CsvTestUtils { } private static class CsvSpecParser implements SpecBaseIntegrationTestCase.Parser { + private final StringBuilder query = new StringBuilder(); private final StringBuilder data = new StringBuilder(); private CsvTestCase testCase; @Override public Object parse(String line) { - // beginning of the section + // read the query if (testCase == null) { - // pick up the query - testCase = new CsvTestCase(); - testCase.query = line.endsWith(";") ? 
line.substring(0, line.length() - 1) : line; + if (line.endsWith(";")) { + // pick up the query + testCase = new CsvTestCase(); + query.append(line.substring(0, line.length() - 1).trim()); + testCase.query = query.toString(); + query.setLength(0); + } + // keep reading the query + else { + query.append(line); + query.append("\r\n"); + } } + // read the results else { // read data if (line.startsWith(";")) { @@ -195,4 +206,4 @@ public final class CsvTestUtils { public String query; public String expectedResults; } -} \ No newline at end of file +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java index 05140577bcd..22ba2a1037d 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java @@ -42,14 +42,15 @@ public class DataLoader { } protected static void loadEmpDatasetIntoEs(RestClient client) throws Exception { - loadEmpDatasetIntoEs(client, "test_emp"); - loadEmpDatasetIntoEs(client, "test_emp_copy"); + loadEmpDatasetIntoEs(client, "test_emp", "employees"); + loadEmpDatasetIntoEs(client, "test_emp_copy", "employees"); + loadEmpDatasetIntoEs(client, "test_emp_with_nulls", "employees_with_nulls"); makeAlias(client, "test_alias", "test_emp", "test_emp_copy"); makeAlias(client, "test_alias_emp", "test_emp", "test_emp_copy"); } public static void loadDocsDatasetIntoEs(RestClient client) throws Exception { - loadEmpDatasetIntoEs(client, "emp"); + loadEmpDatasetIntoEs(client, "emp", "employees"); loadLibDatasetIntoEs(client, "library"); makeAlias(client, "employees", "emp"); } @@ -62,7 +63,7 @@ public class DataLoader { .endObject(); } - protected static void loadEmpDatasetIntoEs(RestClient client, String index) throws Exception { + protected static void loadEmpDatasetIntoEs(RestClient client, String index, String fileName) throws Exception { Request request = new Request("PUT", "/" + index); XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); createIndex.startObject("settings"); @@ -129,15 +130,18 @@ public class DataLoader { request = new Request("POST", "/" + index + "/emp/_bulk"); request.addParameter("refresh", "true"); StringBuilder bulk = new StringBuilder(); - csvToLines("employees", (titles, fields) -> { + csvToLines(fileName, (titles, fields) -> { bulk.append("{\"index\":{}}\n"); bulk.append('{'); String emp_no = fields.get(1); for (int f = 0; f < fields.size(); f++) { - if (f != 0) { - bulk.append(','); + // an empty value in the csv file is treated as 'null', thus skipping it in the bulk request + if (fields.get(f).trim().length() > 0) { + if (f != 0) { + bulk.append(','); + } + bulk.append('"').append(titles.get(f)).append("\":\"").append(fields.get(f)).append('"'); } - bulk.append('"').append(titles.get(f)).append("\":\"").append(fields.get(f)).append('"'); } // append department List> list = dep_emp.get(emp_no); diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java index 47f531ebd1f..133006c66a8 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java @@ -176,8 +176,8 @@ public class JdbcAssert { Object expectedObject = expected.getObject(column); Object actualObject = lenient ? 
actual.getObject(column, expectedColumnClass) : actual.getObject(column); - String msg = format(Locale.ROOT, "Different result for column [" + metaData.getColumnName(column) + "], " - + "entry [" + (count + 1) + "]"); + String msg = format(Locale.ROOT, "Different result for column [%s], entry [%d]", + metaData.getColumnName(column), count + 1); // handle nulls first if (expectedObject == null || actualObject == null) { @@ -230,4 +230,4 @@ public class JdbcAssert { return columnType; } -} \ No newline at end of file +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java index 861a6dccaba..447fc4f17e1 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java @@ -5,55 +5,1067 @@ */ package org.elasticsearch.xpack.qa.sql.jdbc; +import org.elasticsearch.client.Request; +import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; +import org.elasticsearch.xpack.sql.jdbc.jdbcx.JdbcDataSource; +import org.elasticsearch.xpack.sql.type.DataType; + import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.sql.Blob; +import java.sql.Clob; import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.JDBCType; +import java.sql.NClob; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; import java.sql.Timestamp; +import java.sql.Types; +import java.util.Arrays; +import java.util.Calendar; import java.util.Date; +import java.util.GregorianCalendar; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.Set; +import java.util.TimeZone; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.lang.String.format; +import static java.util.Calendar.DAY_OF_MONTH; +import static java.util.Calendar.ERA; +import static java.util.Calendar.HOUR_OF_DAY; +import static java.util.Calendar.MILLISECOND; +import static java.util.Calendar.MINUTE; +import static java.util.Calendar.MONTH; +import static java.util.Calendar.SECOND; +import static java.util.Calendar.YEAR; public class ResultSetTestCase extends JdbcIntegrationTestCase { - public void testGettingTimestamp() throws Exception { - long randomMillis = randomLongBetween(0, System.currentTimeMillis()); + + static final Set<String> fieldsNames = Stream.of("test_byte", "test_integer", "test_long", "test_short", "test_double", + "test_float", "test_keyword") + .collect(Collectors.toCollection(HashSet::new)); + static final Map<Tuple<String,Object>,JDBCType> dateTimeTestingFields = new HashMap<Tuple<String,Object>,JDBCType>(); + static final String SELECT_ALL_FIELDS = "SELECT test_boolean, test_byte, test_integer," + + "test_long, test_short, test_double, test_float, test_keyword, test_date FROM test"; + static final String
SELECT_WILDCARD = "SELECT * FROM test"; + static { + dateTimeTestingFields.put(new Tuple("test_boolean", true), DataType.BOOLEAN.jdbcType); + dateTimeTestingFields.put(new Tuple("test_byte", 1), DataType.BYTE.jdbcType); + dateTimeTestingFields.put(new Tuple("test_integer", 1), DataType.INTEGER.jdbcType); + dateTimeTestingFields.put(new Tuple("test_long", 1L), DataType.LONG.jdbcType); + dateTimeTestingFields.put(new Tuple("test_short", 1), DataType.SHORT.jdbcType); + dateTimeTestingFields.put(new Tuple("test_double", 1d), DataType.DOUBLE.jdbcType); + dateTimeTestingFields.put(new Tuple("test_float", 1f), DataType.FLOAT.jdbcType); + dateTimeTestingFields.put(new Tuple("test_keyword", "true"), DataType.KEYWORD.jdbcType); + } + + // Byte values testing + public void testGettingValidByteWithoutCasting() throws Exception { + byte random1 = randomByte(); + byte random2 = randomValueOtherThan(random1, () -> randomByte()); + byte random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomByte()); + + createTestDataForByteValueTests(random1, random2, random3); + + doWithQuery("SELECT test_byte, test_null_byte, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.TINYINT, resultSetMetaData.getColumnType(1)); + assertEquals(Types.TINYINT, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getByte(1)); + assertEquals(random1, results.getByte("test_byte")); + assertEquals(random1, (byte) results.getObject("test_byte", Byte.class)); + assertTrue(results.getObject(1) instanceof Byte); + + assertEquals(0, results.getByte(2)); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_byte")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getByte(1)); + assertEquals(random2, results.getByte("test_byte")); + assertTrue(results.getObject(1) instanceof Byte); + assertEquals(random3, results.getByte("test_keyword")); + + assertFalse(results.next()); + }); + } + + public void testGettingValidByteWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomByte()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + byte actual = results.getObject(e.getKey(), Byte.class); + if (e.getValue() instanceof Double) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), results.getByte(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), actual); + } else if (e.getValue() instanceof Float) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), results.getByte(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), actual); + } else { + assertEquals("For field " + e.getKey(), e.getValue().byteValue(), results.getByte(e.getKey())); + assertEquals("For field " + e.getKey(), e.getValue().byteValue(), actual); + } + } + }); + } + + public void testGettingInvalidByte() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + int intNotByte = randomIntBetween(Byte.MAX_VALUE + 1, Integer.MAX_VALUE); + long longNotByte = 
randomLongBetween(Byte.MAX_VALUE + 1, Long.MAX_VALUE); + short shortNotByte = (short) randomIntBetween(Byte.MAX_VALUE + 1, Short.MAX_VALUE); + double doubleNotByte = randomDoubleBetween(Byte.MAX_VALUE + 1, Double.MAX_VALUE, true); + float floatNotByte = randomFloatBetween(Byte.MAX_VALUE + 1, Float.MAX_VALUE); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + String doubleErrorMessage = (doubleNotByte > Long.MAX_VALUE || doubleNotByte < Long.MIN_VALUE) ? + Double.toString(doubleNotByte) : Long.toString(Math.round(doubleNotByte)); + + index("test", "1", builder -> { + builder.field("test_integer", intNotByte); + builder.field("test_long", longNotByte); + builder.field("test_short", shortNotByte); + builder.field("test_double", doubleNotByte); + builder.field("test_float", floatNotByte); + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getByte("test_integer")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", intNotByte), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_integer", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", intNotByte), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_short")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", shortNotByte), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_short", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", shortNotByte), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_long")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotByte)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_long", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotByte)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_double")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_double", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_float")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotByte)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_float", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotByte)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Byte", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Byte.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Byte", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value 
[%.128s] of type [TIMESTAMP] to a Byte", randomDate), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Byte.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Byte", randomDate), + sqle.getMessage()); + }); + } + + // Short values testing + public void testGettingValidShortWithoutCasting() throws Exception { + short random1 = randomShort(); + short random2 = randomValueOtherThan(random1, () -> randomShort()); + short random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomShort()); + + createTestDataForShortValueTests(random1, random2, random3); + + doWithQuery("SELECT test_short, test_null_short, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.SMALLINT, resultSetMetaData.getColumnType(1)); + assertEquals(Types.SMALLINT, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getShort(1)); + assertEquals(random1, results.getShort("test_short")); + assertEquals(random1, results.getObject("test_short")); + assertTrue(results.getObject(1) instanceof Short); + + assertEquals(0, results.getShort(2)); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_short")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getShort(1)); + assertEquals(random2, results.getShort("test_short")); + assertTrue(results.getObject(1) instanceof Short); + assertEquals(random3, results.getShort("test_keyword")); + + assertFalse(results.next()); + }); + } + + public void testGettingValidShortWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomShort()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + short actual = (short) results.getObject(e.getKey(), Short.class); + if (e.getValue() instanceof Double) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), results.getShort(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), actual); + } else if (e.getValue() instanceof Float) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), results.getShort(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), actual); + } else { + assertEquals("For field " + e.getKey(), + e.getValue().shortValue(), results.getShort(e.getKey())); + assertEquals("For field " + e.getKey(), e.getValue().shortValue(), actual); + } + } + }); + } + + public void testGettingInvalidShort() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + int intNotShort = randomIntBetween(Short.MAX_VALUE + 1, Integer.MAX_VALUE); + long longNotShort = randomLongBetween(Short.MAX_VALUE + 1, Long.MAX_VALUE); + double doubleNotShort = randomDoubleBetween(Short.MAX_VALUE + 1, Double.MAX_VALUE, true); + float floatNotShort = randomFloatBetween(Short.MAX_VALUE + 1, Float.MAX_VALUE); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + String doubleErrorMessage = (doubleNotShort > 
Long.MAX_VALUE || doubleNotShort < Long.MIN_VALUE) ? + Double.toString(doubleNotShort) : Long.toString(Math.round(doubleNotShort)); + + index("test", "1", builder -> { + builder.field("test_integer", intNotShort); + builder.field("test_long", longNotShort); + builder.field("test_double", doubleNotShort); + builder.field("test_float", floatNotShort); + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getShort("test_integer")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", intNotShort), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_integer", Short.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", intNotShort), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_long")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotShort)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_long", Short.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotShort)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_double")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_double", Short.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_float")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotShort)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_float", Short.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotShort)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Short", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Short.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Short", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Short", randomDate), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Short.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Short", randomDate), + sqle.getMessage()); + }); + } + + // Integer values testing + public void testGettingValidIntegerWithoutCasting() throws Exception { + int random1 = randomInt(); + int random2 = randomValueOtherThan(random1, () -> randomInt()); + int random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomInt()); + + createTestDataForIntegerValueTests(random1, random2, random3); + + doWithQuery("SELECT test_integer,test_null_integer,test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + 
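+            // The assertions below exercise the three access styles the driver supports:
+            // by column index (getInt(1)), by column label (getInt("test_integer")) and
+            // the typed getObject(label, Integer.class) introduced in JDBC 4.1. A plain
+            // consumer would follow the same pattern; as a hypothetical usage sketch
+            // (URL and index name are illustrative only):
+            //
+            //   try (Connection con = DriverManager.getConnection("jdbc:es://localhost:9200");
+            //        PreparedStatement st = con.prepareStatement("SELECT test_integer FROM test");
+            //        ResultSet rs = st.executeQuery()) {
+            //       while (rs.next()) {
+            //           int value = rs.getInt("test_integer");
+            //       }
+            //   }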
results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.INTEGER, resultSetMetaData.getColumnType(1)); + assertEquals(Types.INTEGER, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getInt(1)); + assertEquals(random1, results.getInt("test_integer")); + assertEquals(random1, (int) results.getObject("test_integer", Integer.class)); + assertTrue(results.getObject(1) instanceof Integer); + + assertEquals(0, results.getInt(2)); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_integer")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getInt(1)); + assertEquals(random2, results.getInt("test_integer")); + assertTrue(results.getObject(1) instanceof Integer); + assertEquals(random3, results.getInt("test_keyword")); + + assertFalse(results.next()); + }); + } + + public void testGettingValidIntegerWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomInt()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + int actual = results.getObject(e.getKey(), Integer.class); + if (e.getValue() instanceof Double) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), results.getInt(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), actual); + } else if (e.getValue() instanceof Float) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), results.getInt(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), actual); + } else { + assertEquals("For field " + e.getKey(), e.getValue().intValue(), results.getInt(e.getKey())); + assertEquals("For field " + e.getKey(), e.getValue().intValue(), actual); + } + } + }); + } + + public void testGettingInvalidInteger() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + long longNotInt = randomLongBetween(getMaxIntPlusOne(), Long.MAX_VALUE); + double doubleNotInt = randomDoubleBetween(getMaxIntPlusOne().doubleValue(), Double.MAX_VALUE, true); + float floatNotInt = randomFloatBetween(getMaxIntPlusOne().floatValue(), Float.MAX_VALUE); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + String doubleErrorMessage = (doubleNotInt > Long.MAX_VALUE || doubleNotInt < Long.MIN_VALUE) ? 
+ Double.toString(doubleNotInt) : Long.toString(Math.round(doubleNotInt)); + + index("test", "1", builder -> { + builder.field("test_long", longNotInt); + builder.field("test_double", doubleNotInt); + builder.field("test_float", floatNotInt); + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getInt("test_long")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotInt)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_long", Integer.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotInt)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getInt("test_double")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_double", Integer.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getInt("test_float")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotInt)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_float", Integer.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotInt)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getInt("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to an Integer", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Integer.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to an Integer", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getInt("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to an Integer", randomDate), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Integer.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to an Integer", randomDate), + sqle.getMessage()); + }); + } + + // Long values testing + public void testGettingValidLongWithoutCasting() throws Exception { + long random1 = randomLong(); + long random2 = randomValueOtherThan(random1, () -> randomLong()); + long random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomLong()); + + createTestDataForLongValueTests(random1, random2, random3); + + doWithQuery("SELECT test_long, test_null_long, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.BIGINT, resultSetMetaData.getColumnType(1)); + assertEquals(Types.BIGINT, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getLong(1)); + assertEquals(random1, results.getLong("test_long")); + assertEquals(random1, (long) results.getObject("test_long", Long.class)); + assertTrue(results.getObject(1) instanceof Long); + + assertEquals(0, results.getLong(2)); + 
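+            // JDBC contract: primitive getters such as getLong() return 0 when the SQL
+            // value is NULL, so the only way to tell a stored 0 apart from NULL is to
+            // call wasNull() right after the getter, which the next assertions verify.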
assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_long")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getLong(1)); + assertEquals(random2, results.getLong("test_long")); + assertTrue(results.getObject(1) instanceof Long); + assertEquals(random3, results.getLong("test_keyword")); + + assertFalse(results.next()); + }); + } + + public void testGettingValidLongWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomLong()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + long actual = results.getObject(e.getKey(), Long.class); + if (e.getValue() instanceof Double || e.getValue() instanceof Float) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), results.getLong(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), actual); + } else { + assertEquals("For field " + e.getKey(), e.getValue().longValue(), results.getLong(e.getKey())); + assertEquals("For field " + e.getKey(), e.getValue().longValue(), actual); + } + } + }); + } + + public void testGettingInvalidLong() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + double doubleNotLong = randomDoubleBetween(getMaxLongPlusOne().doubleValue(), Double.MAX_VALUE, true); + float floatNotLong = randomFloatBetween(getMaxLongPlusOne().floatValue(), Float.MAX_VALUE); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + index("test", "1", builder -> { + builder.field("test_double", doubleNotLong); + builder.field("test_float", floatNotLong); + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getLong("test_double")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(doubleNotLong)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_double", Long.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(doubleNotLong)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getLong("test_float")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotLong)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_float", Long.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotLong)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getLong("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Long", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Long.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Long", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getLong("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a 
Long", randomDate), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Long.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Long", randomDate), + sqle.getMessage()); + }); + } + + // Double values testing + public void testGettingValidDoubleWithoutCasting() throws Exception { + double random1 = randomDouble(); + double random2 = randomValueOtherThan(random1, () -> randomDouble()); + double random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomDouble()); + + createTestDataForDoubleValueTests(random1, random2, random3); + + doWithQuery("SELECT test_double, test_null_double, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.DOUBLE, resultSetMetaData.getColumnType(1)); + assertEquals(Types.DOUBLE, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getDouble(1), 0.0d); + assertEquals(random1, results.getDouble("test_double"), 0.0d); + assertEquals(random1, results.getObject("test_double", Double.class), 0.0d); + assertTrue(results.getObject(1) instanceof Double); + + assertEquals(0, results.getDouble(2), 0.0d); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_double")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getDouble(1), 0.0d); + assertEquals(random2, results.getDouble("test_double"), 0.0d); + assertTrue(results.getObject(1) instanceof Double); + assertEquals(random3, results.getDouble("test_keyword"), 0.0d); + + assertFalse(results.next()); + }); + } + + public void testGettingValidDoubleWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomDouble()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + assertEquals("For field " + e.getKey(), e.getValue().doubleValue(), results.getDouble(e.getKey()), 0.0d); + assertEquals("For field " + e.getKey(), + e.getValue().doubleValue(), results.getObject(e.getKey(), Double.class), 0.0d); + } + }); + } + + public void testGettingInvalidDouble() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + index("test", "1", builder -> { + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getDouble("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Double", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Double.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Double", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getDouble("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Double", randomDate), 
+ sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Double.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Double", randomDate), + sqle.getMessage()); + }); + } + + // Float values testing + public void testGettingValidFloatWithoutCasting() throws Exception { + float random1 = randomFloat(); + float random2 = randomValueOtherThan(random1, () -> randomFloat()); + float random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomFloat()); + + createTestDataForFloatValueTests(random1, random2, random3); + + doWithQuery("SELECT test_float, test_null_float, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.REAL, resultSetMetaData.getColumnType(1)); + assertEquals(Types.REAL, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getFloat(1), 0.0f); + assertEquals(random1, results.getFloat("test_float"), 0.0f); + assertEquals(random1, results.getObject("test_float", Float.class), 0.0f); + assertTrue(results.getObject(1) instanceof Float); + + assertEquals(0, results.getFloat(2), 0.0d); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_float")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getFloat(1), 0.0d); + assertEquals(random2, results.getFloat("test_float"), 0.0d); + assertTrue(results.getObject(1) instanceof Float); + assertEquals(random3, results.getFloat("test_keyword"), 0.0d); + + assertFalse(results.next()); + }); + } + + public void testGettingValidFloatWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomFloat()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + assertEquals("For field " + e.getKey(), e.getValue().floatValue(), results.getFloat(e.getKey()), 0.0f); + assertEquals("For field " + e.getKey(), + e.getValue().floatValue(), results.getObject(e.getKey(), Float.class), 0.0f); + } + }); + } + + public void testGettingInvalidFloat() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + index("test", "1", builder -> { + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getFloat("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Float", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Float.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Float", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getFloat("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Float", randomDate), + sqle.getMessage()); + sqle = 
expectThrows(SQLException.class, () -> results.getObject("test_date", Float.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Float", randomDate), + sqle.getMessage()); + }); + } + + public void testGettingBooleanValues() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + long randomDate1 = randomLong(); + long randomDate2 = randomLong(); + + // true values + indexSimpleDocumentWithTrueValues(randomDate1); + + // false values + index("test", "2", builder -> { + builder.field("test_boolean", false); + builder.field("test_byte", 0); + builder.field("test_integer", 0); + builder.field("test_long", 0L); + builder.field("test_short", 0); + builder.field("test_double", 0d); + builder.field("test_float", 0f); + builder.field("test_keyword", "false"); + builder.field("test_date", randomDate2); + }); + + // other (non 0 = true) values + index("test", "3", builder -> { + builder.field("test_byte", randomValueOtherThan((byte) 0, () -> randomByte())); + builder.field("test_integer", randomValueOtherThan(0, () -> randomInt())); + builder.field("test_long", randomValueOtherThan(0L, () -> randomLong())); + builder.field("test_short", randomValueOtherThan((short) 0, () -> randomShort())); + builder.field("test_double", randomValueOtherThanMany(i -> i < 1.0d && i > -1.0d && i < Double.MAX_VALUE + && i > Double.MIN_VALUE, + () -> randomDouble() * randomInt())); + builder.field("test_float", randomValueOtherThanMany(i -> i < 1.0f && i > -1.0f && i < Float.MAX_VALUE && i > Float.MIN_VALUE, + () -> randomFloat() * randomInt())); + builder.field("test_keyword", "1"); + }); + + // other false values + index("test", "4", builder -> { + builder.field("test_keyword", "0"); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + assertEquals(true, results.getBoolean("test_boolean")); + for(String fld : fieldsNames) { + assertEquals("Expected: <true> but was: <false> for field " + fld, true, results.getBoolean(fld)); + assertEquals("Expected: <true> but was: <false> for field " + fld, true, results.getObject(fld, Boolean.class)); + } + SQLException sqle = expectThrows(SQLException.class, () -> results.getBoolean("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Boolean", randomDate1), + sqle.getMessage()); + + results.next(); + assertEquals(false, results.getBoolean("test_boolean")); + for(String fld : fieldsNames) { + assertEquals("Expected: <false> but was: <true> for field " + fld, false, results.getBoolean(fld)); + assertEquals("Expected: <false> but was: <true> for field " + fld, false, results.getObject(fld, Boolean.class)); + } + sqle = expectThrows(SQLException.class, () -> results.getBoolean("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Boolean", randomDate2), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Boolean.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Boolean", randomDate2), + sqle.getMessage()); + + results.next(); + for(String fld : fieldsNames.stream() + .filter((f) -> !f.equals("test_keyword")).collect(Collectors.toCollection(HashSet::new))) { + assertEquals("Expected: <true> but was: <false> for field " + fld, true, results.getBoolean(fld)); +
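+                // Any non-zero numeric value coerces to Boolean.TRUE, both through
+                // getBoolean() above and through the typed getObject() below
+                // (test_keyword is filtered out of this pass).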
assertEquals("Expected: but was: for field " + fld, true, results.getObject(fld, Boolean.class)); + } + + results.next(); + assertEquals(false, results.getBoolean("test_keyword")); + assertEquals(false, results.getObject("test_keyword", Boolean.class)); + }); + } + + public void testGettingDateWithoutCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + + String timeZoneId = randomKnownTimeZone(); + Calendar connCalendar = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId), Locale.ROOT); + + doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + connCalendar.setTimeInMillis(randomLongDate); + connCalendar.set(HOUR_OF_DAY, 0); + connCalendar.set(MINUTE, 0); + connCalendar.set(SECOND, 0); + connCalendar.set(MILLISECOND, 0); + + assertEquals(results.getDate("test_date"), new java.sql.Date(connCalendar.getTimeInMillis())); + assertEquals(results.getDate(9), new java.sql.Date(connCalendar.getTimeInMillis())); + assertEquals(results.getObject("test_date", java.sql.Date.class), + new java.sql.Date(randomLongDate - (randomLongDate % 86400000L))); + assertEquals(results.getObject(9, java.sql.Date.class), + new java.sql.Date(randomLongDate - (randomLongDate % 86400000L))); + + // bulk validation for all fields which are not of type date + validateErrorsForDateTimeTestsWithoutCalendar(results::getDate); + }); + } + + public void testGettingDateWithCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + index("test", "2", builder -> { + builder.timeField("test_date", null); + }); + + String timeZoneId = randomKnownTimeZone(); + String anotherTZId = randomValueOtherThan(timeZoneId, () -> randomKnownTimeZone()); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(anotherTZId), Locale.ROOT); + + doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + c.setTimeInMillis(randomLongDate); + c.set(HOUR_OF_DAY, 0); + c.set(MINUTE, 0); + c.set(SECOND, 0); + c.set(MILLISECOND, 0); + + assertEquals(results.getDate("test_date", c), new java.sql.Date(c.getTimeInMillis())); + assertEquals(results.getDate(9, c), new java.sql.Date(c.getTimeInMillis())); + + // bulk validation for all fields which are not of type date + validateErrorsForDateTimeTestsWithCalendar(c, results::getDate); + + results.next(); + assertNull(results.getDate("test_date")); + }); + } + + public void testGettingTimeWithoutCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + + String timeZoneId = randomKnownTimeZone(); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId), Locale.ROOT); + + 
doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + c.setTimeInMillis(randomLongDate); + c.set(ERA, GregorianCalendar.AD); + c.set(YEAR, 1970); + c.set(MONTH, 0); + c.set(DAY_OF_MONTH, 1); + + assertEquals(results.getTime("test_date"), new java.sql.Time(c.getTimeInMillis())); + assertEquals(results.getTime(9), new java.sql.Time(c.getTimeInMillis())); + assertEquals(results.getObject("test_date", java.sql.Time.class), + new java.sql.Time(randomLongDate % 86400000L)); + assertEquals(results.getObject(9, java.sql.Time.class), + new java.sql.Time(randomLongDate % 86400000L)); + + validateErrorsForDateTimeTestsWithoutCalendar(results::getTime); + }); + } + + public void testGettingTimeWithCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + index("test", "2", builder -> { + builder.timeField("test_date", null); + }); + + String timeZoneId = randomKnownTimeZone(); + String anotherTZId = randomValueOtherThan(timeZoneId, () -> randomKnownTimeZone()); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(anotherTZId), Locale.ROOT); + + doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + c.setTimeInMillis(randomLongDate); + c.set(ERA, GregorianCalendar.AD); + c.set(YEAR, 1970); + c.set(MONTH, 0); + c.set(DAY_OF_MONTH, 1); + + assertEquals(results.getTime("test_date", c), new java.sql.Time(c.getTimeInMillis())); + assertEquals(results.getTime(9, c), new java.sql.Time(c.getTimeInMillis())); + + validateErrorsForDateTimeTestsWithCalendar(c, results::getTime); + + results.next(); + assertNull(results.getTime("test_date")); + }); + } + + public void testGettingTimestampWithoutCalendar() throws Exception { + createIndex("library"); + updateMapping("library", builder -> { + builder.startObject("release_date").field("type", "date").endObject(); + builder.startObject("republish_date").field("type", "date").endObject(); + }); + long randomMillis = randomLong(); index("library", "1", builder -> { builder.field("name", "Don Quixote"); builder.field("page_count", 1072); - builder.timeField("release_date", new Date(randomMillis)); + builder.field("release_date", randomMillis); builder.timeField("republish_date", null); }); index("library", "2", builder -> { builder.field("name", "1984"); builder.field("page_count", 328); - builder.timeField("release_date", new Date(-649036800000L)); - builder.timeField("republish_date", new Date(599616000000L)); + builder.field("release_date", -649036800000L); + builder.field("republish_date", 599616000000L); }); - try (Connection connection = esJdbc()) { - try (PreparedStatement statement = connection.prepareStatement("SELECT name, release_date, republish_date FROM library")) { - try (ResultSet results = statement.executeQuery()) { - ResultSetMetaData resultSetMetaData = results.getMetaData(); + doWithQuery("SELECT name, release_date, republish_date FROM library", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); - results.next(); - assertEquals(3, resultSetMetaData.getColumnCount()); - assertEquals(randomMillis, results.getTimestamp("release_date").getTime()); - assertEquals(randomMillis, results.getTimestamp(2).getTime()); - 
assertTrue(results.getObject(2) instanceof Timestamp); - assertEquals(randomMillis, ((Timestamp) results.getObject("release_date")).getTime()); - - assertNull(results.getTimestamp(3)); - assertNull(results.getObject("republish_date")); + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(randomMillis, results.getTimestamp("release_date").getTime()); + assertEquals(randomMillis, results.getTimestamp(2).getTime()); + assertTrue(results.getObject(2) instanceof Timestamp); + assertEquals(randomMillis, ((Timestamp) results.getObject("release_date")).getTime()); + + assertNull(results.getTimestamp(3)); + assertNull(results.getObject("republish_date")); - assertTrue(results.next()); - assertEquals(599616000000L, results.getTimestamp("republish_date").getTime()); - assertEquals(-649036800000L, ((Timestamp) results.getObject(2)).getTime()); + assertTrue(results.next()); + assertEquals(599616000000L, results.getTimestamp("republish_date").getTime()); + assertEquals(-649036800000L, ((Timestamp) results.getObject(2)).getTime()); - assertFalse(results.next()); - } - } - } + assertFalse(results.next()); + }); + } + + public void testGettingTimestampWithCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + index("test", "2", builder -> { + builder.timeField("test_date", null); + }); + + String timeZoneId = randomKnownTimeZone(); + String anotherTZId = randomValueOtherThan(timeZoneId, () -> randomKnownTimeZone()); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(anotherTZId), Locale.ROOT); + + doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + c.setTimeInMillis(randomLongDate); + + assertEquals(results.getTimestamp("test_date", c), new java.sql.Timestamp(c.getTimeInMillis())); + assertEquals(results.getTimestamp(9, c), new java.sql.Timestamp(c.getTimeInMillis())); + + validateErrorsForDateTimeTestsWithCalendar(c, results::getTimestamp); + + results.next(); + assertNull(results.getTimestamp("test_date")); + }); + } + + public void testValidGetObjectCalls() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + byte b = randomByte(); + int i = randomInt(); + long l = randomLong(); + short s = (short) randomIntBetween(Short.MIN_VALUE, Short.MAX_VALUE); + double d = randomDouble(); + float f = randomFloat(); + boolean randomBool = randomBoolean(); + Long randomLongDate = randomLong(); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + + index("test", "1", builder -> { + builder.field("test_byte", b); + builder.field("test_integer", i); + builder.field("test_long", l); + builder.field("test_short", s); + builder.field("test_double", d); + builder.field("test_float", f); + builder.field("test_keyword", randomString); + builder.field("test_date", randomLongDate); + builder.field("test_boolean", randomBool); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + assertEquals(b, results.getObject("test_byte")); + 
assertTrue(results.getObject("test_byte") instanceof Byte); + + assertEquals(i, results.getObject("test_integer")); + assertTrue(results.getObject("test_integer") instanceof Integer); + + assertEquals(l, results.getObject("test_long")); + assertTrue(results.getObject("test_long") instanceof Long); + + assertEquals(s, results.getObject("test_short")); + assertTrue(results.getObject("test_short") instanceof Short); + + assertEquals(d, results.getObject("test_double")); + assertTrue(results.getObject("test_double") instanceof Double); + + assertEquals(f, results.getObject("test_float")); + assertTrue(results.getObject("test_float") instanceof Float); + + assertEquals(randomString, results.getObject("test_keyword")); + assertTrue(results.getObject("test_keyword") instanceof String); + + assertEquals(new Date(randomLongDate), results.getObject("test_date")); + assertTrue(results.getObject("test_date") instanceof Timestamp); + + assertEquals(randomBool, results.getObject("test_boolean")); + assertTrue(results.getObject("test_boolean") instanceof Boolean); + }); } /* @@ -79,4 +1091,458 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase { fail("Infinite recursive call on getObject() method"); } } + + public void testUnsupportedGetMethods() throws IOException, SQLException { + index("test", "1", builder -> { + builder.field("test", "test"); + }); + Connection conn = esJdbc(); + PreparedStatement statement = conn.prepareStatement("SELECT * FROM test"); + ResultSet r = statement.executeQuery(); + + r.next(); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getAsciiStream("test"), "AsciiStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getAsciiStream(1), "AsciiStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getArray("test"), "Array not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getArray(1), "Array not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBigDecimal("test"), "BigDecimal not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBigDecimal("test"), "BigDecimal not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBinaryStream("test"), "BinaryStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBinaryStream(1), "BinaryStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBlob("test"), "Blob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBlob(1), "Blob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getCharacterStream("test"), "CharacterStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getCharacterStream(1), "CharacterStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getClob("test"), "Clob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getClob(1), "Clob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNCharacterStream("test"), "NCharacterStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNCharacterStream(1), "NCharacterStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNClob("test"), "NClob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNClob(1), "NClob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNString("test"), "NString not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> 
r.getNString(1), "NString not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getRef("test"), "Ref not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getRef(1), "Ref not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getRowId("test"), "RowId not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getRowId(1), "RowId not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getSQLXML("test"), "SQLXML not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getSQLXML(1), "SQLXML not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getURL("test"), "URL not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getURL(1), "URL not supported"); + } + + public void testUnsupportedUpdateMethods() throws IOException, SQLException { + index("test", "1", builder -> { + builder.field("test", "test"); + }); + Connection conn = esJdbc(); + PreparedStatement statement = conn.prepareStatement("SELECT * FROM test"); + ResultSet r = statement.executeQuery(); + + r.next(); + Blob b = null; + InputStream i = null; + Clob c = null; + NClob nc = null; + Reader rd = null; + + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBytes(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBytes("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateArray(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateArray("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBigDecimal(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBigDecimal("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob(1, b)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob(1, i)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob("", b)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob("", i)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBoolean(1, false)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBoolean("", false)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateByte(1, (byte) 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateByte("", (byte) 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream("", null)); + 
assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream(1, null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob(1, c)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob(1, rd)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob("", c)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob("", rd)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob(1, null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateDate(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateDate("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateDouble(1, 0d)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateDouble("", 0d)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateFloat(1, 0f)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateFloat("", 0f)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateInt(1, 0)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateInt("", 0)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateLong(1, 0L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateLong("", 0L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream(1, null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob(1, nc)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob(1, rd)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob("", nc)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob("", rd)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNString(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNString("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNull(1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNull("")); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRef(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRef("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRowId(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRowId("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateSQLXML(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateSQLXML("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateShort(1, (short) 0)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateShort("", (short) 0)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateString(1, null)); + 
assertThrowsWritesUnsupportedForUpdate(() -> r.updateString("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateTime(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateTime("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateTimestamp(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateTimestamp("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.insertRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.deleteRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.cancelRowUpdates()); + assertThrowsWritesUnsupportedForUpdate(() -> r.moveToInsertRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.refreshRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.moveToCurrentRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.rowUpdated()); + assertThrowsWritesUnsupportedForUpdate(() -> r.rowInserted()); + assertThrowsWritesUnsupportedForUpdate(() -> r.rowDeleted()); + } + + private void doWithQuery(String query, CheckedConsumer consumer) throws SQLException { + try (Connection connection = esJdbc()) { + try (PreparedStatement statement = connection.prepareStatement(query)) { + try (ResultSet results = statement.executeQuery()) { + consumer.accept(results); + } + } + } + } + + private void doWithQueryAndTimezone(String query, String tz, CheckedConsumer consumer) throws SQLException { + try (Connection connection = esJdbc(tz)) { + try (PreparedStatement statement = connection.prepareStatement(query)) { + try (ResultSet results = statement.executeQuery()) { + consumer.accept(results); + } + } + } + } + + private void createIndex(String index) throws Exception { + Request request = new Request("PUT", "/" + index); + XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); + createIndex.startObject("settings"); + { + createIndex.field("number_of_shards", 1); + createIndex.field("number_of_replicas", 1); + } + createIndex.endObject(); + createIndex.startObject("mappings"); + { + createIndex.startObject("doc"); + { + createIndex.startObject("properties"); + {} + createIndex.endObject(); + } + createIndex.endObject(); + } + createIndex.endObject().endObject(); + request.setJsonEntity(Strings.toString(createIndex)); + client().performRequest(request); + } + + private void updateMapping(String index, CheckedConsumer body) throws Exception { + Request request = new Request("PUT", "/" + index + "/_mapping/doc"); + XContentBuilder updateMapping = JsonXContent.contentBuilder().startObject(); + updateMapping.startObject("properties"); + { + body.accept(updateMapping); + } + updateMapping.endObject().endObject(); + + request.setJsonEntity(Strings.toString(updateMapping)); + client().performRequest(request); + } + + private void createTestDataForByteValueTests(byte random1, byte random2, byte random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_byte").field("type", "byte").endObject(); + builder.startObject("test_null_byte").field("type", "byte").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_byte", random1); + builder.field("test_null_byte", (Byte) null); + }); + index("test", "2", builder -> { + builder.field("test_byte", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForShortValueTests(short random1, short random2, short 
random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_short").field("type", "short").endObject(); + builder.startObject("test_null_short").field("type", "short").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_short", random1); + builder.field("test_null_short", (Short) null); + }); + index("test", "2", builder -> { + builder.field("test_short", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForIntegerValueTests(int random1, int random2, int random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_integer").field("type", "integer").endObject(); + builder.startObject("test_null_integer").field("type", "integer").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_integer", random1); + builder.field("test_null_integer", (Integer) null); + }); + index("test", "2", builder -> { + builder.field("test_integer", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForLongValueTests(long random1, long random2, long random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_long").field("type", "long").endObject(); + builder.startObject("test_null_long").field("type", "long").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_long", random1); + builder.field("test_null_long", (Long) null); + }); + index("test", "2", builder -> { + builder.field("test_long", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForDoubleValueTests(double random1, double random2, double random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_double").field("type", "double").endObject(); + builder.startObject("test_null_double").field("type", "double").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_double", random1); + builder.field("test_null_double", (Double) null); + }); + index("test", "2", builder -> { + builder.field("test_double", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForFloatValueTests(float random1, float random2, float random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_float").field("type", "float").endObject(); + builder.startObject("test_null_float").field("type", "float").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_float", random1); + builder.field("test_null_float", (Double) null); + }); + index("test", "2", builder -> { + builder.field("test_float", random2); + builder.field("test_keyword", random3); + }); + } + + private void indexSimpleDocumentWithTrueValues(Long randomLongDate) throws IOException { + index("test", "1", builder -> { + builder.field("test_boolean", true); + builder.field("test_byte", 1); + builder.field("test_integer", 1); + 
builder.field("test_long", 1L); + builder.field("test_short", 1); + builder.field("test_double", 1d); + builder.field("test_float", 1f); + builder.field("test_keyword", "true"); + builder.field("test_date", randomLongDate); + }); + } + + /** + * Creates test data for all numeric get* methods. All values random and different from the other numeric fields already generated. + * It returns a map containing the field name and its randomly generated value to be later used in checking the returned values. + */ + private Map createTestDataForNumericValueTypes(Supplier randomGenerator) throws Exception, IOException { + Map map = new HashMap(); + createIndex("test"); + updateMappingForNumericValuesTests("test"); + + index("test", "1", builder -> { + // random Byte + byte test_byte = randomValueOtherThanMany(map::containsValue, randomGenerator).byteValue(); + builder.field("test_byte", test_byte); + map.put("test_byte", test_byte); + + // random Integer + int test_integer = randomValueOtherThanMany(map::containsValue, randomGenerator).intValue(); + builder.field("test_integer", test_integer); + map.put("test_integer", test_integer); + + // random Short + int test_short = randomValueOtherThanMany(map::containsValue, randomGenerator).shortValue(); + builder.field("test_short", test_short); + map.put("test_short", test_short); + + // random Long + long test_long = randomValueOtherThanMany(map::containsValue, randomGenerator).longValue(); + builder.field("test_long", test_long); + map.put("test_long", test_long); + + // random Double + double test_double = randomValueOtherThanMany(map::containsValue, randomGenerator).doubleValue(); + builder.field("test_double", test_double); + map.put("test_double", test_double); + + // random Float + float test_float = randomValueOtherThanMany(map::containsValue, randomGenerator).floatValue(); + builder.field("test_float", test_float); + map.put("test_float", test_float); + }); + return map; + } + + private void updateMappingForNumericValuesTests(String indexName) throws Exception { + updateMapping(indexName, builder -> { + for(String field : fieldsNames) { + builder.startObject(field).field("type", field.substring(5)).endObject(); + } + }); + } + + private void assertThrowsUnsupportedAndExpectErrorMessage(ThrowingRunnable runnable, String message) { + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, runnable); + assertEquals(message, sqle.getMessage()); + } + + private void assertThrowsWritesUnsupportedForUpdate(ThrowingRunnable r) { + assertThrowsUnsupportedAndExpectErrorMessage(r, "Writes not supported"); + } + + private void validateErrorsForDateTimeTestsWithoutCalendar(CheckedFunction method) { + SQLException sqle; + for(Entry,JDBCType> field : dateTimeTestingFields.entrySet()) { + sqle = expectThrows(SQLException.class, () -> method.apply(field.getKey().v1())); + assertEquals( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", + field.getKey().v2(), field.getValue()), sqle.getMessage()); + } + } + + private void validateErrorsForDateTimeTestsWithCalendar(Calendar c, CheckedBiFunction method) { + SQLException sqle; + for(Entry,JDBCType> field : dateTimeTestingFields.entrySet()) { + sqle = expectThrows(SQLException.class, () -> method.apply(field.getKey().v1(), c)); + assertEquals( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", + field.getKey().v2(), field.getValue()), sqle.getMessage()); + } + } + + private float randomFloatBetween(float start, float end) { + float result = 
0.0f; + while (result < start || result > end || Float.isNaN(result)) { + result = start + randomFloat() * (end - start); + } + + return result; + } + + private Long getMaxIntPlusOne() { + return Long.valueOf(Integer.MAX_VALUE) + 1L; + } + + private Double getMaxLongPlusOne() { + return Double.valueOf(Long.MAX_VALUE) + 1d; + } + + private Connection esJdbc(String timeZoneId) throws SQLException { + return randomBoolean() ? useDriverManager(timeZoneId) : useDataSource(timeZoneId); + } + + private Connection useDriverManager(String timeZoneId) throws SQLException { + String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); + String address = "jdbc:es://" + elasticsearchAddress; + Properties connectionProperties = connectionProperties(); + connectionProperties.put(JdbcConfiguration.TIME_ZONE, timeZoneId); + Connection connection = DriverManager.getConnection(address, connectionProperties); + + assertNotNull("The timezone should be specified", connectionProperties.getProperty(JdbcConfiguration.TIME_ZONE)); + return connection; + } + + private Connection useDataSource(String timeZoneId) throws SQLException { + String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); + JdbcDataSource dataSource = new JdbcDataSource(); + String address = "jdbc:es://" + elasticsearchAddress; + dataSource.setUrl(address); + Properties connectionProperties = connectionProperties(); + connectionProperties.put(JdbcConfiguration.TIME_ZONE, timeZoneId); + dataSource.setProperties(connectionProperties); + Connection connection = dataSource.getConnection(); + + assertNotNull("The timezone should be specified", connectionProperties.getProperty(JdbcConfiguration.TIME_ZONE)); + return connection; + } } diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java index 7621743481a..f5d559d9bf0 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java @@ -25,7 +25,8 @@ public class SimpleExampleTestCase extends JdbcIntegrationTestCase { assertEquals("Don Quixote", results.getString(1)); assertEquals(1072, results.getInt(2)); SQLException e = expectThrows(SQLException.class, () -> results.getInt(1)); - assertTrue(e.getMessage(), e.getMessage().contains("unable to convert column 1 to an int")); + assertTrue(e.getMessage(), + e.getMessage().contains("Unable to convert value [Don Quixote] of type [VARCHAR] to an Integer")); assertFalse(results.next()); } // end::simple_example diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java index 9ece8d7d1d3..86cbdec197e 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java @@ -85,6 +85,7 @@ public abstract class SpecBaseIntegrationTestCase extends JdbcIntegrationTestCas public final void test() throws Throwable { try { + assumeFalse("Test marked as Ignored", testName.endsWith("-Ignore")); doTest(); } catch (AssertionError ae) { throw reworkException(ae); diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java 
b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java
index d61b4b9a946..605e19807ed 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java
@@ -13,6 +13,7 @@ import org.junit.ClassRule;
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Locale;
 
@@ -24,10 +25,13 @@ public abstract class SqlSpecTestCase extends SpecBaseIntegrationTestCase {
     private String query;
 
     @ClassRule
-    public static LocalH2 H2 = new LocalH2((c) -> c.createStatement().execute("RUNSCRIPT FROM 'classpath:/setup_test_emp.sql'"));
+    public static LocalH2 H2 = new LocalH2((c) -> {
+        c.createStatement().execute("RUNSCRIPT FROM 'classpath:/setup_test_emp.sql'");
+        c.createStatement().execute("RUNSCRIPT FROM 'classpath:/setup_test_emp_with_nulls.sql'");
+    });
 
     @ParametersFactory(argumentFormatting = PARAM_FORMATTING)
-    public static List readScriptSpec() throws Exception {
+    public static List<Object[]> readScriptSpec() throws Exception {
         Parser parser = specParser();
         List<Object[]> tests = new ArrayList<>();
         tests.addAll(readScriptSpec("/select.sql-spec", parser));
@@ -37,15 +41,28 @@ public abstract class SqlSpecTestCase extends SpecBaseIntegrationTestCase {
         tests.addAll(readScriptSpec("/agg.sql-spec", parser));
         tests.addAll(readScriptSpec("/arithmetic.sql-spec", parser));
         tests.addAll(readScriptSpec("/string-functions.sql-spec", parser));
-        // AwaitsFix: https://github.com/elastic/elasticsearch/issues/32589
-        // tests.addAll(readScriptSpec("/case-functions.sql-spec", parser));
+        tests.addAll(readScriptSpec("/case-functions.sql-spec", parser));
+        tests.addAll(readScriptSpec("/agg_nulls.sql-spec", parser));
         return tests;
     }
 
     private static class SqlSpecParser implements Parser {
+        private final StringBuilder query = new StringBuilder();
+
         @Override
         public Object parse(String line) {
-            return line.endsWith(";") ? 
line.substring(0, line.length() - 1) : line;
+            // q stays null until the query's terminating ";" is seen
+            String q = null;
+            if (line.endsWith(";")) {
+                query.append(line.substring(0, line.length() - 1));
+                q = query.toString();
+                query.setLength(0);
+            } else {
+                query.append(line);
+                query.append("\r\n");
+            }
+
+            return q;
         }
     }
 
@@ -60,8 +77,11 @@ public abstract class SqlSpecTestCase extends SpecBaseIntegrationTestCase {
 
     @Override
     protected final void doTest() throws Throwable {
-        boolean goodLocale = !(Locale.getDefault().equals(new Locale.Builder().setLanguageTag("tr").build())
-                || Locale.getDefault().equals(new Locale.Builder().setLanguageTag("tr-TR").build()));
+        // we skip these tests for the locales below because ES-SQL is locale-insensitive for now,
+        // while H2 does take the locale into consideration
+        String[] h2IncompatibleLocales = new String[] {"tr", "az", "tr-TR", "tr-CY", "az-Latn", "az-Cyrl", "az-Latn-AZ", "az-Cyrl-AZ"};
+        boolean goodLocale = Arrays.stream(h2IncompatibleLocales)
+                .noneMatch((l) -> Locale.getDefault().equals(new Locale.Builder().setLanguageTag(l).build()));
         if (fileName.startsWith("case-functions")) {
             Assume.assumeTrue(goodLocale);
         }
diff --git a/x-pack/qa/sql/src/main/resources/agg.csv-spec b/x-pack/qa/sql/src/main/resources/agg.csv-spec
index 0d7b0e14760..1d9592d963d 100644
--- a/x-pack/qa/sql/src/main/resources/agg.csv-spec
+++ b/x-pack/qa/sql/src/main/resources/agg.csv-spec
@@ -66,11 +66,9 @@ F | 10099.1936 | 10098.021 | 26.35135135
 M | 10095.6112 | 10090.846 | 23.41269841269841
 ;
 
-// Simple sum used in documentation
 sum
-// tag::sum
 SELECT SUM(salary) FROM test_emp;
-// end::sum
+
 SUM(salary)
---------------
 4824855
diff --git a/x-pack/qa/sql/src/main/resources/agg.sql-spec b/x-pack/qa/sql/src/main/resources/agg.sql-spec
index f42ce0ef7a0..a86b8b65eef 100644
--- a/x-pack/qa/sql/src/main/resources/agg.sql-spec
+++ b/x-pack/qa/sql/src/main/resources/agg.sql-spec
@@ -87,9 +87,7 @@ SELECT (emp_no % 3) + 1 AS e, (languages % 3) + 1 AS l FROM test_emp GROUP BY e,
 
 // COUNT
 aggCountImplicit
-// tag::countStar
 SELECT COUNT(*) AS count FROM test_emp;
-// end::countStar
 aggCountImplicitWithCast
 SELECT CAST(COUNT(*) AS INT) c FROM "test_emp";
 aggCountImplicitWithConstant
@@ -105,9 +103,7 @@ SELECT gender g, CAST(COUNT(*) AS INT) c FROM "test_emp" WHERE emp_no < 10020 GR
 aggCountWithAlias
 SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g ORDER BY gender;
 countDistinct
-// tag::countDistinct
 SELECT COUNT(DISTINCT hire_date) AS count FROM test_emp;
-// end::countDistinct
 aggCountAliasAndWhereClauseMultiGroupBy
 SELECT gender g, languages l, COUNT(*) c FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender, languages ORDER BY gender, languages;
 
@@ -237,9 +233,7 @@ SELECT gender g, languages l, MIN(emp_no) m FROM "test_emp" GROUP BY g, l HAVING
 
 // MAX
 aggMaxImplicit
-// tag::max
 SELECT MAX(salary) AS max FROM test_emp;
-// end::max
 aggMaxImplicitWithCast
 SELECT CAST(MAX(emp_no) AS SMALLINT) c FROM "test_emp";
 aggMax
@@ -310,9 +304,7 @@ SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY gender OR
 aggAvgWithCastToDouble
 SELECT gender g, CAST(AVG(emp_no) AS DOUBLE) a FROM "test_emp" GROUP BY gender ORDER BY gender;
 aggAvg
-// tag::avg
 SELECT AVG(salary) AS avg FROM test_emp;
-// end::avg
 aggAvgWithCastAndCount
 SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a, COUNT(1) c FROM "test_emp" GROUP BY gender ORDER BY gender;
 aggAvgWithCastAndCountWithFilter
@@ -394,4 +386,12 @@ SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM
 aggMultiWithHavingOnCount
 SELECT 
MIN(salary) min, MAX(salary) max, gender g, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g HAVING c > 40 ORDER BY gender;
 aggMultiGroupByMultiWithHavingOnCount
-SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g, languages HAVING c > 40 ORDER BY gender, languages;
\ No newline at end of file
+SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g, languages HAVING c > 40 ORDER BY gender, languages;
+
+// repetition of the same aggs to check whether the generated query contains duplicates or not
+aggRepeatFunctionAcrossFields
+SELECT MIN(emp_no) AS a, 1 + MIN(emp_no) AS b, ABS(MIN(emp_no)) AS c FROM test_emp;
+aggRepeatFunctionBetweenSelectAndHaving
+SELECT gender, COUNT(DISTINCT languages) AS c FROM test_emp GROUP BY gender HAVING count(DISTINCT languages) > 0 ORDER BY gender;
+
+
diff --git a/x-pack/qa/sql/src/main/resources/agg_nulls.sql-spec b/x-pack/qa/sql/src/main/resources/agg_nulls.sql-spec
new file mode 100644
index 00000000000..17fbb70a40b
--- /dev/null
+++ b/x-pack/qa/sql/src/main/resources/agg_nulls.sql-spec
@@ -0,0 +1,12 @@
+selectGenderWithNullsAndGroupByGender
+SELECT gender, COUNT(*) count FROM test_emp_with_nulls GROUP BY gender ORDER BY gender;
+selectFirstNameWithNullsAndGroupByFirstName
+SELECT first_name FROM test_emp_with_nulls GROUP BY first_name ORDER BY first_name;
+selectCountWhereIsNull
+SELECT COUNT(*) count FROM test_emp_with_nulls WHERE first_name IS NULL;
+selectLanguagesCountWithNullsAndGroupByLanguage
+SELECT languages l, COUNT(*) c FROM test_emp_with_nulls GROUP BY languages ORDER BY languages;
+selectHireDateGroupByHireDate
+SELECT hire_date HD, COUNT(*) c FROM test_emp_with_nulls GROUP BY hire_date ORDER BY hire_date DESC;
+selectSalaryGroupBySalary
+SELECT salary, COUNT(*) c FROM test_emp_with_nulls GROUP BY salary ORDER BY salary DESC;
\ No newline at end of file
diff --git a/x-pack/qa/sql/src/main/resources/alias.csv-spec b/x-pack/qa/sql/src/main/resources/alias.csv-spec
index 839d2cba794..f1fa900706a 100644
--- a/x-pack/qa/sql/src/main/resources/alias.csv-spec
+++ b/x-pack/qa/sql/src/main/resources/alias.csv-spec
@@ -86,6 +86,7 @@ test_alias | ALIAS
 test_alias_emp | ALIAS
 test_emp | BASE TABLE
 test_emp_copy | BASE TABLE
+test_emp_with_nulls | BASE TABLE
 ;
 
 testGroupByOnAlias
@@ -98,10 +99,10 @@ F | 10099.28
 ;
 
 testGroupByOnPattern
-SELECT gender, PERCENTILE(emp_no, 97) p1 FROM test_* GROUP BY gender;
+SELECT gender, PERCENTILE(emp_no, 97) p1 FROM test_* WHERE gender is NOT NULL GROUP BY gender;
 
 gender:s | p1:d
-F | 10099.28
-M | 10095.75
+F | 10099.32
+M | 10095.98
 ;
\ No newline at end of file
diff --git a/x-pack/qa/sql/src/main/resources/command.csv-spec b/x-pack/qa/sql/src/main/resources/command.csv-spec
index 89e86e887e1..77d397fa2b5 100644
--- a/x-pack/qa/sql/src/main/resources/command.csv-spec
+++ b/x-pack/qa/sql/src/main/resources/command.csv-spec
@@ -121,7 +121,7 @@ ABS |SCALAR
 ;
 
 showFunctionsWithLeadingPattern
-SHOW FUNCTIONS '%DAY%';
+SHOW FUNCTIONS LIKE '%DAY%';
 
 name:s | type:s
 DAY_OF_MONTH |SCALAR
@@ -133,15 +133,102 @@ MINUTE_OF_DAY |SCALAR
 ;
 
 showTables
-SHOW TABLES 'test_emp';
+SHOW TABLES;
+
+       name        |     type
+test_alias         |ALIAS
+test_alias_emp     |ALIAS
+test_emp           |BASE TABLE
+test_emp_copy      |BASE TABLE
+test_emp_with_nulls|BASE TABLE
+;
+
+showTablesSimpleLike
+SHOW TABLES 
LIKE 'test_emp'; name:s | type:s test_emp |BASE TABLE ; +showTablesMultiLike +SHOW TABLES LIKE 'test_emp%'; + + name:s | type:s +test_emp |BASE TABLE +test_emp_copy |BASE TABLE +test_emp_with_nulls|BASE TABLE +; + +showTablesIdentifier +SHOW TABLES "test_emp"; + + name:s | type:s +test_emp |BASE TABLE +; + +showTablesIdentifierPattern +SHOW TABLES "test_e*,-test_emp"; + + name:s | type:s +test_emp_copy |BASE TABLE +test_emp_with_nulls|BASE TABLE +; + +showTablesIdentifierPatternOnAliases +SHOW TABLES "test*,-test_emp*"; + + name:s | type:s +test_alias |ALIAS +test_alias_emp |ALIAS +; + // DESCRIBE -describe +describeSimpleLike +DESCRIBE LIKE 'test_emp'; + + column:s | type:s +birth_date | TIMESTAMP +dep | STRUCT +dep.dep_id | VARCHAR +dep.dep_name | VARCHAR +dep.dep_name.keyword | VARCHAR +dep.from_date | TIMESTAMP +dep.to_date | TIMESTAMP +emp_no | INTEGER +first_name | VARCHAR +first_name.keyword | VARCHAR +gender | VARCHAR +hire_date | TIMESTAMP +languages | TINYINT +last_name | VARCHAR +last_name.keyword | VARCHAR +salary | INTEGER +; + +describeMultiLike +DESCRIBE LIKE 'test_emp%'; + + column:s | type:s +birth_date | TIMESTAMP +dep | STRUCT +dep.dep_id | VARCHAR +dep.dep_name | VARCHAR +dep.dep_name.keyword | VARCHAR +dep.from_date | TIMESTAMP +dep.to_date | TIMESTAMP +emp_no | INTEGER +first_name | VARCHAR +first_name.keyword | VARCHAR +gender | VARCHAR +hire_date | TIMESTAMP +languages | TINYINT +last_name | VARCHAR +last_name.keyword | VARCHAR +salary | INTEGER +; + +describeSimpleIdentifier DESCRIBE "test_emp"; column:s | type:s @@ -162,3 +249,27 @@ last_name | VARCHAR last_name.keyword | VARCHAR salary | INTEGER ; + + +describeIncludeExcludeIdentifier +DESCRIBE "test_emp*,-test_emp_*"; + +column:s | type:s +birth_date | TIMESTAMP +dep | STRUCT +dep.dep_id | VARCHAR +dep.dep_name | VARCHAR +dep.dep_name.keyword | VARCHAR +dep.from_date | TIMESTAMP +dep.to_date | TIMESTAMP +emp_no | INTEGER +first_name | VARCHAR +first_name.keyword | VARCHAR +gender | VARCHAR +hire_date | TIMESTAMP +languages | TINYINT +last_name | VARCHAR +last_name.keyword | VARCHAR +salary | INTEGER +; + diff --git a/x-pack/qa/sql/src/main/resources/docs.csv-spec b/x-pack/qa/sql/src/main/resources/docs.csv-spec index 7385bf14df3..2a4f29fcf5d 100644 --- a/x-pack/qa/sql/src/main/resources/docs.csv-spec +++ b/x-pack/qa/sql/src/main/resources/docs.csv-spec @@ -148,6 +148,29 @@ emp |BASE TABLE // end::showTablesLikeMixed ; +showTablesLikeEscape-Ignore +// tag::showTablesLikeEscape +SHOW TABLES LIKE 'emp!%' ESCAPE '!'; + + name | type +---------------+--------------- + +// end::showTablesLikeEscape +; + + +showTablesEsMultiIndex +// tag::showTablesEsMultiIndex +SHOW TABLES "*,-l*"; + + name | type +---------------+--------------- +emp |BASE TABLE +employees |ALIAS + +// end::showTablesEsMultiIndex +; + /////////////////////////////// // // Show Functions @@ -160,88 +183,88 @@ SHOW FUNCTIONS; name | type ----------------+--------------- -AVG |AGGREGATE -COUNT |AGGREGATE -MAX |AGGREGATE -MIN |AGGREGATE -SUM |AGGREGATE -STDDEV_POP |AGGREGATE -VAR_POP |AGGREGATE -PERCENTILE |AGGREGATE -PERCENTILE_RANK |AGGREGATE -SUM_OF_SQUARES |AGGREGATE -SKEWNESS |AGGREGATE -KURTOSIS |AGGREGATE -DAY_OF_MONTH |SCALAR -DAY |SCALAR -DOM |SCALAR -DAY_OF_WEEK |SCALAR -DOW |SCALAR -DAY_OF_YEAR |SCALAR -DOY |SCALAR -HOUR_OF_DAY |SCALAR -HOUR |SCALAR -MINUTE_OF_DAY |SCALAR -MINUTE_OF_HOUR |SCALAR -MINUTE |SCALAR -SECOND_OF_MINUTE|SCALAR -SECOND |SCALAR -MONTH_OF_YEAR |SCALAR -MONTH |SCALAR -YEAR |SCALAR -WEEK_OF_YEAR |SCALAR -WEEK |SCALAR -ABS 
|SCALAR -ACOS |SCALAR -ASIN |SCALAR -ATAN |SCALAR -ATAN2 |SCALAR -CBRT |SCALAR -CEIL |SCALAR -CEILING |SCALAR -COS |SCALAR -COSH |SCALAR -COT |SCALAR -DEGREES |SCALAR -E |SCALAR -EXP |SCALAR -EXPM1 |SCALAR -FLOOR |SCALAR -LOG |SCALAR -LOG10 |SCALAR -MOD |SCALAR -PI |SCALAR -POWER |SCALAR -RADIANS |SCALAR -RANDOM |SCALAR -RAND |SCALAR -ROUND |SCALAR -SIGN |SCALAR -SIGNUM |SCALAR -SIN |SCALAR -SINH |SCALAR -SQRT |SCALAR -TAN |SCALAR -ASCII |SCALAR -CHAR |SCALAR -BIT_LENGTH |SCALAR -CHAR_LENGTH |SCALAR -LCASE |SCALAR -LENGTH |SCALAR -LTRIM |SCALAR -RTRIM |SCALAR -SPACE |SCALAR -UCASE |SCALAR -CONCAT |SCALAR -INSERT |SCALAR -LEFT |SCALAR -LOCATE |SCALAR -POSITION |SCALAR -REPEAT |SCALAR -REPLACE |SCALAR -RIGHT |SCALAR -SUBSTRING |SCALAR -SCORE |SCORE +AVG |AGGREGATE +COUNT |AGGREGATE +MAX |AGGREGATE +MIN |AGGREGATE +SUM |AGGREGATE +STDDEV_POP |AGGREGATE +VAR_POP |AGGREGATE +PERCENTILE |AGGREGATE +PERCENTILE_RANK |AGGREGATE +SUM_OF_SQUARES |AGGREGATE +SKEWNESS |AGGREGATE +KURTOSIS |AGGREGATE +DAY_OF_MONTH |SCALAR +DAY |SCALAR +DOM |SCALAR +DAY_OF_WEEK |SCALAR +DOW |SCALAR +DAY_OF_YEAR |SCALAR +DOY |SCALAR +HOUR_OF_DAY |SCALAR +HOUR |SCALAR +MINUTE_OF_DAY |SCALAR +MINUTE_OF_HOUR |SCALAR +MINUTE |SCALAR +SECOND_OF_MINUTE|SCALAR +SECOND |SCALAR +MONTH_OF_YEAR |SCALAR +MONTH |SCALAR +YEAR |SCALAR +WEEK_OF_YEAR |SCALAR +WEEK |SCALAR +ABS |SCALAR +ACOS |SCALAR +ASIN |SCALAR +ATAN |SCALAR +ATAN2 |SCALAR +CBRT |SCALAR +CEIL |SCALAR +CEILING |SCALAR +COS |SCALAR +COSH |SCALAR +COT |SCALAR +DEGREES |SCALAR +E |SCALAR +EXP |SCALAR +EXPM1 |SCALAR +FLOOR |SCALAR +LOG |SCALAR +LOG10 |SCALAR +MOD |SCALAR +PI |SCALAR +POWER |SCALAR +RADIANS |SCALAR +RANDOM |SCALAR +RAND |SCALAR +ROUND |SCALAR +SIGN |SCALAR +SIGNUM |SCALAR +SIN |SCALAR +SINH |SCALAR +SQRT |SCALAR +TAN |SCALAR +ASCII |SCALAR +CHAR |SCALAR +BIT_LENGTH |SCALAR +CHAR_LENGTH |SCALAR +LCASE |SCALAR +LENGTH |SCALAR +LTRIM |SCALAR +RTRIM |SCALAR +SPACE |SCALAR +CONCAT |SCALAR +INSERT |SCALAR +LEFT |SCALAR +LOCATE |SCALAR +POSITION |SCALAR +REPEAT |SCALAR +REPLACE |SCALAR +RIGHT |SCALAR +SUBSTRING |SCALAR +UCASE |SCALAR +SCORE |SCORE // end::showFunctions ; @@ -286,7 +309,7 @@ ABS |SCALAR showFunctionsWithPattern // tag::showFunctionsWithPattern -SHOW FUNCTIONS '%DAY%'; +SHOW FUNCTIONS LIKE '%DAY%'; name | type ---------------+--------------- @@ -308,7 +331,7 @@ MINUTE_OF_DAY |SCALAR selectColumnAlias // tag::selectColumnAlias -SELECT 1 + 1 AS result +SELECT 1 + 1 AS result; result --------------- @@ -480,18 +503,19 @@ groupByMulti // tag::groupByMulti SELECT gender g, languages l, COUNT(*) c FROM "emp" GROUP BY g, l ORDER BY languages ASC, gender DESC; - g | l | c + g | l | c ---------------+---------------+--------------- -F |2 |4 -F |3 |8 -F |4 |7 -F |5 |7 -F |6 |11 -M |2 |12 -M |3 |12 -M |4 |15 -M |5 |11 -M |6 |13 +M |1 |12 +F |1 |4 +M |2 |12 +F |2 |8 +M |3 |15 +F |3 |7 +M |4 |11 +F |4 |7 +M |5 |13 +F |5 |11 + // end::groupByMulti ; @@ -635,7 +659,7 @@ James S.A. 
Corey |Leviathan Wakes |561 |2011-06-02T00:00:00Z orderByScore // tag::orderByScore -SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY SCORE() DESC; +SELECT SCORE(), * FROM library WHERE MATCH(name, 'dune') ORDER BY SCORE() DESC; SCORE() | author | name | page_count | release_date ---------------+---------------+-------------------+---------------+-------------------- @@ -649,7 +673,7 @@ SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY SCORE() DESC; orderByScoreWithMatch // tag::orderByScoreWithMatch -SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY page_count DESC; +SELECT SCORE(), * FROM library WHERE MATCH(name, 'dune') ORDER BY page_count DESC; SCORE() | author | name | page_count | release_date ---------------+---------------+-------------------+---------------+-------------------- @@ -661,6 +685,19 @@ SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY page_count DES // end::orderByScoreWithMatch ; +scoreWithMatch +// tag::scoreWithMatch +SELECT SCORE() AS score, name, release_date FROM library WHERE QUERY('dune') ORDER BY YEAR(release_date) DESC; + + score | name | release_date +---------------+-------------------+-------------------- +1.4005898 |God Emperor of Dune|1981-05-28T00:00:00Z +1.6086555 |Children of Dune |1976-04-21T00:00:00Z +1.8893257 |Dune Messiah |1969-10-15T00:00:00Z +2.288635 |Dune |1965-06-01T00:00:00Z +// end::scoreWithMatch +; + /////////////////////////////// // @@ -678,3 +715,399 @@ Georgi |Facello |10001 // end::limitBasic ; + +/////////////////////////////// +// +// Aggregations +// +/////////////////////////////// + +aggAvg +// tag::aggAvg +SELECT AVG(salary) AS avg FROM emp; + + avg +--------------- +48248 +// end::aggAvg +; + +aggCountStar +// tag::aggCountStar +SELECT COUNT(*) AS count FROM emp; + + count +--------------- +100 +// end::aggCountStar +; + +aggCountDistinct +// tag::aggCountDistinct + +SELECT COUNT(DISTINCT hire_date) AS hires FROM emp; + + hires +--------------- +99 +// end::aggCountDistinct +; + +aggMax +// tag::aggMax +SELECT MAX(salary) AS max FROM emp; + + max +--------------- +74999 +// end::aggMax +; + +aggMin +// tag::aggMin +SELECT MIN(salary) AS min FROM emp; + + min +--------------- +25324 +// end::aggMin +; + +aggSum +// tag::aggSum +SELECT SUM(salary) AS sum FROM emp; + + sum +--------------- +4824855 +// end::aggSum +; + +aggKurtosis +// tag::aggKurtosis +SELECT MIN(salary) AS min, MAX(salary) AS max, KURTOSIS(salary) AS k FROM emp; + + min | max | k +---------------+---------------+------------------ +25324 |74999 |2.0444718929142986 +// end::aggKurtosis +; + +aggPercentile +// tag::aggPercentile +SELECT languages, PERCENTILE(salary, 95) AS "95th" FROM emp + GROUP BY languages; + + languages | 95th +---------------+----------------- +1 |72605.2 +2 |71741.0 +3 |74981.6 +4 |72115.59999999999 +5 |68927.19999999998 +// end::aggPercentile +; + +aggPercentileRank +// tag::aggPercentileRank +SELECT languages, PERCENTILE_RANK(salary, 65000) AS rank FROM emp GROUP BY languages; + + languages | rank +---------------+----------------- +1 |75.37108985853756 +2 |89.43605326660112 +3 |77.74873333978765 +4 |85.70446389643493 +5 |92.52677973666592 +// end::aggPercentileRank +; + +aggSkewness +// tag::aggSkewness +SELECT MIN(salary) AS min, MAX(salary) AS max, SKEWNESS(salary) AS s FROM emp; + + min | max | s +---------------+---------------+------------------ +25324 |74999 |0.2707722118423227 +// end::aggSkewness +; + +aggStddevPop +// tag::aggStddevPop +SELECT MIN(salary) AS 
min, MAX(salary) AS max, STDDEV_POP(salary) AS stddev + FROM emp; + + min | max | stddev +---------------+---------------+------------------ +25324 |74999 |13765.125502787832 +// end::aggStddevPop +; + + +aggSumOfSquares +// tag::aggSumOfSquares +SELECT MIN(salary) AS min, MAX(salary) AS max, SUM_OF_SQUARES(salary) AS sumsq + FROM emp; + + min | max | sumsq +---------------+---------------+---------------- +25324 |74999 |2.51740125721E11 +// end::aggSumOfSquares +; + + +aggVarPop +// tag::aggVarPop +SELECT MIN(salary) AS min, MAX(salary) AS max, VAR_POP(salary) AS varpop FROM emp; + + min | max | varpop +---------------+---------------+---------------- +25324 |74999 |1.894786801075E8 +// end::aggVarPop +; + + +/////////////////////////////// +// +// String +// +/////////////////////////////// + +stringAscii +// tag::stringAscii +SELECT ASCII('Elastic'); + +ASCII(Elastic) +--------------- +69 +// end::stringAscii +; + +stringBitLength +// tag::stringBitLength +SELECT BIT_LENGTH('Elastic'); + +BIT_LENGTH(Elastic) +------------------- +56 +// end::stringBitLength +; + +stringChar +// tag::stringChar +SELECT CHAR(69); + + CHAR(69) +--------------- +E +// end::stringChar +; + +stringCharLength +// tag::stringCharLength +SELECT CHAR_LENGTH('Elastic'); + +CHAR_LENGTH(Elastic) +-------------------- +7 +// end::stringCharLength +; + +stringConcat +// tag::stringConcat +SELECT CONCAT('Elasticsearch', ' SQL'); + +CONCAT(Elasticsearch, SQL) +-------------------------- +Elasticsearch SQL +// end::stringConcat +; + +stringInsert +// tag::stringInsert +SELECT INSERT('Elastic ', 8, 1, 'search'); + +INSERT(Elastic ,8,1,search) +--------------------------- +Elasticsearch +// end::stringInsert +; + +stringLCase +// tag::stringLCase +SELECT LCASE('Elastic'); + +LCASE(Elastic) +--------------- +elastic +// end::stringLCase +; + +stringLeft +// tag::stringLeft +SELECT LEFT('Elastic',3); + +LEFT(Elastic,3) +--------------- +Ela +// end::stringLeft +; + +stringLength +// tag::stringLength +SELECT LENGTH('Elastic '); + +LENGTH(Elastic ) +------------------ +7 +// end::stringLength +; + +stringLocateWoStart +// tag::stringLocateWoStart +SELECT LOCATE('a', 'Elasticsearch'); + +LOCATE(a,Elasticsearch) +----------------------- +3 +// end::stringLocateWoStart +; + +stringLocateWithStart +// tag::stringLocateWithStart +SELECT LOCATE('a', 'Elasticsearch', 5); + +LOCATE(a,Elasticsearch,5) +------------------------- +10 +// end::stringLocateWithStart +; + +stringLTrim +// tag::stringLTrim +SELECT LTRIM(' Elastic'); + +LTRIM( Elastic) +----------------- +Elastic +// end::stringLTrim +; + +stringPosition +// tag::stringPosition +SELECT POSITION('Elastic', 'Elasticsearch'); + +POSITION(Elastic,Elasticsearch) +------------------------------- +1 +// end::stringPosition +; + +stringRepeat +// tag::stringRepeat +SELECT REPEAT('La', 3); + + REPEAT(La,3) +--------------- +LaLaLa +// end::stringRepeat +; + +stringReplace-Ignore +// tag::stringReplace +SELECT REPLACE('Elastic', 'El', 'Fant'); + + REPLACE(Elastic, El, Fant) +----------------------------- +Fantastic +// end::stringReplace +; + +stringRight +// tag::stringRight +SELECT RIGHT('Elastic',3); + +RIGHT(Elastic,3) +---------------- +tic +// end::stringRight +; + +stringRTrim +// tag::stringRTrim +SELECT RTRIM('Elastic '); + +RTRIM(Elastic ) +----------------- +Elastic +// end::stringRTrim +; + +stringSpace-Ignore +// tag::stringSpace +SELECT SPACE(3); + + SPACE(3) +--------------- + + +// end::stringSpace +; + +stringSubString +// tag::stringSubString +SELECT 
SUBSTRING('Elasticsearch', 0, 7); + +SUBSTRING(Elasticsearch,0,7) +---------------------------- +Elastic +// end::stringSubString +; + +stringUCase +// tag::stringUCase +SELECT UCASE('Elastic'); + +UCASE(Elastic) +--------------- +ELASTIC +// end::stringUCase +; + + +/////////////////////////////// +// +// Cast +// +/////////////////////////////// + +conversionStringToIntCast +// tag::conversionStringToIntCast +SELECT CAST('123' AS INT) AS int; + + int +--------------- +123 +// end::conversionStringToIntCast +; + +conversionIntToStringCast-Ignore +// tag::conversionIntToStringCast +SELECT CAST(123 AS VARCHAR) AS string; + + string +--------------- +123 + +// end::conversionIntToStringCast +; + +conversionStringToDateCast +// tag::conversionStringToDateCast +SELECT YEAR(CAST('2018-05-19T11:23:45Z' AS TIMESTAMP)) AS year; + + year +--------------- +2018 +// end::conversionStringToDateCast +; diff --git a/x-pack/qa/sql/src/main/resources/employees_with_nulls.csv b/x-pack/qa/sql/src/main/resources/employees_with_nulls.csv new file mode 100644 index 00000000000..482da640470 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/employees_with_nulls.csv @@ -0,0 +1,101 @@ +birth_date,emp_no,first_name,gender,hire_date,languages,last_name,salary +1953-09-02T00:00:00Z,10001,Georgi,,1986-06-26T00:00:00Z,2,Facello,57305 +1964-06-02T00:00:00Z,10002,Bezalel,,1985-11-21T00:00:00Z,5,Simmel,56371 +1959-12-03T00:00:00Z,10003,Parto,,1986-08-28T00:00:00Z,4,Bamford,61805 +1954-05-01T00:00:00Z,10004,Chirstian,,1986-12-01T00:00:00Z,5,Koblick,36174 +1955-01-21T00:00:00Z,10005,Kyoichi,,1989-09-12T00:00:00Z,1,Maliniak,63528 +1953-04-20T00:00:00Z,10006,Anneke,,1989-06-02T00:00:00Z,3,Preusig,60335 +1957-05-23T00:00:00Z,10007,Tzvetan,,1989-02-10T00:00:00Z,4,Zielinski,74572 +1958-02-19T00:00:00Z,10008,Saniya,,1994-09-15T00:00:00Z,2,Kalloufi,43906 +1952-04-19T00:00:00Z,10009,Sumant,,1985-02-18T00:00:00Z,1,Peac,66174 +1963-06-01T00:00:00Z,10010,Duangkaew,,1989-08-24T00:00:00Z,4,Piveteau,45797 +1953-11-07T00:00:00Z,10011,Mary,F,1990-01-22T00:00:00Z,5,Sluis,31120 +1960-10-04T00:00:00Z,10012,Patricio,M,1992-12-18T00:00:00Z,5,Bridgland,48942 +1963-06-07T00:00:00Z,10013,Eberhardt,M,1985-10-20T00:00:00Z,1,Terkki,48735 +1956-02-12T00:00:00Z,10014,Berni,M,1987-03-11T00:00:00Z,5,Genin,37137 +1959-08-19T00:00:00Z,10015,Guoxiang,M,1987-07-02T00:00:00Z,5,Nooteboom,25324 +1961-05-02T00:00:00Z,10016,Kazuhito,M,1995-01-27T00:00:00Z,2,Cappelletti,61358 +1958-07-06T00:00:00Z,10017,Cristinel,F,1993-08-03T00:00:00Z,2,Bouloucos,58715 +1954-06-19T00:00:00Z,10018,Kazuhide,F,1993-08-03T00:00:00Z,2,Peha,56760 +1953-01-23T00:00:00Z,10019,Lillian,M,1993-08-03T00:00:00Z,1,Haddadi,73717 +1952-12-24T00:00:00Z,10020,,M,1991-01-26T00:00:00Z,3,Warwick,40031 +1960-02-20T00:00:00Z,10021,,M,1989-12-17T00:00:00Z,5,Erde,60408 +1952-07-08T00:00:00Z,10022,,M,1995-08-22T00:00:00Z,3,Famili,48233 +1953-09-29T00:00:00Z,10023,,F,1989-12-17T00:00:00Z,2,Montemayor,47896 +1958-09-05T00:00:00Z,10024,,F,1997-05-19T00:00:00Z,3,Pettey,64675 +1958-10-31T00:00:00Z,10025,Prasadram,M,1987-08-17T00:00:00Z,5,Heyers,47411 +1953-04-03T00:00:00Z,10026,Yongqiao,M,1995-03-20T00:00:00Z,3,Berztiss,28336 +1962-07-10T00:00:00Z,10027,Divier,F,1989-07-07T00:00:00Z,5,Reistad,73851 +1963-11-26T00:00:00Z,10028,Domenick,M,1991-10-22T00:00:00Z,1,Tempesti,39356 +1956-12-13T00:00:00Z,10029,Otmar,M,1985-11-20T00:00:00Z,,Herbst,74999 +1958-07-14T00:00:00Z,10030,Elvis,M,1994-02-17T00:00:00Z,,Demeyer,67492 +1959-01-27T00:00:00Z,10031,Karsten,M,1994-02-17T00:00:00Z,,Joslin,37716 
+1960-08-09T00:00:00Z,10032,Jeong,F,1990-06-20T00:00:00Z,,Reistad,62233
+1956-11-14T00:00:00Z,10033,Arif,M,1987-03-18T00:00:00Z,,Merlo,70011
+1962-12-29T00:00:00Z,10034,Bader,M,1988-09-05T00:00:00Z,,Swan,39878
+1953-02-08T00:00:00Z,10035,Alain,M,1988-09-05T00:00:00Z,,Chappelet,25945
+1959-08-10T00:00:00Z,10036,Adamantios,M,1992-01-03T00:00:00Z,,Portugali,60781
+1963-07-22T00:00:00Z,10037,Pradeep,M,1990-12-05T00:00:00Z,,Makrucki,37691
+1960-07-20T00:00:00Z,10038,Huan,M,1989-09-20T00:00:00Z,,Lortz,35222
+1959-10-01T00:00:00Z,10039,Alejandro,M,1988-01-19T00:00:00Z,,Brender,36051
+1959-09-13T00:00:00Z,10040,Weiyi,F,1993-02-14T00:00:00Z,,Meriste,37112
+1959-08-27T00:00:00Z,10041,Uri,F,1989-11-12T00:00:00Z,1,Lenart,56415
+1956-02-26T00:00:00Z,10042,Magy,F,1993-03-21T00:00:00Z,3,Stamatiou,30404
+1960-09-19T00:00:00Z,10043,Yishay,M,1990-10-20T00:00:00Z,1,Tzvieli,34341
+1961-09-21T00:00:00Z,10044,Mingsen,F,1994-05-21T00:00:00Z,1,Casley,39728
+1957-08-14T00:00:00Z,10045,Moss,M,1989-09-02T00:00:00Z,3,Shanbhogue,74970
+1960-07-23T00:00:00Z,10046,Lucien,M,1992-06-20T00:00:00Z,4,Rosenbaum,50064
+1952-06-29T00:00:00Z,10047,Zvonko,M,1989-03-31T00:00:00Z,4,Nyanchama,42716
+1963-07-11T00:00:00Z,10048,Florian,M,1985-02-24T00:00:00Z,3,Syrotiuk,26436
+1961-04-24T00:00:00Z,10049,Basil,F,1992-05-04T00:00:00Z,5,Tramer,37853
+1958-05-21T00:00:00Z,10050,Yinghua,M,1990-12-25T00:00:00Z,2,Dredge,43026
+1953-07-28T00:00:00Z,10051,Hidefumi,M,1992-10-15T00:00:00Z,3,Caine,58121
+1961-02-26T00:00:00Z,10052,Heping,M,1988-05-21T00:00:00Z,1,Nitsch,55360
+1954-09-13T00:00:00Z,10053,Sanjiv,F,1986-02-04T00:00:00Z,3,Zschoche,54462
+1957-04-04T00:00:00Z,10054,Mayumi,M,1995-03-13T00:00:00Z,4,Schueller,65367
+1956-06-06T00:00:00Z,10055,Georgy,M,1992-04-27T00:00:00Z,5,Dredge,49281
+1961-09-01T00:00:00Z,10056,Brendon,F,1990-02-01T00:00:00Z,2,Bernini,33370
+1954-05-30T00:00:00Z,10057,Ebbe,F,1992-01-15T00:00:00Z,4,Callaway,27215
+1954-10-01T00:00:00Z,10058,Berhard,M,1987-04-13T00:00:00Z,3,McFarlin,38376
+1953-09-19T00:00:00Z,10059,Alejandro,F,1991-06-26T00:00:00Z,2,McAlpine,44307
+1961-10-15T00:00:00Z,10060,Breannda,M,1987-11-02T00:00:00Z,2,Billingsley,29175
+1962-10-19T00:00:00Z,10061,Tse,M,1985-09-17T00:00:00Z,1,Herber,49095
+1961-11-02T00:00:00Z,10062,Anoosh,M,1991-08-30T00:00:00Z,3,Peyn,65030
+1952-08-06T00:00:00Z,10063,Gino,F,1989-04-08T00:00:00Z,3,Leonhardt,52121
+1959-04-07T00:00:00Z,10064,Udi,M,1985-11-20T00:00:00Z,5,Jansch,33956
+1963-04-14T00:00:00Z,10065,Satosi,M,1988-05-18T00:00:00Z,2,Awdeh,50249
+1952-11-13T00:00:00Z,10066,Kwee,M,1986-02-26T00:00:00Z,5,Schusler,31897
+1953-01-07T00:00:00Z,10067,Claudi,M,1987-03-04T00:00:00Z,2,Stavenow,52044
+1962-11-26T00:00:00Z,10068,Charlene,M,1987-08-07T00:00:00Z,3,Brattka,28941
+1960-09-06T00:00:00Z,10069,Margareta,F,1989-11-05T00:00:00Z,5,Bierman,41933
+1955-08-20T00:00:00Z,10070,Reuven,M,1985-10-14T00:00:00Z,3,Garigliano,54329
+1958-01-21T00:00:00Z,10071,Hisao,M,1987-10-01T00:00:00Z,2,Lipner,40612
+1952-05-15T00:00:00Z,10072,Hironoby,F,1988-07-21T00:00:00Z,5,Sidou,54518
+1954-02-23T00:00:00Z,10073,Shir,M,1991-12-01T00:00:00Z,4,McClurg,32568
+1955-08-28T00:00:00Z,10074,Mokhtar,F,1990-08-13T00:00:00Z,5,Bernatsky,38992
+1960-03-09T00:00:00Z,10075,Gao,F,1987-03-19T00:00:00Z,5,Dolinsky,51956
+1952-06-13T00:00:00Z,10076,Erez,F,1985-07-09T00:00:00Z,3,Ritzmann,62405
+1964-04-18T00:00:00Z,10077,Mona,M,1990-03-02T00:00:00Z,5,Azuma,46595
+1959-12-25T00:00:00Z,10078,Danel,F,1987-05-26T00:00:00Z,2,Mondadori,69904
+1961-10-05T00:00:00Z,10079,Kshitij,F,1986-03-27T00:00:00Z,2,Gils,32263
+1957-12-03T00:00:00Z,10080,Premal,M,1985-11-19T00:00:00Z,5,Baek,52833
+1960-12-17T00:00:00Z,10081,Zhongwei,M,1986-10-30T00:00:00Z,2,Rosen,50128
+1963-09-09T00:00:00Z,10082,Parviz,M,1990-01-03T00:00:00Z,4,Lortz,49818
+1959-07-23T00:00:00Z,10083,Vishv,M,1987-03-31T00:00:00Z,1,Zockler,
+1960-05-25T00:00:00Z,10084,Tuval,M,1995-12-15T00:00:00Z,1,Kalloufi,
+1962-11-07T00:00:00Z,10085,Kenroku,M,1994-04-09T00:00:00Z,5,Malabarba,
+1962-11-19T00:00:00Z,10086,Somnath,M,1990-02-16T00:00:00Z,1,Foote,
+1959-07-23T00:00:00Z,10087,Xinglin,F,1986-09-08T00:00:00Z,5,Eugenio,
+1954-02-25T00:00:00Z,10088,Jungsoon,F,1988-09-02T00:00:00Z,5,Syrzycki,
+1963-03-21T00:00:00Z,10089,Sudharsan,F,1986-08-12T00:00:00Z,4,Flasterstein,
+1961-05-30T00:00:00Z,10090,Kendra,M,1986-03-14T00:00:00Z,2,Hofting,44956
+1955-10-04T00:00:00Z,10091,Amabile,M,1992-11-18T00:00:00Z,3,Gomatam,38645
+1964-10-18T00:00:00Z,10092,Valdiodio,F,1989-09-22T00:00:00Z,1,Niizuma,25976
+1964-06-11T00:00:00Z,10093,Sailaja,M,1996-11-05T00:00:00Z,3,Desikan,45656
+1957-05-25T00:00:00Z,10094,Arumugam,F,1987-04-18T00:00:00Z,5,Ossenbruggen,66817
+1965-01-03T00:00:00Z,10095,Hilari,M,1986-07-15T00:00:00Z,4,Morton,37702
+1954-09-16T00:00:00Z,10096,Jayson,M,1990-01-14T00:00:00Z,4,Mandell,43889
+1952-02-27T00:00:00Z,10097,Remzi,M,1990-09-15T00:00:00Z,3,Waschkowski,71165
+1961-09-23T00:00:00Z,10098,Sreekrishna,F,1985-05-13T00:00:00Z,4,Servieres,44817
+1956-05-25T00:00:00Z,10099,Valter,F,1988-10-18T00:00:00Z,2,Sullins,73578
+1953-04-21T00:00:00Z,10100,Hironobu,F,1987-09-21T00:00:00Z,4,Haraldson,68431
diff --git a/x-pack/qa/sql/src/main/resources/functions.csv-spec b/x-pack/qa/sql/src/main/resources/functions.csv-spec
index 1a610aec048..3622cfe0433 100644
--- a/x-pack/qa/sql/src/main/resources/functions.csv-spec
+++ b/x-pack/qa/sql/src/main/resources/functions.csv-spec
@@ -407,3 +407,26 @@ SELECT CONCAT(CONCAT(SUBSTRING("first_name",1,LENGTH("first_name")-2),UCASE(LEFT
 ---------------+---------------------------------------------
 AlejandRo      |2
 ;
+
+
+checkColumnNameWithNestedArithmeticFunctionCallsOnTableColumn
+SELECT CHAR(emp_no % 10000) FROM "test_emp" WHERE emp_no > 10064 ORDER BY emp_no LIMIT 1;
+
+CHAR(((emp_no) % 10000)):s
+A
+;
+
+checkColumnNameWithComplexNestedArithmeticFunctionCallsOnTableColumn1
+SELECT CHAR(emp_no % (7000 + 3000)) FROM "test_emp" WHERE emp_no > 10065 ORDER BY emp_no LIMIT 1;
+
+CHAR(((emp_no) % ((7000 + 3000)))):s
+B
+;
+
+
+checkColumnNameWithComplexNestedArithmeticFunctionCallsOnTableColumn2
+SELECT CHAR((emp_no % (emp_no - 1 + 1)) + 67) FROM "test_emp" WHERE emp_no > 10066 ORDER BY emp_no LIMIT 1;
+
+CHAR(((((emp_no) % (((((emp_no) - 1)) + 1)))) + 67)):s
+C
+;
diff --git a/x-pack/qa/sql/src/main/resources/math.sql-spec b/x-pack/qa/sql/src/main/resources/math.sql-spec
index e38de2aa6bc..6452d2a3ac0 100644
--- a/x-pack/qa/sql/src/main/resources/math.sql-spec
+++ b/x-pack/qa/sql/src/main/resources/math.sql-spec
@@ -128,7 +128,9 @@ mathATan2
 // tag::atan2
 SELECT ATAN2(emp_no, emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no;
 // end::atan2
-mathPower
 // tag::power
+mathPowerPositive
 SELECT POWER(emp_no, 2) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no;
+mathPowerNegative
+SELECT POWER(salary, -1) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no;
 // end::power
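[Editor's note, not part of the patch: the new mathPowerNegative case above exercises POWER with a negative exponent, which yields the reciprocal of the base as a double rather than an integer square. A minimal sketch of the expected shape, assuming the standard test_emp data where emp_no 10001 has salary 57305 and first_name Georgi (per the CSV earlier in this patch); the value shown is approximate:

SELECT POWER(salary, -1) AS m, first_name FROM "test_emp" WHERE emp_no = 10001;
-- m is roughly 1.745E-5 (that is, 1 / 57305), first_name = Georgi]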
diff --git a/x-pack/qa/sql/src/main/resources/setup_test_emp_with_nulls.sql b/x-pack/qa/sql/src/main/resources/setup_test_emp_with_nulls.sql
new file mode 100644
index 00000000000..c6afaa9018a
--- /dev/null
+++ b/x-pack/qa/sql/src/main/resources/setup_test_emp_with_nulls.sql
@@ -0,0 +1,12 @@
+DROP TABLE IF EXISTS "test_emp_with_nulls";
+CREATE TABLE "test_emp_with_nulls" (
+    "birth_date" TIMESTAMP WITH TIME ZONE,
+    "emp_no" INT,
+    "first_name" VARCHAR(50),
+    "gender" VARCHAR(1),
+    "hire_date" TIMESTAMP WITH TIME ZONE,
+    "languages" TINYINT,
+    "last_name" VARCHAR(50),
+    "salary" INT
+  )
+  AS SELECT * FROM CSVREAD('classpath:/employees_with_nulls.csv');
\ No newline at end of file
diff --git a/x-pack/qa/third-party/hipchat/build.gradle b/x-pack/qa/third-party/hipchat/build.gradle
index 03b6c319698..2b2ee7fcbbf 100644
--- a/x-pack/qa/third-party/hipchat/build.gradle
+++ b/x-pack/qa/third-party/hipchat/build.gradle
@@ -4,7 +4,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile project(path: xpackModule('watcher'), configuration: 'runtime')
 }
 
diff --git a/x-pack/qa/third-party/jira/build.gradle b/x-pack/qa/third-party/jira/build.gradle
index 3814c8e9a53..283f9688699 100644
--- a/x-pack/qa/third-party/jira/build.gradle
+++ b/x-pack/qa/third-party/jira/build.gradle
@@ -7,7 +7,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile project(path: xpackModule('watcher'), configuration: 'runtime')
 }
 
diff --git a/x-pack/qa/third-party/pagerduty/build.gradle b/x-pack/qa/third-party/pagerduty/build.gradle
index c0f337e160e..12758989d0f 100644
--- a/x-pack/qa/third-party/pagerduty/build.gradle
+++ b/x-pack/qa/third-party/pagerduty/build.gradle
@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile project(path: xpackModule('watcher'), configuration: 'runtime')
 }
 
diff --git a/x-pack/qa/third-party/slack/build.gradle b/x-pack/qa/third-party/slack/build.gradle
index 431752765f3..f1bcd98cff6 100644
--- a/x-pack/qa/third-party/slack/build.gradle
+++ b/x-pack/qa/third-party/slack/build.gradle
@@ -5,7 +5,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile project(path: xpackModule('watcher'), configuration: 'runtime')
 }
 
diff --git a/x-pack/qa/transport-client-tests/build.gradle b/x-pack/qa/transport-client-tests/build.gradle
index a94ad8fd592..3ece6dd1147 100644
--- a/x-pack/qa/transport-client-tests/build.gradle
+++ b/x-pack/qa/transport-client-tests/build.gradle
@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime')
 }
 
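[Editor's note, not part of the patch: the build.gradle hunks above and below all make the same one-line substitution, replacing a project dependency on x-pack core's shadow configuration with the published x-pack-core artifact. A minimal sketch of the recurring before/after shape, using only lines taken verbatim from the hunks:

dependencies {
  // before: resolve x-pack core through its shadow configuration
  //   testCompile project(path: xpackModule('core'), configuration: 'shadow')
  // after: resolve the published artifact by its Maven coordinates
  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
}]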
diff --git a/x-pack/test/feature-aware/build.gradle b/x-pack/test/feature-aware/build.gradle
index f6a1f6cb16f..11b0e67183c 100644
--- a/x-pack/test/feature-aware/build.gradle
+++ b/x-pack/test/feature-aware/build.gradle
@@ -3,7 +3,7 @@ apply plugin: 'elasticsearch.build'
 dependencies {
   compile 'org.ow2.asm:asm:6.2'
   compile "org.elasticsearch:elasticsearch:${version}"
-  compile project(path: xpackModule('core'), configuration: 'shadow')
+  compile "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile "org.elasticsearch.test:framework:${version}"
 }
 
diff --git a/x-pack/transport-client/build.gradle b/x-pack/transport-client/build.gradle
index 7155dad5ee6..a96f4146fbf 100644
--- a/x-pack/transport-client/build.gradle
+++ b/x-pack/transport-client/build.gradle
@@ -1,5 +1,3 @@
-import org.elasticsearch.gradle.precommit.PrecommitTasks
-
 apply plugin: 'elasticsearch.build'
 apply plugin: 'nebula.maven-base-publish'
 apply plugin: 'nebula.maven-scm'
@@ -10,7 +8,7 @@ archivesBaseName = 'x-pack-transport'
 dependencies {
   // this "api" dependency looks weird, but it is correct, as it contains
   // all of x-pack for now, and transport client will be going away in the future.
-  compile project(path: xpackModule('core'), configuration: 'shadow')
+  compile "org.elasticsearch.plugin:x-pack-core:${version}"
   compile "org.elasticsearch.client:transport:${version}"
   testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
   testCompile "junit:junit:${versions.junit}"
@@ -22,8 +20,7 @@ dependencyLicenses.enabled = false
 forbiddenApisTest {
   // we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to
   // be pulled in
-  signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
-                    PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')]
+  replaceSignatureFiles 'jdk-signatures', 'es-all-signatures'
 }
 
 namingConventions {