Merge branch 'master' into index-lifecycle

Gordon Brown 2018-08-23 11:52:59 -06:00
commit 1f13c77b49
327 changed files with 10754 additions and 1665 deletions

View File

@@ -325,21 +325,19 @@ common configurations in our build and how we use them:
 <dl>
 <dt>`compile`</dt><dd>Code that is on the classpath at both compile and
-runtime. If the [`shadow`][shadow-plugin] plugin is applied to the project then
-this code is bundled into the jar produced by the project.</dd>
+runtime.</dd>
 <dt>`runtime`</dt><dd>Code that is not on the classpath at compile time but is
 on the classpath at runtime. We mostly use this configuration to make sure that
 we do not accidentally compile against dependencies of our dependencies also
 known as "transitive" dependencies".</dd>
-<dt>`compileOnly`</dt><dd>Code that is on the classpath at comile time but that
+<dt>`compileOnly`</dt><dd>Code that is on the classpath at compile time but that
 should not be shipped with the project because it is "provided" by the runtime
 somehow. Elasticsearch plugins use this configuration to include dependencies
 that are bundled with Elasticsearch's server.</dd>
-<dt>`shadow`</dt><dd>Only available in projects with the shadow plugin. Code
-that is on the classpath at both compile and runtime but it *not* bundled into
-the jar produced by the project. If you depend on a project with the `shadow`
-plugin then you need to depend on this configuration because it will bring
-along all of the dependencies you need at runtime.</dd>
+<dt>`bundle`</dt><dd>Only available in projects with the shadow plugin,
+dependencies with this configuration are bundled into the jar produced by the
+build. Since IDEs do not understand this configuration we rig them to treat
+dependencies in this configuration as `compile` dependencies.</dd>
 <dt>`testCompile`</dt><dd>Code that is on the classpath for compiling tests
 that are part of this project but not production code. The canonical example
 of this is `junit`.</dd>

View File

@@ -22,6 +22,7 @@ import org.elasticsearch.gradle.LoggedExec
 import org.elasticsearch.gradle.Version
 import org.elasticsearch.gradle.VersionCollection
 import org.elasticsearch.gradle.VersionProperties
+import org.elasticsearch.gradle.plugin.PluginBuildPlugin
 import org.gradle.plugins.ide.eclipse.model.SourceFolder
 import org.gradle.util.GradleVersion
 import org.gradle.util.DistributionLocator
@@ -304,7 +305,7 @@ subprojects {
   // org.elasticsearch:elasticsearch must be the last one or all the links for the
   // other packages (e.g org.elasticsearch.client) will point to server rather than
   // their own artifacts.
-  if (project.plugins.hasPlugin(BuildPlugin)) {
+  if (project.plugins.hasPlugin(BuildPlugin) || project.plugins.hasPlugin(PluginBuildPlugin)) {
     String artifactsHost = VersionProperties.elasticsearch.isSnapshot() ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co"
     Closure sortClosure = { a, b -> b.group <=> a.group }
     Closure depJavadocClosure = { shadowed, dep ->
@@ -322,13 +323,6 @@ subprojects {
        */
       project.evaluationDependsOn(upstreamProject.path)
       project.javadoc.source += upstreamProject.javadoc.source
-      /*
-       * Do not add those projects to the javadoc classpath because
-       * we are going to resolve them with their source instead.
-       */
-      project.javadoc.classpath = project.javadoc.classpath.filter { f ->
-        false == upstreamProject.configurations.archives.artifacts.files.files.contains(f)
-      }
       /*
        * Instead we need the upstream project's javadoc classpath so
        * we don't barf on the classes that it references.
@@ -345,16 +339,16 @@ subprojects {
     project.configurations.compile.dependencies
       .findAll()
       .toSorted(sortClosure)
-      .each({ c -> depJavadocClosure(hasShadow, c) })
+      .each({ c -> depJavadocClosure(false, c) })
     project.configurations.compileOnly.dependencies
       .findAll()
       .toSorted(sortClosure)
-      .each({ c -> depJavadocClosure(hasShadow, c) })
+      .each({ c -> depJavadocClosure(false, c) })
     if (hasShadow) {
-      project.configurations.shadow.dependencies
+      project.configurations.bundle.dependencies
         .findAll()
         .toSorted(sortClosure)
-        .each({ c -> depJavadocClosure(false, c) })
+        .each({ c -> depJavadocClosure(true, c) })
     }
   }
 }
@@ -523,25 +517,18 @@ allprojects {
 allprojects {
   /*
   * IntelliJ and Eclipse don't know about the shadow plugin so when we're
-   * in "IntelliJ mode" or "Eclipse mode" add "runtime" dependencies
-   * eveywhere where we see a "shadow" dependency which will cause them to
-   * reference shadowed projects directly rather than rely on the shadowing
-   * to include them. This is the correct thing for it to do because it
-   * doesn't run the jar shadowing at all. This isn't needed for the project
+   * in "IntelliJ mode" or "Eclipse mode" switch "bundle" dependencies into
+   * regular "compile" dependencies. This isn't needed for the project
   * itself because the IDE configuration is done by SourceSets but it is
   * *is* needed for projects that depends on the project doing the shadowing.
   * Without this they won't properly depend on the shadowed project.
   */
  if (isEclipse || isIdea) {
-    configurations.all { Configuration configuration ->
-      dependencies.all { Dependency dep ->
-        if (dep instanceof ProjectDependency) {
-          if (dep.getTargetConfiguration() == 'shadow') {
-            configuration.dependencies.add(project.dependencies.project(path: dep.dependencyProject.path, configuration: 'runtime'))
-          }
-        }
-      }
-    }
+    project.plugins.withType(ShadowPlugin).whenPluginAdded {
+      project.afterEvaluate {
+        project.configurations.compile.extendsFrom project.configurations.bundle
+      }
+    }
  }
 }

View File

@@ -79,8 +79,9 @@ class BuildPlugin implements Plugin<Project> {
         }
         project.pluginManager.apply('java')
         project.pluginManager.apply('carrotsearch.randomized-testing')
-        // these plugins add lots of info to our jars
+        configureConfigurations(project)
         configureJars(project) // jar config must be added before info broker
+        // these plugins add lots of info to our jars
         project.pluginManager.apply('nebula.info-broker')
         project.pluginManager.apply('nebula.info-basic')
         project.pluginManager.apply('nebula.info-java')
@@ -91,8 +92,8 @@ class BuildPlugin implements Plugin<Project> {
         globalBuildInfo(project)
         configureRepositories(project)
-        configureConfigurations(project)
         project.ext.versions = VersionProperties.versions
+        configureSourceSets(project)
         configureCompile(project)
         configureJavadoc(project)
         configureSourcesJar(project)
@@ -421,8 +422,10 @@ class BuildPlugin implements Plugin<Project> {
         project.configurations.compile.dependencies.all(disableTransitiveDeps)
         project.configurations.testCompile.dependencies.all(disableTransitiveDeps)
         project.configurations.compileOnly.dependencies.all(disableTransitiveDeps)
         project.plugins.withType(ShadowPlugin).whenPluginAdded {
-            project.configurations.shadow.dependencies.all(disableTransitiveDeps)
+            Configuration bundle = project.configurations.create('bundle')
+            bundle.dependencies.all(disableTransitiveDeps)
         }
     }
@@ -528,12 +531,16 @@ class BuildPlugin implements Plugin<Project> {
         project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask ->
             // The GenerateMavenPom task is aggressive about setting the destination, instead of fighting it,
             // just make a copy.
-            generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-${project.version}.pom"
+            generatePOMTask.ext.pomFileName = null
             doLast {
                 project.copy {
                     from generatePOMTask.destination
                     into "${project.buildDir}/distributions"
-                    rename { generatePOMTask.ext.pomFileName }
+                    rename {
+                        generatePOMTask.ext.pomFileName == null ?
+                            "${project.archivesBaseName}-${project.version}.pom" :
+                            generatePOMTask.ext.pomFileName
+                    }
                 }
             }
             // build poms with assemble (if the assemble task exists)
@@ -556,30 +563,6 @@ class BuildPlugin implements Plugin<Project> {
                 publications {
                     nebula(MavenPublication) {
                         artifacts = [ project.tasks.shadowJar ]
-                        artifactId = project.archivesBaseName
-                        /*
-                         * Configure the pom to include the "shadow" as compile dependencies
-                         * because that is how we're using them but remove all other dependencies
-                         * because they've been shaded into the jar.
-                         */
-                        pom.withXml { XmlProvider xml ->
-                            Node root = xml.asNode()
-                            root.remove(root.dependencies)
-                            Node dependenciesNode = root.appendNode('dependencies')
-                            project.configurations.shadow.allDependencies.each {
-                                if (false == it instanceof SelfResolvingDependency) {
-                                    Node dependencyNode = dependenciesNode.appendNode('dependency')
-                                    dependencyNode.appendNode('groupId', it.group)
-                                    dependencyNode.appendNode('artifactId', it.name)
-                                    dependencyNode.appendNode('version', it.version)
-                                    dependencyNode.appendNode('scope', 'compile')
-                                }
-                            }
-                            // Be tidy and remove the element if it is empty
-                            if (dependenciesNode.children.empty) {
-                                root.remove(dependenciesNode)
-                            }
-                        }
                     }
                 }
             }
@@ -587,6 +570,20 @@ class BuildPlugin implements Plugin<Project> {
         }
     }
+    /**
+     * Add dependencies that we are going to bundle to the compile classpath.
+     */
+    static void configureSourceSets(Project project) {
+        project.plugins.withType(ShadowPlugin).whenPluginAdded {
+            ['main', 'test'].each {name ->
+                SourceSet sourceSet = project.sourceSets.findByName(name)
+                if (sourceSet != null) {
+                    sourceSet.compileClasspath += project.configurations.bundle
+                }
+            }
+        }
+    }
     /** Adds compiler settings to the project */
     static void configureCompile(Project project) {
         if (project.compilerJavaVersion < JavaVersion.VERSION_1_10) {
@@ -764,9 +761,16 @@ class BuildPlugin implements Plugin<Project> {
                  * better to be safe
                  */
                 mergeServiceFiles()
+                /*
+                 * Bundle dependencies of the "bundled" configuration.
+                 */
+                configurations = [project.configurations.bundle]
             }
             // Make sure we assemble the shadow jar
             project.tasks.assemble.dependsOn project.tasks.shadowJar
+            project.artifacts {
+                apiElements project.tasks.shadowJar
+            }
         }
     }
@@ -873,13 +877,8 @@ class BuildPlugin implements Plugin<Project> {
             exclude '**/*$*.class'
             project.plugins.withType(ShadowPlugin).whenPluginAdded {
-                /*
-                 * If we make a shaded jar we test against it.
-                 */
+                // Test against a shadow jar if we made one
                 classpath -= project.tasks.compileJava.outputs.files
-                classpath -= project.configurations.compile
-                classpath -= project.configurations.runtime
-                classpath += project.configurations.shadow
                 classpath += project.tasks.shadowJar.outputs.files
                 dependsOn project.tasks.shadowJar
             }
@@ -905,26 +904,6 @@ class BuildPlugin implements Plugin<Project> {
             additionalTest.dependsOn(project.tasks.testClasses)
             project.check.dependsOn(additionalTest)
         });
-        project.plugins.withType(ShadowPlugin).whenPluginAdded {
-            /*
-             * We need somewhere to configure dependencies that we don't wish
-             * to shade into the jar. The shadow plugin creates a "shadow"
-             * configuration which is *almost* exactly that. It is never
-             * bundled into the shaded jar but is used for main source
-             * compilation. Unfortunately, by default it is not used for
-             * *test* source compilation and isn't used in tests at all. This
-             * change makes it available for test compilation.
-             *
-             * Note that this isn't going to work properly with qa projects
-             * but they have no business applying the shadow plugin in the
-             * firstplace.
-             */
-            SourceSet testSourceSet = project.sourceSets.findByName('test')
-            if (testSourceSet != null) {
-                testSourceSet.compileClasspath += project.configurations.shadow
-            }
-        }
     }

     private static configurePrecommit(Project project) {
@@ -936,7 +915,7 @@ class BuildPlugin implements Plugin<Project> {
             it.group.startsWith('org.elasticsearch') == false
         } - project.configurations.compileOnly
         project.plugins.withType(ShadowPlugin).whenPluginAdded {
-            project.dependencyLicenses.dependencies += project.configurations.shadow.fileCollection {
+            project.dependencyLicenses.dependencies += project.configurations.bundle.fileCollection {
                 it.group.startsWith('org.elasticsearch') == false
             }
         }
@@ -947,7 +926,7 @@ class BuildPlugin implements Plugin<Project> {
         deps.runtimeConfiguration = project.configurations.runtime
         project.plugins.withType(ShadowPlugin).whenPluginAdded {
             deps.runtimeConfiguration = project.configurations.create('infoDeps')
-            deps.runtimeConfiguration.extendsFrom(project.configurations.runtime, project.configurations.shadow)
+            deps.runtimeConfiguration.extendsFrom(project.configurations.runtime, project.configurations.bundle)
         }
         deps.compileOnlyConfiguration = project.configurations.compileOnly
         project.afterEvaluate {

View File

@@ -157,11 +157,10 @@ public class PluginBuildPlugin extends BuildPlugin {
             from pluginMetadata // metadata (eg custom security policy)
             /*
              * If the plugin is using the shadow plugin then we need to bundle
-             * "shadow" things rather than the default jar and dependencies so
-             * we don't hit jar hell.
+             * that shadow jar.
              */
             from { project.plugins.hasPlugin(ShadowPlugin) ? project.shadowJar : project.jar }
-            from { project.plugins.hasPlugin(ShadowPlugin) ? project.configurations.shadow : project.configurations.runtime - project.configurations.compileOnly }
+            from project.configurations.runtime - project.configurations.compileOnly
             // extra files for the plugin to go into the zip
             from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging
             from('src/main') {

View File

@@ -19,6 +19,7 @@
 package org.elasticsearch.gradle.precommit

+import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
 import org.elasticsearch.gradle.LoggedExec
 import org.gradle.api.file.FileCollection
 import org.gradle.api.tasks.OutputFile
@@ -39,6 +40,9 @@ public class JarHellTask extends LoggedExec {
     public JarHellTask() {
         project.afterEvaluate {
             FileCollection classpath = project.sourceSets.test.runtimeClasspath
+            if (project.plugins.hasPlugin(ShadowPlugin)) {
+                classpath += project.configurations.bundle
+            }
             inputs.files(classpath)
             dependsOn(classpath)
             description = "Runs CheckJarHell on ${classpath}"

View File

@@ -18,18 +18,12 @@
  */
 package org.elasticsearch.gradle.precommit

-import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
-import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin
 import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask
-import org.gradle.api.JavaVersion
 import org.gradle.api.Project
 import org.gradle.api.Task
-import org.gradle.api.file.FileCollection
+import org.gradle.api.artifacts.Configuration
 import org.gradle.api.plugins.JavaBasePlugin
 import org.gradle.api.plugins.quality.Checkstyle
-import org.gradle.api.tasks.JavaExec
-import org.gradle.api.tasks.StopExecutionException

 /**
  * Validation tasks which should be run before committing. These run before tests.
  */
@@ -38,8 +32,8 @@ class PrecommitTasks {
     /** Adds a precommit task, which depends on non-test verification tasks. */
     public static Task create(Project project, boolean includeDependencyLicenses) {
         List<Task> precommitTasks = [
-            configureForbiddenApis(project),
             configureCheckstyle(project),
+            configureForbiddenApisCli(project),
             configureNamingConventions(project),
             project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
             project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
@@ -48,9 +42,6 @@ class PrecommitTasks {
             project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class)
         ]

-        // Configure it but don't add it as a dependency yet
-        configureForbiddenApisCli(project)
-
         // tasks with just tests don't need dependency licenses, so this flag makes adding
         // the task optional
         if (includeDependencyLicenses) {
@@ -84,77 +75,60 @@ class PrecommitTasks {
         return project.tasks.create(precommitOptions)
     }

-    private static Task configureForbiddenApis(Project project) {
-        project.pluginManager.apply(ForbiddenApisPlugin.class)
-        project.forbiddenApis {
-            failOnUnsupportedJava = false
-            bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-non-portable', 'jdk-system-out']
-            signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'),
-                              getClass().getResource('/forbidden/es-all-signatures.txt')]
-            suppressAnnotations = ['**.SuppressForbidden']
-        }
-        project.tasks.withType(CheckForbiddenApis) {
-            // we do not use the += operator to add signatures, as conventionMappings of Gradle do not work when it's configured using withType:
-            if (name.endsWith('Test')) {
-                signaturesURLs = project.forbiddenApis.signaturesURLs +
-                    [ getClass().getResource('/forbidden/es-test-signatures.txt'), getClass().getResource('/forbidden/http-signatures.txt') ]
-            } else {
-                signaturesURLs = project.forbiddenApis.signaturesURLs +
-                    [ getClass().getResource('/forbidden/es-server-signatures.txt') ]
-            }
-            // forbidden apis doesn't support Java 11, so stop at 10
-            String targetMajorVersion = (project.compilerJavaVersion.compareTo(JavaVersion.VERSION_1_10) > 0 ?
-                JavaVersion.VERSION_1_10 :
-                project.compilerJavaVersion).getMajorVersion()
-            targetCompatibility = Integer.parseInt(targetMajorVersion) >= 9 ?targetMajorVersion : "1.${targetMajorVersion}"
-        }
-        Task forbiddenApis = project.tasks.findByName('forbiddenApis')
-        forbiddenApis.group = "" // clear group, so this does not show up under verification tasks
-        return forbiddenApis
-    }
-
     private static Task configureForbiddenApisCli(Project project) {
-        project.configurations.create("forbiddenApisCliJar")
+        Configuration forbiddenApisConfiguration = project.configurations.create("forbiddenApisCliJar")
         project.dependencies {
-            forbiddenApisCliJar 'de.thetaphi:forbiddenapis:2.5'
+            forbiddenApisCliJar ('de.thetaphi:forbiddenapis:2.5')
         }
-        Task forbiddenApisCli = project.tasks.create('forbiddenApisCli')
+        Task forbiddenApisCli = project.tasks.create('forbiddenApis')
         project.sourceSets.forEach { sourceSet ->
             forbiddenApisCli.dependsOn(
-                project.tasks.create(sourceSet.getTaskName('forbiddenApisCli', null), JavaExec) {
+                project.tasks.create(sourceSet.getTaskName('forbiddenApis', null), ForbiddenApisCliTask) {
                     ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources')
                     dependsOn(buildResources)
-                    classpath = project.files(
-                        project.configurations.forbiddenApisCliJar,
+                    execAction = { spec ->
+                        spec.classpath = project.files(
+                            project.configurations.forbiddenApisCliJar,
+                            sourceSet.compileClasspath,
+                            sourceSet.runtimeClasspath
+                        )
+                        spec.executable = "${project.runtimeJavaHome}/bin/java"
+                    }
+                    inputs.files(
+                        forbiddenApisConfiguration,
                         sourceSet.compileClasspath,
                         sourceSet.runtimeClasspath
                     )
-                    main = 'de.thetaphi.forbiddenapis.cli.CliMain'
-                    executable = "${project.runtimeJavaHome}/bin/java"
-                    args "-b", 'jdk-unsafe-1.8'
-                    args "-b", 'jdk-deprecated-1.8'
-                    args "-b", 'jdk-non-portable'
-                    args "-b", 'jdk-system-out'
-                    args "-f", buildResources.copy("forbidden/jdk-signatures.txt")
-                    args "-f", buildResources.copy("forbidden/es-all-signatures.txt")
-                    args "--suppressannotation", '**.SuppressForbidden'
+                    targetCompatibility = project.compilerJavaVersion
+                    bundledSignatures = [
+                        "jdk-unsafe", "jdk-deprecated", "jdk-non-portable", "jdk-system-out"
+                    ]
+                    signaturesFiles = project.files(
+                        buildResources.copy("forbidden/jdk-signatures.txt"),
+                        buildResources.copy("forbidden/es-all-signatures.txt")
+                    )
+                    suppressAnnotations = ['**.SuppressForbidden']
                     if (sourceSet.name == 'test') {
-                        args "-f", buildResources.copy("forbidden/es-test-signatures.txt")
-                        args "-f", buildResources.copy("forbidden/http-signatures.txt")
+                        signaturesFiles += project.files(
+                            buildResources.copy("forbidden/es-test-signatures.txt"),
+                            buildResources.copy("forbidden/http-signatures.txt")
+                        )
                     } else {
-                        args "-f", buildResources.copy("forbidden/es-server-signatures.txt")
+                        signaturesFiles += project.files(buildResources.copy("forbidden/es-server-signatures.txt"))
                     }
                     dependsOn sourceSet.classesTaskName
-                    doFirst {
-                        // Forbidden APIs expects only existing dirs, and requires at least one
-                        FileCollection existingOutputs = sourceSet.output.classesDirs
-                            .filter { it.exists() }
-                        if (existingOutputs.isEmpty()) {
-                            throw new StopExecutionException("${sourceSet.name} has no outputs")
-                        }
-                        existingOutputs.forEach { args "-d", it }
-                    }
+                    classesDirs = sourceSet.output.classesDirs
+                    ext.replaceSignatureFiles = { String... names ->
+                        signaturesFiles = project.files(
+                            names.collect { buildResources.copy("forbidden/${it}.txt") }
+                        )
+                    }
+                    ext.addSignatureFiles = { String... names ->
+                        signaturesFiles += project.files(
+                            names.collect { buildResources.copy("forbidden/${it}.txt") }
+                        )
+                    }
                 }
             )

View File

@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.gradle.precommit;

+import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
 import org.apache.tools.ant.BuildEvent;
 import org.apache.tools.ant.BuildException;
 import org.apache.tools.ant.BuildListener;
@@ -82,6 +83,11 @@ public class ThirdPartyAuditTask extends AntTask {
             configuration = project.configurations.findByName('testCompile')
         }
         assert configuration != null
+        if (project.plugins.hasPlugin(ShadowPlugin)) {
+            Configuration original = configuration
+            configuration = project.configurations.create('thirdPartyAudit')
+            configuration.extendsFrom(original, project.configurations.bundle)
+        }
         if (compileOnly == null) {
             classpath = configuration
         } else {

View File

@@ -35,6 +35,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
@@ -105,7 +106,7 @@ public class ExportElasticsearchBuildResourcesTask extends DefaultTask {
             if (is == null) {
                 throw new GradleException("Can't export `" + resourcePath + "` from build-tools: not found");
             }
-            Files.copy(is, destination);
+            Files.copy(is, destination, StandardCopyOption.REPLACE_EXISTING);
         } catch (IOException e) {
             throw new GradleException("Can't write resource `" + resourcePath + "` to " + destination, e);
         }

View File

@@ -0,0 +1,154 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.precommit;
import de.thetaphi.forbiddenapis.cli.CliMain;
import org.gradle.api.Action;
import org.gradle.api.DefaultTask;
import org.gradle.api.JavaVersion;
import org.gradle.api.file.FileCollection;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.InputFiles;
import org.gradle.api.tasks.OutputFile;
import org.gradle.api.tasks.SkipWhenEmpty;
import org.gradle.api.tasks.TaskAction;
import org.gradle.process.JavaExecSpec;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
public class ForbiddenApisCliTask extends DefaultTask {
private FileCollection signaturesFiles;
private List<String> signatures = new ArrayList<>();
private Set<String> bundledSignatures = new LinkedHashSet<>();
private Set<String> suppressAnnotations = new LinkedHashSet<>();
private JavaVersion targetCompatibility;
private FileCollection classesDirs;
private Action<JavaExecSpec> execAction;
public JavaVersion getTargetCompatibility() {
return targetCompatibility;
}
public void setTargetCompatibility(JavaVersion targetCompatibility) {
this.targetCompatibility = targetCompatibility;
}
public Action<JavaExecSpec> getExecAction() {
return execAction;
}
public void setExecAction(Action<JavaExecSpec> execAction) {
this.execAction = execAction;
}
@OutputFile
public File getMarkerFile() {
return new File(
new File(getProject().getBuildDir(), "precommit"),
getName()
);
}
@InputFiles
@SkipWhenEmpty
public FileCollection getClassesDirs() {
return classesDirs.filter(File::exists);
}
public void setClassesDirs(FileCollection classesDirs) {
this.classesDirs = classesDirs;
}
@InputFiles
public FileCollection getSignaturesFiles() {
return signaturesFiles;
}
public void setSignaturesFiles(FileCollection signaturesFiles) {
this.signaturesFiles = signaturesFiles;
}
@Input
public List<String> getSignatures() {
return signatures;
}
public void setSignatures(List<String> signatures) {
this.signatures = signatures;
}
@Input
public Set<String> getBundledSignatures() {
return bundledSignatures;
}
public void setBundledSignatures(Set<String> bundledSignatures) {
this.bundledSignatures = bundledSignatures;
}
@Input
public Set<String> getSuppressAnnotations() {
return suppressAnnotations;
}
public void setSuppressAnnotations(Set<String> suppressAnnotations) {
this.suppressAnnotations = suppressAnnotations;
}
@TaskAction
public void runForbiddenApisAndWriteMarker() throws IOException {
getProject().javaexec((JavaExecSpec spec) -> {
execAction.execute(spec);
spec.setMain(CliMain.class.getName());
// build the command line
getSignaturesFiles().forEach(file -> spec.args("-f", file.getAbsolutePath()));
getSuppressAnnotations().forEach(annotation -> spec.args("--suppressannotation", annotation));
getBundledSignatures().forEach(bundled -> {
// there's no option for target compatibility so we have to interpret it
final String prefix;
if (bundled.equals("jdk-system-out") ||
bundled.equals("jdk-reflection") ||
bundled.equals("jdk-non-portable")) {
prefix = "";
} else {
prefix = "-" + (
getTargetCompatibility().compareTo(JavaVersion.VERSION_1_9) >= 0 ?
getTargetCompatibility().getMajorVersion() :
"1." + getTargetCompatibility().getMajorVersion())
;
}
spec.args("-b", bundled + prefix);
}
);
getClassesDirs().forEach(dir ->
spec.args("-d", dir)
);
});
Files.write(getMarkerFile().toPath(), Collections.emptyList());
}
}
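The bundled-signature names passed to the forbidden-apis CLI are derived from `targetCompatibility` because the CLI has no separate flag for it: version-agnostic bundles keep their plain name, the rest get a `-<major>` (Java 9+) or `-1.<major>` suffix. A minimal standalone sketch of that naming rule follows; the class name and `main` method are purely illustrative and not part of this change.

```java
import org.gradle.api.JavaVersion;

public class BundledSignatureNames {
    // Mirrors the prefix logic in runForbiddenApisAndWriteMarker(): version-agnostic
    // bundles keep their plain name, the rest get "-<major>" or "-1.<major>".
    static String cliName(String bundled, JavaVersion target) {
        if (bundled.equals("jdk-system-out")
                || bundled.equals("jdk-reflection")
                || bundled.equals("jdk-non-portable")) {
            return bundled;
        }
        String major = target.compareTo(JavaVersion.VERSION_1_9) >= 0
                ? target.getMajorVersion()
                : "1." + target.getMajorVersion();
        return bundled + "-" + major;
    }

    public static void main(String[] args) {
        System.out.println(cliName("jdk-unsafe", JavaVersion.VERSION_1_8));       // jdk-unsafe-1.8
        System.out.println(cliName("jdk-deprecated", JavaVersion.VERSION_1_10));  // jdk-deprecated-10
        System.out.println(cliName("jdk-non-portable", JavaVersion.VERSION_1_8)); // jdk-non-portable
    }
}
```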

View File

@@ -16,8 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-import org.elasticsearch.gradle.precommit.PrecommitTasks
 import org.elasticsearch.gradle.test.RestIntegTestTask
 import org.gradle.api.internal.provider.Providers
@@ -47,13 +45,13 @@ dependencies {
   * Everything in the "shadow" configuration is *not* copied into the
   * shadowJar.
   */
-  shadow "org.elasticsearch:elasticsearch:${version}"
-  shadow "org.elasticsearch.client:elasticsearch-rest-client:${version}"
-  shadow "org.elasticsearch.plugin:parent-join-client:${version}"
-  shadow "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}"
-  shadow "org.elasticsearch.plugin:rank-eval-client:${version}"
-  shadow "org.elasticsearch.plugin:lang-mustache-client:${version}"
-  compile project(':x-pack:protocol')
+  compile "org.elasticsearch:elasticsearch:${version}"
+  compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
+  compile "org.elasticsearch.plugin:parent-join-client:${version}"
+  compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}"
+  compile "org.elasticsearch.plugin:rank-eval-client:${version}"
+  compile "org.elasticsearch.plugin:lang-mustache-client:${version}"
+  bundle project(':x-pack:protocol')

   testCompile "org.elasticsearch.client:test:${version}"
   testCompile "org.elasticsearch.test:framework:${version}"
@@ -75,8 +73,8 @@ dependencyLicenses {
 forbiddenApisMain {
   // core does not depend on the httpclient for compile so we add the signatures here. We don't add them for test as they are already
   // specified
-  signaturesURLs += [PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
-  signaturesURLs += [file('src/main/resources/forbidden/rest-high-level-signatures.txt').toURI().toURL()]
+  addSignatureFiles 'http-signatures'
+  signaturesFiles += files('src/main/resources/forbidden/rest-high-level-signatures.txt')
 }

 integTestCluster {

View File

@@ -0,0 +1,63 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse;
import java.io.IOException;
import static java.util.Collections.emptySet;
public class GraphClient {
private final RestHighLevelClient restHighLevelClient;
GraphClient(RestHighLevelClient restHighLevelClient) {
this.restHighLevelClient = restHighLevelClient;
}
/**
* Executes an exploration request using the Graph API.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html">Graph API
* on elastic.co</a>.
*/
public final GraphExploreResponse explore(GraphExploreRequest graphExploreRequest,
RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(graphExploreRequest, RequestConverters::xPackGraphExplore,
options, GraphExploreResponse::fromXContext, emptySet());
}
/**
* Asynchronously executes an exploration request using the Graph API.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html">Graph API
* on elastic.co</a>.
*/
public final void exploreAsync(GraphExploreRequest graphExploreRequest,
RequestOptions options,
ActionListener<GraphExploreResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(graphExploreRequest, RequestConverters::xPackGraphExplore,
options, GraphExploreResponse::fromXContext, listener, emptySet());
}
}
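A rough usage sketch of the new Graph accessor, assuming a locally running cluster. The single-index GraphExploreRequest constructor and the vertex accessors shown here are assumptions for illustration; real callers would also configure hops, vertex definitions and a guiding query before sending the request.

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse;

public class GraphExploreExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // "my-index" is a placeholder; a real request would also define hops/vertices.
            GraphExploreRequest request = new GraphExploreRequest("my-index");
            GraphExploreResponse response = client.graph().explore(request, RequestOptions.DEFAULT);
            // Print the terms of the vertices the exploration found.
            response.getVertices().forEach(vertex -> System.out.println(vertex.getTerm()));
        }
    }
}
```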

View File

@@ -20,12 +20,15 @@
 package org.elasticsearch.client;

 import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpGet;
 import org.apache.http.client.methods.HttpPost;
 import org.apache.http.client.methods.HttpPut;
 import org.elasticsearch.client.RequestConverters.EndpointBuilder;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.protocol.xpack.ml.CloseJobRequest;
 import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
+import org.elasticsearch.protocol.xpack.ml.GetJobRequest;
+import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest;
 import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
 import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
@@ -50,6 +53,23 @@ final class MLRequestConverters {
         return request;
     }

+    static Request getJob(GetJobRequest getJobRequest) {
+        String endpoint = new EndpointBuilder()
+                .addPathPartAsIs("_xpack")
+                .addPathPartAsIs("ml")
+                .addPathPartAsIs("anomaly_detectors")
+                .addPathPart(Strings.collectionToCommaDelimitedString(getJobRequest.getJobIds()))
+                .build();
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
+
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        if (getJobRequest.isAllowNoJobs() != null) {
+            params.putParam("allow_no_jobs", Boolean.toString(getJobRequest.isAllowNoJobs()));
+        }
+
+        return request;
+    }
+
     static Request openJob(OpenJobRequest openJobRequest) throws IOException {
         String endpoint = new EndpointBuilder()
             .addPathPartAsIs("_xpack")
@@ -59,11 +79,11 @@ final class MLRequestConverters {
             .addPathPartAsIs("_open")
             .build();
         Request request = new Request(HttpPost.METHOD_NAME, endpoint);
-        request.setJsonEntity(openJobRequest.toString());
+        request.setEntity(createEntity(openJobRequest, REQUEST_BODY_CONTENT_TYPE));
         return request;
     }

-    static Request closeJob(CloseJobRequest closeJobRequest) {
+    static Request closeJob(CloseJobRequest closeJobRequest) throws IOException {
         String endpoint = new EndpointBuilder()
             .addPathPartAsIs("_xpack")
             .addPathPartAsIs("ml")
@@ -72,18 +92,7 @@ final class MLRequestConverters {
             .addPathPartAsIs("_close")
             .build();
         Request request = new Request(HttpPost.METHOD_NAME, endpoint);
-
-        RequestConverters.Params params = new RequestConverters.Params(request);
-        if (closeJobRequest.isForce() != null) {
-            params.putParam("force", Boolean.toString(closeJobRequest.isForce()));
-        }
-        if (closeJobRequest.isAllowNoJobs() != null) {
-            params.putParam("allow_no_jobs", Boolean.toString(closeJobRequest.isAllowNoJobs()));
-        }
-        if (closeJobRequest.getTimeout() != null) {
-            params.putParam("timeout", closeJobRequest.getTimeout().getStringRep());
-        }
-
+        request.setEntity(createEntity(closeJobRequest, REQUEST_BODY_CONTENT_TYPE));
         return request;
     }
@@ -101,4 +110,18 @@ final class MLRequestConverters {
         return request;
     }
+
+    static Request getBuckets(GetBucketsRequest getBucketsRequest) throws IOException {
+        String endpoint = new EndpointBuilder()
+                .addPathPartAsIs("_xpack")
+                .addPathPartAsIs("ml")
+                .addPathPartAsIs("anomaly_detectors")
+                .addPathPart(getBucketsRequest.getJobId())
+                .addPathPartAsIs("results")
+                .addPathPartAsIs("buckets")
+                .build();
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
+        request.setEntity(createEntity(getBucketsRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
+    }
 }

View File

@@ -23,6 +23,10 @@ import org.elasticsearch.protocol.xpack.ml.CloseJobRequest;
 import org.elasticsearch.protocol.xpack.ml.CloseJobResponse;
 import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
 import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse;
+import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest;
+import org.elasticsearch.protocol.xpack.ml.GetBucketsResponse;
+import org.elasticsearch.protocol.xpack.ml.GetJobRequest;
+import org.elasticsearch.protocol.xpack.ml.GetJobResponse;
 import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
 import org.elasticsearch.protocol.xpack.ml.OpenJobResponse;
 import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
@@ -52,7 +56,7 @@ public final class MachineLearningClient {
      * For additional info
      * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html">ML PUT job documentation</a>
      *
-     * @param request the PutJobRequest containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings
+     * @param request The PutJobRequest containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings
      * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
      * @return PutJobResponse with enclosed {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} object
      * @throws IOException when there is a serialization issue sending the request or receiving the response
@@ -71,7 +75,7 @@ public final class MachineLearningClient {
      * For additional info
      * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html">ML PUT job documentation</a>
      *
-     * @param request the request containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings
+     * @param request The request containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings
      * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
      * @param listener Listener to be notified upon request completion
      */
@@ -84,13 +88,54 @@ public final class MachineLearningClient {
                 Collections.emptySet());
     }

+    /**
+     * Gets one or more Machine Learning job configuration info.
+     *
+     * <p>
+     * For additional info
+     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html"></a>
+     * </p>
+     * @param request {@link GetJobRequest} Request containing a list of jobId(s) and additional options
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return {@link GetJobResponse} response object containing
+     * the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} objects and the number of jobs found
+     * @throws IOException when there is a serialization issue sending the request or receiving the response
+     */
+    public GetJobResponse getJob(GetJobRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request,
+                MLRequestConverters::getJob,
+                options,
+                GetJobResponse::fromXContent,
+                Collections.emptySet());
+    }
+
+    /**
+     * Gets one or more Machine Learning job configuration info, asynchronously.
+     *
+     * <p>
+     * For additional info
+     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html"></a>
+     * </p>
+     * @param request {@link GetJobRequest} Request containing a list of jobId(s) and additional options
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener Listener to be notified with {@link GetJobResponse} upon request completion
+     */
+    public void getJobAsync(GetJobRequest request, RequestOptions options, ActionListener<GetJobResponse> listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request,
+                MLRequestConverters::getJob,
+                options,
+                GetJobResponse::fromXContent,
+                listener,
+                Collections.emptySet());
+    }
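A rough usage sketch of the new getJob call, assuming a locally running cluster. The job id is a placeholder, and the varargs GetJobRequest constructor and the count() accessor are assumed names for illustration.

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.protocol.xpack.ml.GetJobRequest;
import org.elasticsearch.protocol.xpack.ml.GetJobResponse;

public class GetJobExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // "my-job" is a placeholder job id; multiple ids or "_all" can also be passed.
            GetJobRequest request = new GetJobRequest("my-job");
            GetJobResponse response = client.machineLearning().getJob(request, RequestOptions.DEFAULT);
            System.out.println("jobs found: " + response.count());
        }
    }
}
```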
     /**
      * Deletes the given Machine Learning Job
      * <p>
      * For additional info
      * see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html">ML Delete Job documentation</a>
      * </p>
-     * @param request the request to delete the job
+     * @param request The request to delete the job
      * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
      * @return action acknowledgement
      * @throws IOException when there is a serialization issue sending the request or receiving the response
@@ -109,7 +154,7 @@ public final class MachineLearningClient {
      * For additional info
      * see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html">ML Delete Job documentation</a>
      * </p>
-     * @param request the request to delete the job
+     * @param request The request to delete the job
      * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
      * @param listener Listener to be notified upon request completion
      */
@@ -133,7 +178,7 @@ public final class MachineLearningClient {
      * For additional info
      * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html"></a>
      * </p>
-     * @param request request containing job_id and additional optional options
+     * @param request Request containing job_id and additional optional options
      * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
      * @return response containing if the job was successfully opened or not.
      * @throws IOException when there is a serialization issue sending the request or receiving the response
@@ -156,7 +201,7 @@ public final class MachineLearningClient {
      * For additional info
      * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html"></a>
      * </p>
-     * @param request request containing job_id and additional optional options
+     * @param request Request containing job_id and additional optional options
      * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
      * @param listener Listener to be notified upon request completion
      */
@@ -174,7 +219,7 @@ public final class MachineLearningClient {
      *
      * A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
      *
-     * @param request request containing job_ids and additional options. See {@link CloseJobRequest}
+     * @param request Request containing job_ids and additional options. See {@link CloseJobRequest}
      * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
      * @return response containing if the job was successfully closed or not.
      * @throws IOException when there is a serialization issue sending the request or receiving the response
@@ -192,7 +237,7 @@ public final class MachineLearningClient {
      *
      * A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
      *
-     * @param request request containing job_ids and additional options. See {@link CloseJobRequest}
+     * @param request Request containing job_ids and additional options. See {@link CloseJobRequest}
      * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
      * @param listener Listener to be notified upon request completion
      */
@@ -204,4 +249,40 @@ public final class MachineLearningClient {
                 listener,
                 Collections.emptySet());
     }

+    /**
+     * Gets the buckets for a Machine Learning Job.
+     * <p>
+     * For additional info
+     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html">ML GET buckets documentation</a>
+     *
+     * @param request The request
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     */
+    public GetBucketsResponse getBuckets(GetBucketsRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request,
+                MLRequestConverters::getBuckets,
+                options,
+                GetBucketsResponse::fromXContent,
+                Collections.emptySet());
+    }
+
+    /**
+     * Gets the buckets for a Machine Learning Job, notifies listener once the requested buckets are retrieved.
+     * <p>
+     * For additional info
+     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html">ML GET buckets documentation</a>
+     *
+     * @param request The request
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener Listener to be notified upon request completion
+     */
+    public void getBucketsAsync(GetBucketsRequest request, RequestOptions options, ActionListener<GetBucketsResponse> listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request,
+                MLRequestConverters::getBuckets,
+                options,
+                GetBucketsResponse::fromXContent,
+                listener,
+                Collections.emptySet());
+    }
 }
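A rough usage sketch of the new getBuckets call, under the same assumptions as above; the single-argument GetBucketsRequest constructor, the setAnomalyScore filter, and the count() accessor are assumed names shown only for illustration.

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest;
import org.elasticsearch.protocol.xpack.ml.GetBucketsResponse;

public class GetBucketsExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // "my-job" is a placeholder job id; the anomaly score filter is an assumed optional setter.
            GetBucketsRequest request = new GetBucketsRequest("my-job");
            request.setAnomalyScore(75.0);
            GetBucketsResponse response = client.machineLearning().getBuckets(request, RequestOptions.DEFAULT);
            System.out.println("buckets found: " + response.count());
        }
    }
}
```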

View File

@@ -118,6 +118,7 @@ import org.elasticsearch.protocol.xpack.license.PutLicenseRequest;
 import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest;
 import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
+import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
 import org.elasticsearch.rest.action.search.RestSearchAction;
 import org.elasticsearch.script.mustache.MultiSearchTemplateRequest;
 import org.elasticsearch.script.mustache.SearchTemplateRequest;
@@ -1128,6 +1129,13 @@ final class RequestConverters {
         return request;
     }

+    static Request xPackGraphExplore(GraphExploreRequest exploreRequest) throws IOException {
+        String endpoint = endpoint(exploreRequest.indices(), exploreRequest.types(), "_xpack/graph/_explore");
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
+        request.setEntity(createEntity(exploreRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
+    }
+
     static Request xPackWatcherPutWatch(PutWatchRequest putWatchRequest) {
         String endpoint = new EndpointBuilder()
             .addPathPartAsIs("_xpack")

View File

@ -209,6 +209,7 @@ public class RestHighLevelClient implements Closeable {
private final TasksClient tasksClient = new TasksClient(this); private final TasksClient tasksClient = new TasksClient(this);
private final XPackClient xPackClient = new XPackClient(this); private final XPackClient xPackClient = new XPackClient(this);
private final WatcherClient watcherClient = new WatcherClient(this); private final WatcherClient watcherClient = new WatcherClient(this);
private final GraphClient graphClient = new GraphClient(this);
private final LicenseClient licenseClient = new LicenseClient(this); private final LicenseClient licenseClient = new LicenseClient(this);
private final MigrationClient migrationClient = new MigrationClient(this); private final MigrationClient migrationClient = new MigrationClient(this);
private final MachineLearningClient machineLearningClient = new MachineLearningClient(this); private final MachineLearningClient machineLearningClient = new MachineLearningClient(this);
@ -325,6 +326,16 @@ public class RestHighLevelClient implements Closeable {
* Watcher APIs on elastic.co</a> for more information. * Watcher APIs on elastic.co</a> for more information.
*/ */
public WatcherClient watcher() { return watcherClient; } public WatcherClient watcher() { return watcherClient; }
/**
* Provides methods for accessing the Elastic Licensed Graph explore API that
* is shipped with the default distribution of Elasticsearch. All of
* these APIs will 404 if run against the OSS distribution of Elasticsearch.
* <p>
* See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html">
* Graph API on elastic.co</a> for more information.
*/
public GraphClient graph() { return graphClient; }
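A minimal sketch of exploring a graph through this accessor, assuming an initialized RestHighLevelClient named `client`; the index, field and term names are placeholders:

    // Sketch only: "my-index", "text"/"projectx" and "participants" are placeholder names.
    GraphExploreRequest exploreRequest = new GraphExploreRequest();
    exploreRequest.indices("my-index");
    Hop hop = exploreRequest.createNextHop(new TermQueryBuilder("text", "projectx")); // seed query for the first hop
    VertexRequest people = hop.addVertexRequest("participants");                      // field whose terms become vertices
    people.minDocCount(1);
    GraphExploreResponse exploreResponse = client.graph().explore(exploreRequest, RequestOptions.DEFAULT);
    for (Vertex vertex : exploreResponse.getVertices()) {
        // vertex.getTerm() and vertex.getHopDepth() describe each discovered term
    }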
/** /**
* Provides methods for accessing the Elastic Licensed Licensing APIs that * Provides methods for accessing the Elastic Licensed Licensing APIs that
@ -961,6 +972,11 @@ public class RestHighLevelClient implements Closeable {
FieldCapabilitiesResponse::fromXContent, listener, emptySet()); FieldCapabilitiesResponse::fromXContent, listener, emptySet());
} }
/**
* @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
 * layer has been added to the ReST client, and requests should implement {@link Validatable} instead of extending {@link ActionRequest}.
*/
@Deprecated
protected final <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request, protected final <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter, CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options, RequestOptions options,
@ -970,15 +986,58 @@ public class RestHighLevelClient implements Closeable {
response -> parseEntity(response.getEntity(), entityParser), ignores); response -> parseEntity(response.getEntity(), entityParser), ignores);
} }
/**
* Defines a helper method for performing a request and then parsing the returned entity using the provided entityParser.
*/
protected final <Req extends Validatable, Resp> Resp performRequestAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<XContentParser, Resp, IOException> entityParser,
Set<Integer> ignores) throws IOException {
return performRequest(request, requestConverter, options,
response -> parseEntity(response.getEntity(), entityParser), ignores);
}
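For illustration, a client method built on this new Validatable-based overload would follow the same shape as the existing ActionRequest-based ones; everything named Widget below is hypothetical and not part of this commit:

    // Hypothetical sketch: GetWidgetRequest implements Validatable; the converter and response types are assumed.
    public GetWidgetResponse getWidget(GetWidgetRequest request, RequestOptions options) throws IOException {
        return performRequestAndParseEntity(request,
            WidgetRequestConverters::getWidget,
            options,
            GetWidgetResponse::fromXContent,
            Collections.emptySet());
    }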
/**
* @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
 * layer has been added to the ReST client, and requests should implement {@link Validatable} instead of extending {@link ActionRequest}.
*/
@Deprecated
protected final <Req extends ActionRequest, Resp> Resp performRequest(Req request, protected final <Req extends ActionRequest, Resp> Resp performRequest(Req request,
CheckedFunction<Req, Request, IOException> requestConverter, CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options, RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter, CheckedFunction<Response, Resp, IOException> responseConverter,
Set<Integer> ignores) throws IOException { Set<Integer> ignores) throws IOException {
ActionRequestValidationException validationException = request.validate(); ActionRequestValidationException validationException = request.validate();
if (validationException != null) { if (validationException != null && validationException.validationErrors().isEmpty() == false) {
throw validationException; throw validationException;
} }
return internalPerformRequest(request, requestConverter, options, responseConverter, ignores);
}
/**
* Defines a helper method for performing a request.
*/
protected final <Req extends Validatable, Resp> Resp performRequest(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter,
Set<Integer> ignores) throws IOException {
ValidationException validationException = request.validate();
if (validationException != null && validationException.validationErrors().isEmpty() == false) {
throw validationException;
}
return internalPerformRequest(request, requestConverter, options, responseConverter, ignores);
}
/**
* Provides common functionality for performing a request.
*/
private <Req, Resp> Resp internalPerformRequest(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter,
Set<Integer> ignores) throws IOException {
Request req = requestConverter.apply(request); Request req = requestConverter.apply(request);
req.setOptions(options); req.setOptions(options);
Response response; Response response;
@ -1006,25 +1065,75 @@ public class RestHighLevelClient implements Closeable {
} }
} }
/**
* @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
 * layer has been added to the ReST client, and requests should implement {@link Validatable} instead of extending {@link ActionRequest}.
*/
@Deprecated
protected final <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request, protected final <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter, CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options, RequestOptions options,
CheckedFunction<XContentParser, Resp, IOException> entityParser, CheckedFunction<XContentParser, Resp, IOException> entityParser,
ActionListener<Resp> listener, Set<Integer> ignores) { ActionListener<Resp> listener, Set<Integer> ignores) {
performRequestAsync(request, requestConverter, options, performRequestAsync(request, requestConverter, options,
response -> parseEntity(response.getEntity(), entityParser), listener, ignores); response -> parseEntity(response.getEntity(), entityParser), listener, ignores);
} }
/**
 * Defines a helper method for asynchronously performing a request and then parsing the returned entity using the provided entityParser.
*/
protected final <Req extends Validatable, Resp> void performRequestAsyncAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<XContentParser, Resp, IOException> entityParser,
ActionListener<Resp> listener, Set<Integer> ignores) {
performRequestAsync(request, requestConverter, options,
response -> parseEntity(response.getEntity(), entityParser), listener, ignores);
}
/**
* @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
 * layer has been added to the ReST client, and requests should implement {@link Validatable} instead of extending {@link ActionRequest}.
*/
@Deprecated
protected final <Req extends ActionRequest, Resp> void performRequestAsync(Req request, protected final <Req extends ActionRequest, Resp> void performRequestAsync(Req request,
CheckedFunction<Req, Request, IOException> requestConverter, CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options, RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter, CheckedFunction<Response, Resp, IOException> responseConverter,
ActionListener<Resp> listener, Set<Integer> ignores) { ActionListener<Resp> listener, Set<Integer> ignores) {
ActionRequestValidationException validationException = request.validate(); ActionRequestValidationException validationException = request.validate();
if (validationException != null) { if (validationException != null && validationException.validationErrors().isEmpty() == false) {
listener.onFailure(validationException); listener.onFailure(validationException);
return; return;
} }
internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores);
}
/**
* Defines a helper method for asynchronously performing a request.
*/
protected final <Req extends Validatable, Resp> void performRequestAsync(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter,
ActionListener<Resp> listener, Set<Integer> ignores) {
ValidationException validationException = request.validate();
if (validationException != null && validationException.validationErrors().isEmpty() == false) {
listener.onFailure(validationException);
return;
}
internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores);
}
/**
* Provides common functionality for asynchronously performing a request.
*/
private <Req, Resp> void internalPerformRequestAsync(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter,
ActionListener<Resp> listener, Set<Integer> ignores) {
Request req; Request req;
try { try {
req = requestConverter.apply(request); req = requestConverter.apply(request);
@ -1038,6 +1147,7 @@ public class RestHighLevelClient implements Closeable {
client.performRequestAsync(req, responseListener); client.performRequestAsync(req, responseListener);
} }
final <Resp> ResponseListener wrapResponseListener(CheckedFunction<Response, Resp, IOException> responseConverter, final <Resp> ResponseListener wrapResponseListener(CheckedFunction<Response, Resp, IOException> responseConverter,
ActionListener<Resp> actionListener, Set<Integer> ignores) { ActionListener<Resp> actionListener, Set<Integer> ignores) {
return new ResponseListener() { return new ResponseListener() {

View File

@ -0,0 +1,41 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
/**
* Defines a validation layer for Requests.
*/
public interface Validatable {
ValidationException EMPTY_VALIDATION = new ValidationException() {
@Override
public void addValidationError(String error) {
throw new UnsupportedOperationException("Validation messages should not be added to the empty validation");
}
};
/**
 * Perform validation. This method does not need to be overridden when no validation is required.
 *
 * @return potentially null for older actions, an empty {@link ValidationException} for newer actions that pass validation, or a
 * {@link ValidationException} that contains a list of all failed validations.
*/
default ValidationException validate() {
return EMPTY_VALIDATION;
}
}
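A minimal sketch of a request type opting into this validation layer; the class and its field are hypothetical, and ValidationException is the accumulator added in the next file of this commit:

    // Hypothetical request class, shown only to illustrate the Validatable contract.
    public class GetWidgetRequest implements Validatable {
        private final String widgetId;

        public GetWidgetRequest(String widgetId) {
            this.widgetId = widgetId;
        }

        @Override
        public ValidationException validate() {
            ValidationException exception = new ValidationException();
            if (widgetId == null || widgetId.isEmpty()) {
                exception.addValidationError("widget id must not be null or empty");
            }
            return exception; // an empty error list means the request passes validation
        }
    }

Requests that need no validation can simply rely on the default validate() implementation above.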

View File

@ -0,0 +1,55 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import java.util.ArrayList;
import java.util.List;
/**
* Encapsulates an accumulation of validation errors
*/
public class ValidationException extends IllegalArgumentException {
private final List<String> validationErrors = new ArrayList<>();
/**
* Add a new validation error to the accumulating validation errors
* @param error the error to add
*/
public void addValidationError(String error) {
validationErrors.add(error);
}
/**
* Returns the validation errors accumulated
*/
public final List<String> validationErrors() {
return validationErrors;
}
@Override
public final String getMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Validation Failed: ");
int index = 0;
for (String error : validationErrors) {
sb.append(++index).append(": ").append(error).append(";");
}
return sb.toString();
}
}
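For reference, a quick sketch of the message this class produces once two errors have been accumulated (the error strings are illustrative):

    ValidationException e = new ValidationException();
    e.addValidationError("job id must not be null");
    e.addValidationError("page size must be positive");
    // e.getMessage() -> "Validation Failed: 1: job id must not be null;2: page size must be positive;"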

View File

@ -0,0 +1,139 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse;
import org.elasticsearch.protocol.xpack.graph.Hop;
import org.elasticsearch.protocol.xpack.graph.Vertex;
import org.elasticsearch.protocol.xpack.graph.VertexRequest;
import org.hamcrest.Matchers;
import org.junit.Before;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
public class GraphIT extends ESRestHighLevelClientTestCase {
@Before
public void indexDocuments() throws IOException {
// Create chain of doc IDs across indices 1->2->3
Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/type/1");
doc1.setJsonEntity("{ \"num\":[1], \"const\":\"start\"}");
client().performRequest(doc1);
Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/type/1");
doc2.setJsonEntity("{\"num\":[1,2], \"const\":\"foo\"}");
client().performRequest(doc2);
Request doc3 = new Request(HttpPut.METHOD_NAME, "/index2/type/2");
doc3.setJsonEntity("{\"num\":[2,3], \"const\":\"foo\"}");
client().performRequest(doc3);
Request doc4 = new Request(HttpPut.METHOD_NAME, "/index_no_field_data/type/2");
doc4.setJsonEntity("{\"num\":\"string\", \"const\":\"foo\"}");
client().performRequest(doc4);
Request doc5 = new Request(HttpPut.METHOD_NAME, "/index_no_field_data/type/2");
doc5.setJsonEntity("{\"num\":[2,4], \"const\":\"foo\"}");
client().performRequest(doc5);
client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh"));
}
public void testCleanExplore() throws Exception {
GraphExploreRequest graphExploreRequest = new GraphExploreRequest();
graphExploreRequest.indices("index1", "index2");
graphExploreRequest.useSignificance(false);
int numHops = 3;
for (int i = 0; i < numHops; i++) {
QueryBuilder guidingQuery = null;
if (i == 0) {
guidingQuery = new TermQueryBuilder("const.keyword", "start");
} else if (randomBoolean()){
guidingQuery = new TermQueryBuilder("const.keyword", "foo");
}
Hop hop = graphExploreRequest.createNextHop(guidingQuery);
VertexRequest vr = hop.addVertexRequest("num");
vr.minDocCount(1);
}
Map<String, Integer> expectedTermsAndDepths = new HashMap<>();
expectedTermsAndDepths.put("1", 0);
expectedTermsAndDepths.put("2", 1);
expectedTermsAndDepths.put("3", 2);
GraphExploreResponse exploreResponse = highLevelClient().graph().explore(graphExploreRequest, RequestOptions.DEFAULT);
Map<String, Integer> actualTermsAndDepths = new HashMap<>();
Collection<Vertex> v = exploreResponse.getVertices();
for (Vertex vertex : v) {
actualTermsAndDepths.put(vertex.getTerm(), vertex.getHopDepth());
}
assertEquals(expectedTermsAndDepths, actualTermsAndDepths);
assertThat(exploreResponse.isTimedOut(), Matchers.is(false));
ShardOperationFailedException[] failures = exploreResponse.getShardFailures();
assertThat(failures.length, Matchers.equalTo(0));
}
public void testBadExplore() throws Exception {
// Explore indices where lack of fielddata=true on one index leads to partial failures
GraphExploreRequest graphExploreRequest = new GraphExploreRequest();
graphExploreRequest.indices("index1", "index2", "index_no_field_data");
graphExploreRequest.useSignificance(false);
int numHops = 3;
for (int i = 0; i < numHops; i++) {
QueryBuilder guidingQuery = null;
if (i == 0) {
guidingQuery = new TermQueryBuilder("const.keyword", "start");
} else if (randomBoolean()){
guidingQuery = new TermQueryBuilder("const.keyword", "foo");
}
Hop hop = graphExploreRequest.createNextHop(guidingQuery);
VertexRequest vr = hop.addVertexRequest("num");
vr.minDocCount(1);
}
Map<String, Integer> expectedTermsAndDepths = new HashMap<>();
expectedTermsAndDepths.put("1", 0);
expectedTermsAndDepths.put("2", 1);
expectedTermsAndDepths.put("3", 2);
GraphExploreResponse exploreResponse = highLevelClient().graph().explore(graphExploreRequest, RequestOptions.DEFAULT);
Map<String, Integer> actualTermsAndDepths = new HashMap<>();
Collection<Vertex> v = exploreResponse.getVertices();
for (Vertex vertex : v) {
actualTermsAndDepths.put(vertex.getTerm(), vertex.getHopDepth());
}
assertEquals(expectedTermsAndDepths, actualTermsAndDepths);
assertThat(exploreResponse.isTimedOut(), Matchers.is(false));
ShardOperationFailedException[] failures = exploreResponse.getShardFailures();
assertThat(failures.length, Matchers.equalTo(1));
assertTrue(failures[0].reason().contains("Fielddata is disabled"));
}
}

View File

@ -20,17 +20,22 @@
package org.elasticsearch.client; package org.elasticsearch.client;
import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.protocol.xpack.ml.CloseJobRequest; import org.elasticsearch.protocol.xpack.ml.CloseJobRequest;
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest;
import org.elasticsearch.protocol.xpack.ml.GetJobRequest;
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
import org.elasticsearch.protocol.xpack.ml.PutJobRequest; import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig; import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig;
import org.elasticsearch.protocol.xpack.ml.job.config.Detector; import org.elasticsearch.protocol.xpack.ml.job.config.Detector;
import org.elasticsearch.protocol.xpack.ml.job.config.Job; import org.elasticsearch.protocol.xpack.ml.job.config.Job;
import org.elasticsearch.protocol.xpack.ml.job.util.PageParams;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import java.io.ByteArrayOutputStream; import java.io.ByteArrayOutputStream;
@ -47,6 +52,7 @@ public class MLRequestConvertersTests extends ESTestCase {
Request request = MLRequestConverters.putJob(putJobRequest); Request request = MLRequestConverters.putJob(putJobRequest);
assertEquals(HttpPut.METHOD_NAME, request.getMethod());
assertThat(request.getEndpoint(), equalTo("/_xpack/ml/anomaly_detectors/foo")); assertThat(request.getEndpoint(), equalTo("/_xpack/ml/anomaly_detectors/foo"));
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
Job parsedJob = Job.PARSER.apply(parser, null).build(); Job parsedJob = Job.PARSER.apply(parser, null).build();
@ -54,6 +60,23 @@ public class MLRequestConvertersTests extends ESTestCase {
} }
} }
public void testGetJob() {
GetJobRequest getJobRequest = new GetJobRequest();
Request request = MLRequestConverters.getJob(getJobRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
assertEquals("/_xpack/ml/anomaly_detectors", request.getEndpoint());
assertFalse(request.getParameters().containsKey("allow_no_jobs"));
getJobRequest = new GetJobRequest("job1", "jobs*");
getJobRequest.setAllowNoJobs(true);
request = MLRequestConverters.getJob(getJobRequest);
assertEquals("/_xpack/ml/anomaly_detectors/job1,jobs*", request.getEndpoint());
assertEquals(Boolean.toString(true), request.getParameters().get("allow_no_jobs"));
}
public void testOpenJob() throws Exception { public void testOpenJob() throws Exception {
String jobId = "some-job-id"; String jobId = "some-job-id";
OpenJobRequest openJobRequest = new OpenJobRequest(jobId); OpenJobRequest openJobRequest = new OpenJobRequest(jobId);
@ -62,21 +85,17 @@ public class MLRequestConvertersTests extends ESTestCase {
Request request = MLRequestConverters.openJob(openJobRequest); Request request = MLRequestConverters.openJob(openJobRequest);
assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals(HttpPost.METHOD_NAME, request.getMethod());
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_open", request.getEndpoint()); assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_open", request.getEndpoint());
ByteArrayOutputStream bos = new ByteArrayOutputStream(); assertEquals(requestEntityToString(request), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}");
request.getEntity().writeTo(bos);
assertEquals(bos.toString("UTF-8"), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}");
} }
public void testCloseJob() { public void testCloseJob() throws Exception {
String jobId = "somejobid"; String jobId = "somejobid";
CloseJobRequest closeJobRequest = new CloseJobRequest(jobId); CloseJobRequest closeJobRequest = new CloseJobRequest(jobId);
Request request = MLRequestConverters.closeJob(closeJobRequest); Request request = MLRequestConverters.closeJob(closeJobRequest);
assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals(HttpPost.METHOD_NAME, request.getMethod());
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_close", request.getEndpoint()); assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_close", request.getEndpoint());
assertFalse(request.getParameters().containsKey("force")); assertEquals("{\"job_id\":\"somejobid\"}", requestEntityToString(request));
assertFalse(request.getParameters().containsKey("allow_no_jobs"));
assertFalse(request.getParameters().containsKey("timeout"));
closeJobRequest = new CloseJobRequest(jobId, "otherjobs*"); closeJobRequest = new CloseJobRequest(jobId, "otherjobs*");
closeJobRequest.setForce(true); closeJobRequest.setForce(true);
@ -85,9 +104,8 @@ public class MLRequestConvertersTests extends ESTestCase {
request = MLRequestConverters.closeJob(closeJobRequest); request = MLRequestConverters.closeJob(closeJobRequest);
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + ",otherjobs*/_close", request.getEndpoint()); assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + ",otherjobs*/_close", request.getEndpoint());
assertEquals(Boolean.toString(true), request.getParameters().get("force")); assertEquals("{\"job_id\":\"somejobid,otherjobs*\",\"timeout\":\"10m\",\"force\":true,\"allow_no_jobs\":false}",
assertEquals(Boolean.toString(false), request.getParameters().get("allow_no_jobs")); requestEntityToString(request));
assertEquals("10m", request.getParameters().get("timeout"));
} }
public void testDeleteJob() { public void testDeleteJob() {
@ -104,6 +122,23 @@ public class MLRequestConvertersTests extends ESTestCase {
assertEquals(Boolean.toString(true), request.getParameters().get("force")); assertEquals(Boolean.toString(true), request.getParameters().get("force"));
} }
public void testGetBuckets() throws IOException {
String jobId = randomAlphaOfLength(10);
GetBucketsRequest getBucketsRequest = new GetBucketsRequest(jobId);
getBucketsRequest.setPageParams(new PageParams(100, 300));
getBucketsRequest.setAnomalyScore(75.0);
getBucketsRequest.setSort("anomaly_score");
getBucketsRequest.setDescending(true);
Request request = MLRequestConverters.getBuckets(getBucketsRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/results/buckets", request.getEndpoint());
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
GetBucketsRequest parsedRequest = GetBucketsRequest.PARSER.apply(parser, null);
assertThat(parsedRequest, equalTo(getBucketsRequest));
}
}
private static Job createValidJob(String jobId) { private static Job createValidJob(String jobId) {
AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList( AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList(
Detector.builder().setFunction("count").build())); Detector.builder().setFunction("count").build()));
@ -111,4 +146,10 @@ public class MLRequestConvertersTests extends ESTestCase {
jobBuilder.setAnalysisConfig(analysisConfig); jobBuilder.setAnalysisConfig(analysisConfig);
return jobBuilder.build(); return jobBuilder.build();
} }
private static String requestEntityToString(Request request) throws Exception {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
request.getEntity().writeTo(bos);
return bos.toString("UTF-8");
}
} }

View File

@ -0,0 +1,217 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest;
import org.elasticsearch.protocol.xpack.ml.GetBucketsResponse;
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
import org.elasticsearch.protocol.xpack.ml.job.config.Job;
import org.elasticsearch.protocol.xpack.ml.job.results.Bucket;
import org.elasticsearch.protocol.xpack.ml.job.util.PageParams;
import org.junit.After;
import org.junit.Before;
import java.io.IOException;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
private static final String RESULTS_INDEX = ".ml-anomalies-shared";
private static final String DOC = "doc";
private static final String JOB_ID = "get-results-it-job";
// 2018-08-01T00:00:00Z
private static final long START_TIME_EPOCH_MS = 1533081600000L;
private BucketStats bucketStats = new BucketStats();
@Before
public void createJobAndIndexResults() throws IOException {
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
Job job = MachineLearningIT.buildJob(JOB_ID);
machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
long time = START_TIME_EPOCH_MS;
long endTime = time + 3600000L * 24 * 10; // 10 days of hourly buckets
while (time < endTime) {
addBucketIndexRequest(time, false, bulkRequest);
addRecordIndexRequests(time, false, bulkRequest);
time += 3600000L;
}
// Also index an interim bucket
addBucketIndexRequest(time, true, bulkRequest);
addRecordIndexRequests(time, true, bulkRequest);
highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT);
}
private void addBucketIndexRequest(long timestamp, boolean isInterim, BulkRequest bulkRequest) {
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC);
double bucketScore = randomDoubleBetween(0.0, 100.0, true);
bucketStats.report(bucketScore);
indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"bucket\", \"timestamp\": " + timestamp + "," +
"\"bucket_span\": 3600,\"is_interim\": " + isInterim + ", \"anomaly_score\": " + bucketScore +
", \"bucket_influencers\":[{\"job_id\": \"" + JOB_ID + "\", \"result_type\":\"bucket_influencer\", " +
"\"influencer_field_name\": \"bucket_time\", \"timestamp\": " + timestamp + ", \"bucket_span\": 3600, " +
"\"is_interim\": " + isInterim + "}]}", XContentType.JSON);
bulkRequest.add(indexRequest);
}
private void addRecordIndexRequests(long timestamp, boolean isInterim, BulkRequest bulkRequest) {
if (randomBoolean()) {
return;
}
int recordCount = randomIntBetween(1, 3);
for (int i = 0; i < recordCount; ++i) {
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC);
double recordScore = randomDoubleBetween(0.0, 100.0, true);
double p = randomDoubleBetween(0.0, 0.05, false);
indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"record\", \"timestamp\": " + timestamp + "," +
"\"bucket_span\": 3600,\"is_interim\": " + isInterim + ", \"record_score\": " + recordScore + ", \"probability\": "
+ p + "}", XContentType.JSON);
bulkRequest.add(indexRequest);
}
}
@After
public void deleteJob() throws IOException {
new MlRestTestStateCleaner(logger, client()).clearMlMetadata();
}
public void testGetBuckets() throws IOException {
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
{
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
assertThat(response.count(), equalTo(241L));
assertThat(response.buckets().size(), equalTo(100));
assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS));
}
{
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
request.setTimestamp("1533081600000");
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
assertThat(response.count(), equalTo(1L));
assertThat(response.buckets().size(), equalTo(1));
assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS));
}
{
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
request.setAnomalyScore(75.0);
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
assertThat(response.count(), equalTo(bucketStats.criticalCount));
assertThat(response.buckets().size(), equalTo((int) Math.min(100, bucketStats.criticalCount)));
assertThat(response.buckets().stream().anyMatch(b -> b.getAnomalyScore() < 75.0), is(false));
}
{
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
request.setExcludeInterim(true);
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
assertThat(response.count(), equalTo(240L));
}
{
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
request.setStart("1533081600000");
request.setEnd("1533092400000");
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
assertThat(response.count(), equalTo(3L));
assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS));
assertThat(response.buckets().get(1).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 3600000L));
assertThat(response.buckets().get(2).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 2 * 3600000L));
}
{
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
request.setPageParams(new PageParams(3, 3));
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
assertThat(response.buckets().size(), equalTo(3));
assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 3 * 3600000L));
assertThat(response.buckets().get(1).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 4 * 3600000L));
assertThat(response.buckets().get(2).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 5 * 3600000L));
}
{
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
request.setSort("anomaly_score");
request.setDescending(true);
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
double previousScore = 100.0;
for (Bucket bucket : response.buckets()) {
assertThat(bucket.getAnomalyScore(), lessThanOrEqualTo(previousScore));
previousScore = bucket.getAnomalyScore();
}
}
{
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
// Make sure we get all buckets
request.setPageParams(new PageParams(0, 10000));
request.setExpand(true);
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
assertThat(response.buckets().stream().anyMatch(b -> b.getRecords().size() > 0), is(true));
}
}
private static class BucketStats {
// score < 50.0
private long minorCount;
// 50.0 <= score < 75.0
private long majorCount;
// score >= 75.0
private long criticalCount;
private void report(double anomalyScore) {
if (anomalyScore < 50.0) {
minorCount++;
} else if (anomalyScore < 75.0) {
majorCount++;
} else {
criticalCount++;
}
}
}
}

View File

@ -19,12 +19,13 @@
package org.elasticsearch.client; package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.protocol.xpack.ml.CloseJobRequest; import org.elasticsearch.protocol.xpack.ml.CloseJobRequest;
import org.elasticsearch.protocol.xpack.ml.CloseJobResponse; import org.elasticsearch.protocol.xpack.ml.CloseJobResponse;
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse;
import org.elasticsearch.protocol.xpack.ml.GetJobRequest;
import org.elasticsearch.protocol.xpack.ml.GetJobResponse;
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; import org.elasticsearch.protocol.xpack.ml.OpenJobResponse;
import org.elasticsearch.protocol.xpack.ml.PutJobRequest; import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
@ -33,15 +34,25 @@ import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig;
import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription; import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription;
import org.elasticsearch.protocol.xpack.ml.job.config.Detector; import org.elasticsearch.protocol.xpack.ml.job.config.Detector;
import org.elasticsearch.protocol.xpack.ml.job.config.Job; import org.elasticsearch.protocol.xpack.ml.job.config.Job;
import org.junit.After;
import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static org.hamcrest.CoreMatchers.hasItems;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.is;
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32993")
public class MachineLearningIT extends ESRestHighLevelClientTestCase { public class MachineLearningIT extends ESRestHighLevelClientTestCase {
@After
public void cleanUp() throws IOException {
new MlRestTestStateCleaner(logger, client()).clearMlMetadata();
}
public void testPutJob() throws Exception { public void testPutJob() throws Exception {
String jobId = randomValidJobId(); String jobId = randomValidJobId();
Job job = buildJob(jobId); Job job = buildJob(jobId);
@ -54,6 +65,41 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
assertThat(createdJob.getJobType(), is(Job.ANOMALY_DETECTOR_JOB_TYPE)); assertThat(createdJob.getJobType(), is(Job.ANOMALY_DETECTOR_JOB_TYPE));
} }
public void testGetJob() throws Exception {
String jobId1 = randomValidJobId();
String jobId2 = randomValidJobId();
Job job1 = buildJob(jobId1);
Job job2 = buildJob(jobId2);
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
machineLearningClient.putJob(new PutJobRequest(job1), RequestOptions.DEFAULT);
machineLearningClient.putJob(new PutJobRequest(job2), RequestOptions.DEFAULT);
GetJobRequest request = new GetJobRequest(jobId1, jobId2);
// Test getting specific jobs
GetJobResponse response = execute(request, machineLearningClient::getJob, machineLearningClient::getJobAsync);
assertEquals(2, response.count());
assertThat(response.jobs(), hasSize(2));
assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), containsInAnyOrder(jobId1, jobId2));
// Test getting all jobs explicitly
request = GetJobRequest.getAllJobsRequest();
response = execute(request, machineLearningClient::getJob, machineLearningClient::getJobAsync);
assertTrue(response.count() >= 2L);
assertTrue(response.jobs().size() >= 2L);
assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), hasItems(jobId1, jobId2));
// Test getting all jobs implicitly
response = execute(new GetJobRequest(), machineLearningClient::getJob, machineLearningClient::getJobAsync);
assertTrue(response.count() >= 2L);
assertTrue(response.jobs().size() >= 2L);
assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), hasItems(jobId1, jobId2));
}
public void testDeleteJob() throws Exception { public void testDeleteJob() throws Exception {
String jobId = randomValidJobId(); String jobId = randomValidJobId();
Job job = buildJob(jobId); Job job = buildJob(jobId);

View File

@ -0,0 +1,109 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.test.rest.ESRestTestCase;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
* This is temporarily duplicated from the server side.
* @TODO Replace with an implementation using the HLRC once
* the APIs for managing datafeeds are implemented.
*/
public class MlRestTestStateCleaner {
private final Logger logger;
private final RestClient adminClient;
public MlRestTestStateCleaner(Logger logger, RestClient adminClient) {
this.logger = logger;
this.adminClient = adminClient;
}
public void clearMlMetadata() throws IOException {
deleteAllDatafeeds();
deleteAllJobs();
// indices will be deleted by the ESRestTestCase class
}
@SuppressWarnings("unchecked")
private void deleteAllDatafeeds() throws IOException {
final Request datafeedsRequest = new Request("GET", "/_xpack/ml/datafeeds");
datafeedsRequest.addParameter("filter_path", "datafeeds");
final Response datafeedsResponse = adminClient.performRequest(datafeedsRequest);
final List<Map<String, Object>> datafeeds =
(List<Map<String, Object>>) XContentMapValues.extractValue("datafeeds", ESRestTestCase.entityAsMap(datafeedsResponse));
if (datafeeds == null) {
return;
}
try {
adminClient.performRequest(new Request("POST", "/_xpack/ml/datafeeds/_all/_stop"));
} catch (Exception e1) {
logger.warn("failed to stop all datafeeds. Forcing stop", e1);
try {
adminClient.performRequest(new Request("POST", "/_xpack/ml/datafeeds/_all/_stop?force=true"));
} catch (Exception e2) {
logger.warn("Force-closing all data feeds failed", e2);
}
throw new RuntimeException(
"Had to resort to force-stopping datafeeds, something went wrong?", e1);
}
for (Map<String, Object> datafeed : datafeeds) {
String datafeedId = (String) datafeed.get("datafeed_id");
adminClient.performRequest(new Request("DELETE", "/_xpack/ml/datafeeds/" + datafeedId));
}
}
private void deleteAllJobs() throws IOException {
final Request jobsRequest = new Request("GET", "/_xpack/ml/anomaly_detectors");
jobsRequest.addParameter("filter_path", "jobs");
final Response response = adminClient.performRequest(jobsRequest);
@SuppressWarnings("unchecked")
final List<Map<String, Object>> jobConfigs =
(List<Map<String, Object>>) XContentMapValues.extractValue("jobs", ESRestTestCase.entityAsMap(response));
if (jobConfigs == null) {
return;
}
try {
adminClient.performRequest(new Request("POST", "/_xpack/ml/anomaly_detectors/_all/_close"));
} catch (Exception e1) {
logger.warn("failed to close all jobs. Forcing closed", e1);
try {
adminClient.performRequest(new Request("POST", "/_xpack/ml/anomaly_detectors/_all/_close?force=true"));
} catch (Exception e2) {
logger.warn("Force-closing all jobs failed", e2);
}
throw new RuntimeException("Had to resort to force-closing jobs, something went wrong?",
e1);
}
for (Map<String, Object> jobConfig : jobConfigs) {
String jobId = (String) jobConfig.get("job_id");
adminClient.performRequest(new Request("DELETE", "/_xpack/ml/anomaly_detectors/" + jobId));
}
}
}

View File

@ -118,6 +118,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.index.RandomCreateIndexGenerator;
import org.elasticsearch.index.VersionType; import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.index.rankeval.PrecisionAtK; import org.elasticsearch.index.rankeval.PrecisionAtK;
@ -132,6 +133,8 @@ import org.elasticsearch.protocol.xpack.indexlifecycle.StartILMRequest;
import org.elasticsearch.protocol.xpack.indexlifecycle.StopILMRequest; import org.elasticsearch.protocol.xpack.indexlifecycle.StopILMRequest;
import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest;
import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.protocol.xpack.graph.Hop;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.rest.action.search.RestSearchAction;
@ -2602,6 +2605,35 @@ public class RequestConvertersTests extends ESTestCase {
request.getEntity().writeTo(bos); request.getEntity().writeTo(bos);
assertThat(bos.toString("UTF-8"), is(body)); assertThat(bos.toString("UTF-8"), is(body));
} }
public void testGraphExplore() throws Exception {
Map<String, String> expectedParams = new HashMap<>();
GraphExploreRequest graphExploreRequest = new GraphExploreRequest();
graphExploreRequest.sampleDiversityField("diversity");
graphExploreRequest.indices("index1", "index2");
graphExploreRequest.types("type1", "type2");
int timeout = randomIntBetween(10000, 20000);
graphExploreRequest.timeout(TimeValue.timeValueMillis(timeout));
graphExploreRequest.useSignificance(randomBoolean());
int numHops = randomIntBetween(1, 5);
for (int i = 0; i < numHops; i++) {
int hopNumber = i + 1;
QueryBuilder guidingQuery = null;
if (randomBoolean()) {
guidingQuery = new TermQueryBuilder("field" + hopNumber, "value" + hopNumber);
}
Hop hop = graphExploreRequest.createNextHop(guidingQuery);
hop.addVertexRequest("field" + hopNumber);
hop.getVertexRequest(0).addInclude("value" + hopNumber, hopNumber);
}
Request request = RequestConverters.xPackGraphExplore(graphExploreRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
assertEquals("/index1,index2/type1,type2/_xpack/graph/_explore", request.getEndpoint());
assertEquals(expectedParams, request.getParameters());
assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters()));
assertToXContentBody(graphExploreRequest, request.getEntity());
}
public void testSetIndexLifecyclePolicy() throws Exception { public void testSetIndexLifecyclePolicy() throws Exception {
SetIndexLifecyclePolicyRequest req = new SetIndexLifecyclePolicyRequest(); SetIndexLifecyclePolicyRequest req = new SetIndexLifecyclePolicyRequest();

View File

@ -758,6 +758,7 @@ public class RestHighLevelClientTests extends ESTestCase {
apiName.startsWith("license.") == false && apiName.startsWith("license.") == false &&
apiName.startsWith("machine_learning.") == false && apiName.startsWith("machine_learning.") == false &&
apiName.startsWith("watcher.") == false && apiName.startsWith("watcher.") == false &&
apiName.startsWith("graph.") == false &&
apiName.startsWith("migration.") == false && apiName.startsWith("migration.") == false &&
apiName.startsWith("index_lifecycle.") == false) { apiName.startsWith("index_lifecycle.") == false) {
apiNotFound.add(apiName); apiNotFound.add(apiName);

View File

@ -256,7 +256,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertNull(searchResponse.getSuggest()); assertNull(searchResponse.getSuggest());
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
assertEquals(0, searchResponse.getHits().getHits().length); assertEquals(0, searchResponse.getHits().getHits().length);
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f);
Terms termsAgg = searchResponse.getAggregations().get("agg1"); Terms termsAgg = searchResponse.getAggregations().get("agg1");
assertEquals("agg1", termsAgg.getName()); assertEquals("agg1", termsAgg.getName());
assertEquals(2, termsAgg.getBuckets().size()); assertEquals(2, termsAgg.getBuckets().size());
@ -293,7 +293,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
assertEquals(5, searchResponse.getHits().totalHits); assertEquals(5, searchResponse.getHits().totalHits);
assertEquals(0, searchResponse.getHits().getHits().length); assertEquals(0, searchResponse.getHits().getHits().length);
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f);
Range rangeAgg = searchResponse.getAggregations().get("agg1"); Range rangeAgg = searchResponse.getAggregations().get("agg1");
assertEquals("agg1", rangeAgg.getName()); assertEquals("agg1", rangeAgg.getName());
assertEquals(2, rangeAgg.getBuckets().size()); assertEquals(2, rangeAgg.getBuckets().size());
@ -323,7 +323,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertNull(searchResponse.getSuggest()); assertNull(searchResponse.getSuggest());
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
assertEquals(0, searchResponse.getHits().getHits().length); assertEquals(0, searchResponse.getHits().getHits().length);
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f);
Terms termsAgg = searchResponse.getAggregations().get("agg1"); Terms termsAgg = searchResponse.getAggregations().get("agg1");
assertEquals("agg1", termsAgg.getName()); assertEquals("agg1", termsAgg.getName());
assertEquals(2, termsAgg.getBuckets().size()); assertEquals(2, termsAgg.getBuckets().size());
@ -375,7 +375,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
assertEquals(5, searchResponse.getHits().totalHits); assertEquals(5, searchResponse.getHits().totalHits);
assertEquals(0, searchResponse.getHits().getHits().length); assertEquals(0, searchResponse.getHits().getHits().length);
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f);
assertEquals(1, searchResponse.getAggregations().asList().size()); assertEquals(1, searchResponse.getAggregations().asList().size());
MatrixStats matrixStats = searchResponse.getAggregations().get("agg1"); MatrixStats matrixStats = searchResponse.getAggregations().get("agg1");
assertEquals(5, matrixStats.getFieldCount("num")); assertEquals(5, matrixStats.getFieldCount("num"));
@ -474,7 +474,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
assertEquals(3, searchResponse.getHits().totalHits); assertEquals(3, searchResponse.getHits().totalHits);
assertEquals(0, searchResponse.getHits().getHits().length); assertEquals(0, searchResponse.getHits().getHits().length);
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f);
assertEquals(1, searchResponse.getAggregations().asList().size()); assertEquals(1, searchResponse.getAggregations().asList().size());
Terms terms = searchResponse.getAggregations().get("top-tags"); Terms terms = searchResponse.getAggregations().get("top-tags");
assertEquals(0, terms.getDocCountError()); assertEquals(0, terms.getDocCountError());
@ -513,7 +513,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertNull(searchResponse.getAggregations()); assertNull(searchResponse.getAggregations());
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
assertEquals(0, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().totalHits);
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f);
assertEquals(0, searchResponse.getHits().getHits().length); assertEquals(0, searchResponse.getHits().getHits().length);
assertEquals(1, searchResponse.getSuggest().size()); assertEquals(1, searchResponse.getSuggest().size());

View File

@ -0,0 +1,125 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.documentation;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.protocol.xpack.graph.Connection;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse;
import org.elasticsearch.protocol.xpack.graph.Hop;
import org.elasticsearch.protocol.xpack.graph.Vertex;
import org.elasticsearch.protocol.xpack.graph.VertexRequest;
import org.junit.Before;
import java.io.IOException;
import java.util.Collection;
public class GraphDocumentationIT extends ESRestHighLevelClientTestCase {
@Before
public void indexDocuments() throws IOException {
// Create chain of doc IDs across indices 1->2->3
Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/type/1");
doc1.setJsonEntity("{ \"participants\":[1,2], \"text\":\"let's start projectx\", \"attachment_md5\":\"324FHDGHFDG4564\"}");
client().performRequest(doc1);
Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/type/2");
doc2.setJsonEntity("{\"participants\":[2,3,4], \"text\":\"got something you both may be interested in\"}");
client().performRequest(doc2);
client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh"));
}
@SuppressForbidden(reason = "system out is ok for a documentation example")
public void testExplore() throws Exception {
RestHighLevelClient client = highLevelClient();
// tag::x-pack-graph-explore-request
GraphExploreRequest request = new GraphExploreRequest();
request.indices("index1", "index2");
request.useSignificance(false);
TermQueryBuilder startingQuery = new TermQueryBuilder("text", "projectx");
Hop hop1 = request.createNextHop(startingQuery); // <1>
VertexRequest people = hop1.addVertexRequest("participants"); // <2>
people.minDocCount(1);
VertexRequest files = hop1.addVertexRequest("attachment_md5");
files.minDocCount(1);
Hop hop2 = request.createNextHop(null); // <3>
VertexRequest vr2 = hop2.addVertexRequest("participants");
vr2.minDocCount(5);
GraphExploreResponse exploreResponse = client.graph().explore(request, RequestOptions.DEFAULT); // <4>
// end::x-pack-graph-explore-request
// tag::x-pack-graph-explore-response
Collection<Vertex> v = exploreResponse.getVertices();
Collection<Connection> c = exploreResponse.getConnections();
for (Vertex vertex : v) {
System.out.println(vertex.getField() + ":" + vertex.getTerm() + // <1>
" discovered at hop depth " + vertex.getHopDepth());
}
for (Connection link : c) {
System.out.println(link.getFrom() + " -> " + link.getTo() // <2>
+ " evidenced by " + link.getDocCount() + " docs");
}
// end::x-pack-graph-explore-response
Collection<Vertex> initialVertices = exploreResponse.getVertices();
// tag::x-pack-graph-explore-expand
GraphExploreRequest expandRequest = new GraphExploreRequest();
expandRequest.indices("index1", "index2");
Hop expandHop1 = expandRequest.createNextHop(null); // <1>
VertexRequest fromPeople = expandHop1.addVertexRequest("participants"); // <2>
for (Vertex vertex : initialVertices) {
if (vertex.getField().equals("participants")) {
fromPeople.addInclude(vertex.getTerm(), 1f);
}
}
Hop expandHop2 = expandRequest.createNextHop(null);
VertexRequest newPeople = expandHop2.addVertexRequest("participants"); // <3>
for (Vertex vertex : initialVertices) {
if (vertex.getField().equals("participants")) {
newPeople.addExclude(vertex.getTerm());
}
}
GraphExploreResponse expandResponse = client.graph().explore(expandRequest, RequestOptions.DEFAULT);
// end::x-pack-graph-explore-expand
}
}

View File

@ -20,15 +20,23 @@ package org.elasticsearch.client.documentation;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.MachineLearningIT; import org.elasticsearch.client.MachineLearningIT;
import org.elasticsearch.client.MlRestTestStateCleaner;
import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.xpack.ml.CloseJobRequest; import org.elasticsearch.protocol.xpack.ml.CloseJobRequest;
import org.elasticsearch.protocol.xpack.ml.CloseJobResponse; import org.elasticsearch.protocol.xpack.ml.CloseJobResponse;
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse;
import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest;
import org.elasticsearch.protocol.xpack.ml.GetBucketsResponse;
import org.elasticsearch.protocol.xpack.ml.GetJobRequest;
import org.elasticsearch.protocol.xpack.ml.GetJobResponse;
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; import org.elasticsearch.protocol.xpack.ml.OpenJobResponse;
import org.elasticsearch.protocol.xpack.ml.PutJobRequest; import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
@ -37,17 +45,29 @@ import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig;
import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription; import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription;
import org.elasticsearch.protocol.xpack.ml.job.config.Detector; import org.elasticsearch.protocol.xpack.ml.job.config.Detector;
import org.elasticsearch.protocol.xpack.ml.job.config.Job; import org.elasticsearch.protocol.xpack.ml.job.config.Job;
import org.elasticsearch.protocol.xpack.ml.job.results.Bucket;
import org.elasticsearch.protocol.xpack.ml.job.util.PageParams;
import org.junit.After;
import java.io.IOException;
import java.util.Collections; import java.util.Collections;
import java.util.Date; import java.util.Date;
import java.util.List; import java.util.List;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasSize;
public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
@After
public void cleanUp() throws IOException {
new MlRestTestStateCleaner(logger, client()).clearMlMetadata();
}
public void testCreateJob() throws Exception { public void testCreateJob() throws Exception {
RestHighLevelClient client = highLevelClient(); RestHighLevelClient client = highLevelClient();
@ -126,6 +146,63 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
} }
} }
public void testGetJob() throws Exception {
RestHighLevelClient client = highLevelClient();
String jobId = "get-machine-learning-job1";
Job job = MachineLearningIT.buildJob("get-machine-learning-job1");
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
Job secondJob = MachineLearningIT.buildJob("get-machine-learning-job2");
client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT);
{
//tag::x-pack-ml-get-job-request
GetJobRequest request = new GetJobRequest("get-machine-learning-job1", "get-machine-learning-job*"); //<1>
request.setAllowNoJobs(true); //<2>
//end::x-pack-ml-get-job-request
//tag::x-pack-ml-get-job-execute
GetJobResponse response = client.machineLearning().getJob(request, RequestOptions.DEFAULT);
long numberOfJobs = response.count(); //<1>
List<Job> jobs = response.jobs(); //<2>
//end::x-pack-ml-get-job-execute
assertEquals(2, response.count());
assertThat(response.jobs(), hasSize(2));
assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()),
containsInAnyOrder(job.getId(), secondJob.getId()));
}
{
GetJobRequest request = new GetJobRequest("get-machine-learning-job1", "get-machine-learning-job*");
// tag::x-pack-ml-get-job-listener
ActionListener<GetJobResponse> listener = new ActionListener<GetJobResponse>() {
@Override
public void onResponse(GetJobResponse response) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::x-pack-ml-get-job-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::x-pack-ml-get-job-execute-async
client.machineLearning().getJobAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::x-pack-ml-get-job-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testDeleteJob() throws Exception { public void testDeleteJob() throws Exception {
RestHighLevelClient client = highLevelClient(); RestHighLevelClient client = highLevelClient();
@ -223,7 +300,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(latch.await(30L, TimeUnit.SECONDS)); assertTrue(latch.await(30L, TimeUnit.SECONDS));
} }
} }
public void testCloseJob() throws Exception { public void testCloseJob() throws Exception {
RestHighLevelClient client = highLevelClient(); RestHighLevelClient client = highLevelClient();
@ -264,6 +341,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
}; };
//end::x-pack-ml-close-job-listener //end::x-pack-ml-close-job-listener
CloseJobRequest closeJobRequest = new CloseJobRequest("closing-my-second-machine-learning-job"); CloseJobRequest closeJobRequest = new CloseJobRequest("closing-my-second-machine-learning-job");
// Replace the empty listener by a blocking listener in test // Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1); final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch); listener = new LatchedActionListener<>(listener, latch);
@ -275,4 +353,105 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(latch.await(30L, TimeUnit.SECONDS)); assertTrue(latch.await(30L, TimeUnit.SECONDS));
} }
} }
public void testGetBuckets() throws IOException, InterruptedException {
RestHighLevelClient client = highLevelClient();
String jobId = "test-get-buckets";
Job job = MachineLearningIT.buildJob(jobId);
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
// Let us index a bucket
IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc");
indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
indexRequest.source("{\"job_id\":\"test-get-buckets\", \"result_type\":\"bucket\", \"timestamp\": 1533081600000," +
"\"bucket_span\": 600,\"is_interim\": false, \"anomaly_score\": 80.0}", XContentType.JSON);
client.index(indexRequest, RequestOptions.DEFAULT);
{
// tag::x-pack-ml-get-buckets-request
GetBucketsRequest request = new GetBucketsRequest(jobId); // <1>
// end::x-pack-ml-get-buckets-request
// tag::x-pack-ml-get-buckets-timestamp
request.setTimestamp("2018-08-17T00:00:00Z"); // <1>
// end::x-pack-ml-get-buckets-timestamp
// Set timestamp to null as it is incompatible with other args
request.setTimestamp(null);
// tag::x-pack-ml-get-buckets-anomaly-score
request.setAnomalyScore(75.0); // <1>
// end::x-pack-ml-get-buckets-anomaly-score
// tag::x-pack-ml-get-buckets-desc
request.setDescending(true); // <1>
// end::x-pack-ml-get-buckets-desc
// tag::x-pack-ml-get-buckets-end
request.setEnd("2018-08-21T00:00:00Z"); // <1>
// end::x-pack-ml-get-buckets-end
// tag::x-pack-ml-get-buckets-exclude-interim
request.setExcludeInterim(true); // <1>
// end::x-pack-ml-get-buckets-exclude-interim
// tag::x-pack-ml-get-buckets-expand
request.setExpand(true); // <1>
// end::x-pack-ml-get-buckets-expand
// tag::x-pack-ml-get-buckets-page
request.setPageParams(new PageParams(100, 200)); // <1>
// end::x-pack-ml-get-buckets-page
// Set page params back to null so the response contains the bucket we indexed
request.setPageParams(null);
// tag::x-pack-ml-get-buckets-sort
request.setSort("anomaly_score"); // <1>
// end::x-pack-ml-get-buckets-sort
// tag::x-pack-ml-get-buckets-start
request.setStart("2018-08-01T00:00:00Z"); // <1>
// end::x-pack-ml-get-buckets-start
// tag::x-pack-ml-get-buckets-execute
GetBucketsResponse response = client.machineLearning().getBuckets(request, RequestOptions.DEFAULT);
// end::x-pack-ml-get-buckets-execute
// tag::x-pack-ml-get-buckets-response
long count = response.count(); // <1>
List<Bucket> buckets = response.buckets(); // <2>
// end::x-pack-ml-get-buckets-response
assertEquals(1, buckets.size());
}
{
GetBucketsRequest request = new GetBucketsRequest(jobId);
// tag::x-pack-ml-get-buckets-listener
ActionListener<GetBucketsResponse> listener =
new ActionListener<GetBucketsResponse>() {
@Override
public void onResponse(GetBucketsResponse getBucketsResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::x-pack-ml-get-buckets-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::x-pack-ml-get-buckets-execute-async
client.machineLearning().getBucketsAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::x-pack-ml-get-buckets-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
} }

View File

@ -1,3 +1,5 @@
import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask
/* /*
* Licensed to Elasticsearch under one or more contributor * Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with * license agreements. See the NOTICE file distributed with
@ -16,9 +18,6 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
import org.elasticsearch.gradle.precommit.PrecommitTasks
apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.build'
apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm' apply plugin: 'nebula.maven-scm'
@ -53,10 +52,9 @@ dependencies {
testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}" testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}"
} }
forbiddenApisMain { tasks.withType(ForbiddenApisCliTask) {
//client does not depend on server, so only jdk and http signatures should be checked //client does not depend on server, so only jdk and http signatures should be checked
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), replaceSignatureFiles ('jdk-signatures', 'http-signatures')
PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
} }
forbiddenPatterns { forbiddenPatterns {
@ -67,9 +65,6 @@ forbiddenApisTest {
//we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
bundledSignatures -= 'jdk-non-portable' bundledSignatures -= 'jdk-non-portable'
bundledSignatures += 'jdk-internal' bundledSignatures += 'jdk-internal'
//client does not depend on server, so only jdk signatures should be checked
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
} }
// JarHell is part of es server, which we don't want to pull in // JarHell is part of es server, which we don't want to pull in

View File

@ -16,9 +16,6 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
import org.elasticsearch.gradle.precommit.PrecommitTasks
apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.build'
apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm' apply plugin: 'nebula.maven-scm'
@ -55,7 +52,7 @@ dependencies {
forbiddenApisMain { forbiddenApisMain {
//client does not depend on server, so only jdk signatures should be checked //client does not depend on server, so only jdk signatures should be checked
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] replaceSignatureFiles 'jdk-signatures'
} }
forbiddenApisTest { forbiddenApisTest {
@ -63,7 +60,7 @@ forbiddenApisTest {
bundledSignatures -= 'jdk-non-portable' bundledSignatures -= 'jdk-non-portable'
bundledSignatures += 'jdk-internal' bundledSignatures += 'jdk-internal'
//client does not depend on server, so only jdk signatures should be checked //client does not depend on server, so only jdk signatures should be checked
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] replaceSignatureFiles 'jdk-signatures'
} }
dependencyLicenses { dependencyLicenses {

View File

@ -16,10 +16,6 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.JavaVersion
apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.build'
targetCompatibility = JavaVersion.VERSION_1_7 targetCompatibility = JavaVersion.VERSION_1_7
@ -36,7 +32,7 @@ dependencies {
forbiddenApisMain { forbiddenApisMain {
//client does not depend on core, so only jdk signatures should be checked //client does not depend on core, so only jdk signatures should be checked
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] replaceSignatureFiles 'jdk-signatures'
} }
forbiddenApisTest { forbiddenApisTest {
@ -44,7 +40,7 @@ forbiddenApisTest {
bundledSignatures -= 'jdk-non-portable' bundledSignatures -= 'jdk-non-portable'
bundledSignatures += 'jdk-internal' bundledSignatures += 'jdk-internal'
//client does not depend on core, so only jdk signatures should be checked //client does not depend on core, so only jdk signatures should be checked
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] replaceSignatureFiles 'jdk-signatures'
} }
// JarHell is part of es server, which we don't want to pull in // JarHell is part of es server, which we don't want to pull in

View File

@ -16,9 +16,6 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
import org.elasticsearch.gradle.precommit.PrecommitTasks
apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.build'
apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm' apply plugin: 'nebula.maven-scm'
@ -47,8 +44,7 @@ dependencyLicenses {
forbiddenApisTest { forbiddenApisTest {
// we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to // we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to
// be pulled in // be pulled in
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), replaceSignatureFiles 'jdk-signatures', 'es-all-signatures'
PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')]
} }
namingConventions { namingConventions {

View File

@ -1,11 +1,11 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks
apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.build'
targetCompatibility = JavaVersion.VERSION_1_7 targetCompatibility = JavaVersion.VERSION_1_7
// java_version_checker do not depend on core so only JDK signatures should be checked // java_version_checker do not depend on core so only JDK signatures should be checked
forbiddenApisMain.signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] forbiddenApisMain {
replaceSignatureFiles 'jdk-signatures'
}
test.enabled = false test.enabled = false
namingConventions.enabled = false namingConventions.enabled = false

View File

@ -17,8 +17,9 @@
* under the License. * under the License.
*/ */
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.JavaVersion
import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask
apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.build'
@ -31,10 +32,9 @@ dependencies {
archivesBaseName = 'elasticsearch-launchers' archivesBaseName = 'elasticsearch-launchers'
// java_version_checker do not depend on core so only JDK signatures should be checked tasks.withType(ForbiddenApisCliTask) {
List jdkSignatures = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] replaceSignatureFiles 'jdk-signatures'
forbiddenApisMain.signaturesURLs = jdkSignatures }
forbiddenApisTest.signaturesURLs = jdkSignatures
namingConventions { namingConventions {
testClass = 'org.elasticsearch.tools.launchers.LaunchersTestCase' testClass = 'org.elasticsearch.tools.launchers.LaunchersTestCase'

View File

@ -0,0 +1,53 @@
[[java-rest-high-x-pack-graph-explore]]
=== X-Pack Graph explore API
[[java-rest-high-x-pack-graph-explore-execution]]
==== Initial request
Graph queries are executed using the `explore()` method:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-request]
--------------------------------------------------
<1> In this example we seed the exploration with a query to find messages mentioning the mysterious `projectx`
<2> What we want to discover in these messages are the ids of `participants` in the communications and the md5 hashes
of any attached files. In each case, we want to find people or files that have had at least one document connecting them
to projectx.
<3> The next "hop" in the graph exploration is to find the people who have shared several messages with the people or files
discovered in the previous hop (the projectx conspirators). The `minDocCount` control is used here to ensure the people
discovered have had at least 5 communications with projectx entities. Note that we could also supply a "guiding query" here, e.g. a
date range to consider only recent communications, but we pass null to consider all connections.
<4> Finally we call the graph explore API with the GraphExploreRequest object.
==== Response
Graph responses consist of Vertex and Connection objects (aka "nodes" and "edges" respectively):
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-response]
--------------------------------------------------
<1> Each Vertex is a unique term (a combination of fieldname and term value). The "hopDepth" property tells us at which point in the
requested exploration this term was first discovered.
<2> Each Connection is a pair of Vertex objects and includes a docCount property telling us how many times these two
Vertex terms have been sighted together.
[[java-rest-high-x-pack-graph-expand-execution]]
==== Expanding a client-side Graph
Typically, once an application has rendered an initial GraphExploreResponse as a collection of vertices and connecting lines (graph visualization toolkits such as D3, sigma.js or Keylines help here), the next step a user may want to take is to "expand". This involves finding new vertices that might be connected to the existing ones currently shown.
To do this we use the same `explore` method but our request contains details about which vertices to expand from and which vertices to avoid re-discovering.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-expand]
--------------------------------------------------
<1> Unlike the initial request, we do not need to pass a starting query
<2> In the first hop, which represents our "from" vertices, we explicitly list the terms that we already have on-screen and want to expand by using the `addInclude` filter.
We can supply a boost for those terms that are considered more important to follow than others, but here we select a common value of 1 for all.
<3> When defining the second hop, which represents the "to" vertices we hope to discover, we explicitly list the terms that we already know about using the `addExclude` filter.

View File

@ -0,0 +1,125 @@
[[java-rest-high-x-pack-ml-get-buckets]]
=== Get Buckets API
The Get Buckets API retrieves one or more bucket results.
It accepts a `GetBucketsRequest` object and responds
with a `GetBucketsResponse` object.
[[java-rest-high-x-pack-ml-get-buckets-request]]
==== Get Buckets Request
A `GetBucketsRequest` object is created with an existing, non-null `jobId`.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-request]
--------------------------------------------------
<1> Constructing a new request referencing an existing `jobId`
==== Optional Arguments
The following arguments are optional:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-timestamp]
--------------------------------------------------
<1> The timestamp of the bucket to get. If not set, all buckets are returned.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-anomaly-score]
--------------------------------------------------
<1> Buckets with anomaly scores greater than or equal to this value will be returned.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-desc]
--------------------------------------------------
<1> If `true`, the buckets are sorted in descending order. Defaults to `false`.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-end]
--------------------------------------------------
<1> Buckets with timestamps earlier than this time will be returned.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-exclude-interim]
--------------------------------------------------
<1> If `true`, interim results will be excluded. Defaults to `false`.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-expand]
--------------------------------------------------
<1> If `true`, buckets will include their anomaly records. Defaults to `false`.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-page]
--------------------------------------------------
<1> The page parameters `from` and `size`. `from` specifies the number of buckets to skip.
`size` specifies the maximum number of buckets to get. Defaults to `0` and `100` respectively.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-sort]
--------------------------------------------------
<1> The field to sort buckets on. Defaults to `timestamp`.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-start]
--------------------------------------------------
<1> Buckets with timestamps on or after this time will be returned.
[[java-rest-high-x-pack-ml-get-buckets-execution]]
==== Execution
The request can be executed through the `MachineLearningClient` contained
in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-execute]
--------------------------------------------------
[[java-rest-high-x-pack-ml-get-buckets-execution-async]]
==== Asynchronous Execution
The request can also be executed asynchronously:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-execute-async]
--------------------------------------------------
<1> The `GetBucketsRequest` to execute and the `ActionListener` to use when
the execution completes
The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back with the `onResponse` method
if the execution is successful or the `onFailure` method if the execution
failed.
A typical listener for `GetBucketsResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-listener]
--------------------------------------------------
<1> `onResponse` is called back when the action is completed successfully
<2> `onFailure` is called back when some unexpected error occurs
[[java-rest-high-x-pack-ml-get-buckets-response]]
==== Get Buckets Response
The returned `GetBucketsResponse` contains the requested buckets:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-response]
--------------------------------------------------
<1> The count of buckets that were matched
<2> The buckets retrieved

View File

@ -0,0 +1,57 @@
[[java-rest-high-x-pack-ml-get-job]]
=== Get Job API
The Get Job API provides the ability to get {ml} jobs in the cluster.
It accepts a `GetJobRequest` object and responds
with a `GetJobResponse` object.
[[java-rest-high-x-pack-ml-get-job-request]]
==== Get Job Request
A `GetJobRequest` object can have any number of `jobId` or `groupName`
entries. However, they must all be non-null. An empty list is the same as
requesting all jobs.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-request]
--------------------------------------------------
<1> Constructing a new request referencing existing `jobIds`; the entries can contain wildcards
<2> Whether to ignore if a wildcard expression matches no jobs
(this includes the `_all` string or when no jobs have been specified).
[[java-rest-high-x-pack-ml-get-job-execution]]
==== Execution
The request can be executed through the `MachineLearningClient` contained
in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-execute]
--------------------------------------------------
<1> `count()` from the `GetJobResponse` indicates the number of jobs found
<2> `jobs()` is the collection of {ml} `Job` objects found
[[java-rest-high-x-pack-ml-get-job-execution-async]]
==== Asynchronous Execution
The request can also be executed asynchronously:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-execute-async]
--------------------------------------------------
<1> The `GetJobRequest` to execute and the `ActionListener` to use when
the execution completes
The method does not block and returns immediately. The passed `ActionListener` is used
to notify the caller of completion. A typical `ActionListener` for `GetJobResponse` may
look like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-listener]
--------------------------------------------------
<1> `onResponse` is called back when the action is completed successfully
<2> `onFailure` is called back when some unexpected error occurs

View File

@ -205,14 +205,18 @@ include::licensing/delete-license.asciidoc[]
The Java High Level REST Client supports the following Machine Learning APIs: The Java High Level REST Client supports the following Machine Learning APIs:
* <<java-rest-high-x-pack-ml-put-job>> * <<java-rest-high-x-pack-ml-put-job>>
* <<java-rest-high-x-pack-ml-get-job>>
* <<java-rest-high-x-pack-ml-delete-job>> * <<java-rest-high-x-pack-ml-delete-job>>
* <<java-rest-high-x-pack-ml-open-job>> * <<java-rest-high-x-pack-ml-open-job>>
* <<java-rest-high-x-pack-ml-close-job>> * <<java-rest-high-x-pack-ml-close-job>>
* <<java-rest-high-x-pack-ml-get-buckets>>
include::ml/put-job.asciidoc[] include::ml/put-job.asciidoc[]
include::ml/get-job.asciidoc[]
include::ml/delete-job.asciidoc[] include::ml/delete-job.asciidoc[]
include::ml/open-job.asciidoc[] include::ml/open-job.asciidoc[]
include::ml/close-job.asciidoc[] include::ml/close-job.asciidoc[]
include::ml/get-buckets.asciidoc[]
== Migration APIs == Migration APIs
@ -231,3 +235,11 @@ The Java High Level REST Client supports the following Watcher APIs:
include::watcher/put-watch.asciidoc[] include::watcher/put-watch.asciidoc[]
include::watcher/delete-watch.asciidoc[] include::watcher/delete-watch.asciidoc[]
== Graph APIs
The Java High Level REST Client supports the following Graph APIs:
* <<java-rest-high-x-pack-graph-explore>>
include::graph/explore.asciidoc[]

View File

@ -26,7 +26,7 @@ The only variable that is available is `params`, which can be used to access use
The result of the script is always converted to a string. The result of the script is always converted to a string.
If no context is specified then this context is used by default. If no context is specified then this context is used by default.
====== Example *Example*
Request: Request:
@ -67,7 +67,7 @@ The following parameters may be specified in `context_setup` for a filter contex
document:: Contains the document that will be temporarily indexed in-memory and is accessible from the script. document:: Contains the document that will be temporarily indexed in-memory and is accessible from the script.
index:: The name of an index containing a mapping that is compatible with the document being indexed. index:: The name of an index containing a mapping that is compatible with the document being indexed.
====== Example *Example*
[source,js] [source,js]
---------------------------------------------------------------- ----------------------------------------------------------------
@ -125,7 +125,7 @@ document:: Contains the document that will be temporarily indexed in-memory and
index:: The name of an index containing a mapping that is compatible with the document being indexed. index:: The name of an index containing a mapping that is compatible with the document being indexed.
query:: If `_score` is used in the script then a query can be specified that will be used to compute a score. query:: If `_score` is used in the script then a query can be specified that will be used to compute a score.
====== Example *Example*
[source,js] [source,js]
---------------------------------------------------------------- ----------------------------------------------------------------

View File

@ -144,7 +144,7 @@ Possible response:
}, },
"hits": { "hits": {
"total": 3, "total": 3,
"max_score": 0.0, "max_score": null,
"hits": [] "hits": []
}, },
"aggregations": { "aggregations": {

View File

@ -25,7 +25,7 @@ the configured remote cluster alias.
`num_nodes_connected`:: `num_nodes_connected`::
The number of connected nodes in the remote cluster. The number of connected nodes in the remote cluster.
`max_connection_per_cluster`:: `max_connections_per_cluster`::
The maximum number of connections maintained for the remote cluster. The maximum number of connections maintained for the remote cluster.
`initial_connect_timeout`:: `initial_connect_timeout`::

View File

@ -1141,7 +1141,7 @@ And the response (partially shown):
}, },
"hits" : { "hits" : {
"total" : 1000, "total" : 1000,
"max_score" : 0.0, "max_score" : null,
"hits" : [ ] "hits" : [ ]
}, },
"aggregations" : { "aggregations" : {

View File

@ -67,6 +67,13 @@ process equal to the size of the file being mapped. Before using this
class, be sure you have allowed plenty of class, be sure you have allowed plenty of
<<vm-max-map-count,virtual address space>>. <<vm-max-map-count,virtual address space>>.
[[allow-mmapfs]]
You can restrict the use of the `mmapfs` store type via the setting
`node.store.allow_mmapfs`. This is a boolean setting indicating whether or not
`mmapfs` is allowed. The default is to allow `mmapfs`. This setting is useful,
for example, if you are in an environment where you cannot control the ability
to create a lot of memory maps and therefore need to disable the ability to use `mmapfs`.
=== Pre-loading data into the file system cache === Pre-loading data into the file system cache
NOTE: This is an expert setting, the details of which may change in the future. NOTE: This is an expert setting, the details of which may change in the future.

View File

@ -2,6 +2,9 @@
=== `ignore_above` === `ignore_above`
Strings longer than the `ignore_above` setting will not be indexed or stored. Strings longer than the `ignore_above` setting will not be indexed or stored.
For arrays of strings, `ignore_above` will be applied for each array element separately and string elements longer than `ignore_above` will not be indexed or stored.
NOTE: All strings/array elements will still be present in the `_source` field, if the latter is enabled, which is the default in Elasticsearch.
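To make the array behaviour concrete, here is a small illustrative sketch (the index name, field name, and `ignore_above` value of `10` are made up for this example); only elements within the limit are indexed, while `_source` keeps the full array:
[source,js]
--------------------------------------------------
PUT my_index
{
  "mappings": {
    "_doc": {
      "properties": {
        "tags": {
          "type": "keyword",
          "ignore_above": 10 <1>
        }
      }
    }
  }
}

PUT my_index/_doc/1
{
  "tags": [ "short", "this element is far too long" ] <2>
}
--------------------------------------------------
// CONSOLE
<1> Array elements longer than 10 characters will not be indexed or stored
<2> Only `short` is indexed; both elements remain in `_source`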
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------

View File

@ -151,7 +151,7 @@ returns
}, },
"hits": { "hits": {
"total": 3, "total": 3,
"max_score": 0.0, "max_score": null,
"hits": [] "hits": []
}, },
"aggregations": { "aggregations": {

View File

@ -100,3 +100,8 @@ and the context is only accepted if `path` points to a field with `geo_point` ty
`max_concurrent_shard_requests` used to limit the total number of concurrent shard `max_concurrent_shard_requests` used to limit the total number of concurrent shard
requests a single high level search request can execute. In 7.0 this changed to be the requests a single high level search request can execute. In 7.0 this changed to be the
max number of concurrent shard requests per node. The default is now `5`. max number of concurrent shard requests per node. The default is now `5`.
==== `max_score` set to `null` when scores are not tracked
`max_score` used to be set to `0` whenever scores were not tracked. `null` is now used
instead, which is a more appropriate value for a scenario where scores are not available.
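For illustration, a hits section from a request that does not track scores (for example, a hypothetical request with `size` set to `0`) now reports `null` rather than `0.0`:
[source,js]
--------------------------------------------------
{
  "hits": {
    "total": 3,
    "max_score": null, <1>
    "hits": []
  }
}
--------------------------------------------------
// NOTCONSOLE
<1> Previously reported as `0.0` when scores were not tracked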

View File

@ -86,6 +86,16 @@ The msearch's `max_concurrent_searches` request parameter can be used to control
the maximum number of concurrent searches the multi search api will execute. the maximum number of concurrent searches the multi search api will execute.
This default is based on the number of data nodes and the default search thread pool size. This default is based on the number of data nodes and the default search thread pool size.
The request parameter `max_concurrent_shard_requests` can be used to control the
maximum number of concurrent shard requests that each sub search request will execute.
This parameter should be used to protect a single request from overloading a cluster
(e.g., a default request will hit all indices in a cluster, which could cause shard request rejections
if the number of shards per node is high). The default is based on the number of
data nodes in the cluster but is at most `256`. In certain scenarios parallelism isn't achieved through
concurrent requests, so this protection can result in poor performance. For
instance, in an environment where only a very low number of concurrent search requests is expected,
it might help to increase this value.
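As a rough sketch of how this might look (the index names `twitter`/`twitter2` and the value `32` are placeholders chosen for this example), the parameter is passed on the URL of the multi search request:
[source,js]
--------------------------------------------------
GET twitter/_msearch?max_concurrent_shard_requests=32
{ }
{"query" : {"match_all" : {}}}
{"index" : "twitter2"}
{"query" : {"match_all" : {}}}
--------------------------------------------------
// CONSOLE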
[float] [float]
[[msearch-security]] [[msearch-security]]
=== Security === Security

View File

@ -161,7 +161,7 @@ be set to `true` in the response.
}, },
"hits": { "hits": {
"total": 1, "total": 1,
"max_score": 0.0, "max_score": null,
"hits": [] "hits": []
} }
} }

View File

@ -30,6 +30,27 @@ GET /_search
Doc value fields can work on fields that are not stored. Doc value fields can work on fields that are not stored.
`*` can be used as a wildcard, for example:
[source,js]
--------------------------------------------------
GET /_search
{
"query" : {
"match_all": {}
},
"docvalue_fields" : [
{
"field": "*field", <1>
"format": "use_field_mapping" <2>
}
]
}
--------------------------------------------------
// CONSOLE
<1> Match all fields ending with `field`
<2> Format to be applied to all matching fields.
Note that if the fields parameter specifies fields without docvalues it will try to load the value from the fielddata cache Note that if the fields parameter specifies fields without docvalues it will try to load the value from the fielddata cache
causing the terms for that field to be loaded to memory (cached), which will result in more memory consumption. causing the terms for that field to be loaded to memory (cached), which will result in more memory consumption.

View File

@ -258,7 +258,7 @@ Which should look like:
}, },
"hits": { "hits": {
"total" : 0, "total" : 0,
"max_score" : 0.0, "max_score" : null,
"hits" : [] "hits" : []
}, },
"suggest": { "suggest": {

View File

@ -46,6 +46,9 @@ settings for the ad1 realm: `xpack.security.authc.realms.ad1.*`. The API already
omits all `ssl` settings, `bind_dn`, and `bind_password` due to the omits all `ssl` settings, `bind_dn`, and `bind_password` due to the
sensitive nature of the information. sensitive nature of the information.
`xpack.security.fips_mode.enabled`::
Enables FIPS mode of operation. Set this to `true` if you run this {es} instance in a FIPS 140-2 enabled JVM. For more information, see <<fips-140-compliance>>. Defaults to `false`.
[float] [float]
[[password-security-settings]] [[password-security-settings]]
==== Default password security settings ==== Default password security settings
@ -858,6 +861,15 @@ The maximum amount of skew that can be tolerated between the IdP's clock and the
{es} node's clock. {es} node's clock.
Defaults to `3m` (3 minutes). Defaults to `3m` (3 minutes).
`req_authn_context_class_ref`::
A comma-separated list of Authentication Context Class Reference values to be
included in the Requested Authentication Context when requesting the IdP to
authenticate the current user. The Authentication Context of the corresponding
authentication response should contain at least one of the requested values.
+
For more information, see
{stack-ov}/saml-guide-authentication.html#req-authn-context[Requesting specific authentication methods].
[float] [float]
[[ref-saml-signing-settings]] [[ref-saml-signing-settings]]
===== SAML realm signing settings ===== SAML realm signing settings
@ -1124,7 +1136,12 @@ settings such as those for HTTP or Transport.
`xpack.ssl.supported_protocols`:: `xpack.ssl.supported_protocols`::
Supported protocols with versions. Valid protocols: `SSLv2Hello`, Supported protocols with versions. Valid protocols: `SSLv2Hello`,
`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`, `SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`,
`TLSv1`. `TLSv1`.
+
--
NOTE: If `xpack.security.fips_mode.enabled` is `true`, you cannot use `SSLv2Hello`
or `SSLv3`. See <<fips-140-compliance>>.
--
`xpack.ssl.client_authentication`:: `xpack.ssl.client_authentication`::
Controls the server's behavior in regard to requesting a certificate Controls the server's behavior in regard to requesting a certificate
@ -1223,6 +1240,9 @@ Password to the truststore.
`xpack.ssl.truststore.secure_password` (<<secure-settings,Secure>>):: `xpack.ssl.truststore.secure_password` (<<secure-settings,Secure>>)::
Password to the truststore. Password to the truststore.
WARNING: If `xpack.security.fips_mode.enabled` is `true`, you cannot use Java
keystore files. See <<fips-140-compliance>>.
[float] [float]
===== PKCS#12 files ===== PKCS#12 files
@ -1261,6 +1281,9 @@ Password to the truststore.
`xpack.ssl.truststore.secure_password` (<<secure-settings,Secure>>):: `xpack.ssl.truststore.secure_password` (<<secure-settings,Secure>>)::
Password to the truststore. Password to the truststore.
WARNING: If `xpack.security.fips_mode.enabled` is `true`, you cannot use PKCS#12
keystore files. See <<fips-140-compliance>>.
[[pkcs12-truststore-note]] [[pkcs12-truststore-note]]
[NOTE] [NOTE]
Storing trusted certificates in a PKCS#12 file, although supported, is Storing trusted certificates in a PKCS#12 file, although supported, is

View File

@ -155,6 +155,11 @@ the kernel allows a process to have at least 262,144 memory-mapped areas
and is enforced on Linux only. To pass the maximum map count check, you and is enforced on Linux only. To pass the maximum map count check, you
must configure `vm.max_map_count` via `sysctl` to be at least `262144`. must configure `vm.max_map_count` via `sysctl` to be at least `262144`.
Alternatively, the maximum map count check is only needed if you are using
`mmapfs` as the <<index-modules-store,store type>> for your indices. If you
<<allow-mmapfs,do not allow>> the use of `mmapfs` then this bootstrap check will
not be enforced.
=== Client JVM check === Client JVM check
There are two different JVMs provided by OpenJDK-derived JVMs: the There are two different JVMs provided by OpenJDK-derived JVMs: the

View File

@ -41,6 +41,8 @@ Elasticsearch website or from our RPM repository.
`msi`:: `msi`::
beta[]
+
The `msi` package is suitable for installation on Windows 64-bit systems with at least The `msi` package is suitable for installation on Windows 64-bit systems with at least
.NET 4.5 framework installed, and is the easiest choice for getting started with .NET 4.5 framework installed, and is the easiest choice for getting started with
Elasticsearch on Windows. MSIs may be downloaded from the Elasticsearch website. Elasticsearch on Windows. MSIs may be downloaded from the Elasticsearch website.

View File

@ -91,9 +91,6 @@ using the `bin/elasticsearch-keystore add` command, call:
[source,js] [source,js]
---- ----
POST _nodes/reload_secure_settings POST _nodes/reload_secure_settings
{
"secure_settings_password": ""
}
---- ----
// CONSOLE // CONSOLE
This API will decrypt and re-read the entire keystore, on every cluster node, This API will decrypt and re-read the entire keystore, on every cluster node,

View File

@ -16,9 +16,6 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
import org.elasticsearch.gradle.precommit.PrecommitTasks
apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.build'
apply plugin: 'nebula.optional-base' apply plugin: 'nebula.optional-base'
apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-base-publish'
@ -34,5 +31,5 @@ test.enabled = false
jarHell.enabled = false jarHell.enabled = false
forbiddenApisMain { forbiddenApisMain {
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] replaceSignatureFiles 'jdk-signatures'
} }

View File

@ -1,5 +1,3 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks
/* /*
* Licensed to Elasticsearch under one or more contributor * Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with * license agreements. See the NOTICE file distributed with
@ -91,7 +89,7 @@ dependencies {
forbiddenApisMain { forbiddenApisMain {
// :libs:core does not depend on server // :libs:core does not depend on server
// TODO: Need to decide how we want to handle for forbidden signatures with the changes to server // TODO: Need to decide how we want to handle for forbidden signatures with the changes to server
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] replaceSignatureFiles 'jdk-signatures'
} }
if (isEclipse) { if (isEclipse) {

View File

@ -1,5 +1,3 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks
/* /*
* Licensed to Elasticsearch under one or more contributor * Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with * license agreements. See the NOTICE file distributed with
@ -33,7 +31,7 @@ dependencies {
} }
forbiddenApisMain { forbiddenApisMain {
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] replaceSignatureFiles 'jdk-signatures'
} }
if (isEclipse) { if (isEclipse) {

View File

@ -1,5 +1,3 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks
/* /*
* Licensed to Elasticsearch under one or more contributor * Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with * license agreements. See the NOTICE file distributed with
@ -34,7 +32,7 @@ dependencies {
} }
forbiddenApisMain { forbiddenApisMain {
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] replaceSignatureFiles 'jdk-signatures'
} }
if (isEclipse) { if (isEclipse) {

View File

@ -16,9 +16,6 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
import org.elasticsearch.gradle.precommit.PrecommitTasks
apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm' apply plugin: 'nebula.maven-scm'
@ -62,5 +59,5 @@ if (isEclipse) {
forbiddenApisMain { forbiddenApisMain {
// nio does not depend on core, so only jdk signatures should be checked // nio does not depend on core, so only jdk signatures should be checked
// es-all is not checked as we connect and accept sockets // es-all is not checked as we connect and accept sockets
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] replaceSignatureFiles 'jdk-signatures'
} }

View File

@ -16,9 +16,6 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
import org.elasticsearch.gradle.precommit.PrecommitTasks
apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm' apply plugin: 'nebula.maven-scm'
@ -47,7 +44,7 @@ dependencies {
} }
forbiddenApisMain { forbiddenApisMain {
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] replaceSignatureFiles 'jdk-signatures'
} }
if (isEclipse) { if (isEclipse) {

View File

@ -1,5 +1,3 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks
/* /*
* Licensed to Elasticsearch under one or more contributor * Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with * license agreements. See the NOTICE file distributed with
@ -57,7 +55,7 @@ dependencies {
forbiddenApisMain { forbiddenApisMain {
// x-content does not depend on server // x-content does not depend on server
// TODO: Need to decide how we want to handle for forbidden signatures with the changes to core // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] replaceSignatureFiles 'jdk-signatures'
} }
if (isEclipse) { if (isEclipse) {

View File

@ -131,7 +131,7 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder {
for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) {
intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx); intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx);
} }
result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0); result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, Float.NaN);
} else { } else {
int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
TopDocsCollector<?> topDocsCollector; TopDocsCollector<?> topDocsCollector;

View File

@ -21,11 +21,11 @@ package org.elasticsearch.http.netty4;
import io.netty.channel.Channel; import io.netty.channel.Channel;
import io.netty.channel.ChannelPromise; import io.netty.channel.ChannelPromise;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.common.concurrent.CompletableContext;
import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.HttpChannel;
import org.elasticsearch.http.HttpResponse; import org.elasticsearch.http.HttpResponse;
import org.elasticsearch.transport.netty4.Netty4Utils;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
@ -42,7 +42,7 @@ public class Netty4HttpChannel implements HttpChannel {
} else { } else {
Throwable cause = f.cause(); Throwable cause = f.cause();
if (cause instanceof Error) { if (cause instanceof Error) {
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
closeContext.completeExceptionally(new Exception(cause)); closeContext.completeExceptionally(new Exception(cause));
} else { } else {
closeContext.completeExceptionally((Exception) cause); closeContext.completeExceptionally((Exception) cause);
@ -59,7 +59,7 @@ public class Netty4HttpChannel implements HttpChannel {
listener.onResponse(null); listener.onResponse(null);
} else { } else {
final Throwable cause = f.cause(); final Throwable cause = f.cause();
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
if (cause instanceof Error) { if (cause instanceof Error) {
listener.onFailure(new Exception(cause)); listener.onFailure(new Exception(cause));
} else { } else {

View File

@ -27,7 +27,6 @@ import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.http.HttpPipelinedRequest; import org.elasticsearch.http.HttpPipelinedRequest;
import org.elasticsearch.transport.netty4.Netty4Utils;
@ChannelHandler.Sharable @ChannelHandler.Sharable
class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<FullHttpRequest>> { class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<FullHttpRequest>> {
@ -58,7 +57,7 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelined
if (request.decoderResult().isFailure()) { if (request.decoderResult().isFailure()) {
Throwable cause = request.decoderResult().cause(); Throwable cause = request.decoderResult().cause();
if (cause instanceof Error) { if (cause instanceof Error) {
ExceptionsHelper.dieOnError(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
serverTransport.incomingRequestError(httpRequest, channel, new Exception(cause)); serverTransport.incomingRequestError(httpRequest, channel, new Exception(cause));
} else { } else {
serverTransport.incomingRequestError(httpRequest, channel, (Exception) cause); serverTransport.incomingRequestError(httpRequest, channel, (Exception) cause);
@ -74,7 +73,7 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelined
@Override @Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
Netty4HttpChannel channel = ctx.channel().attr(Netty4HttpServerTransport.HTTP_CHANNEL_KEY).get(); Netty4HttpChannel channel = ctx.channel().attr(Netty4HttpServerTransport.HTTP_CHANNEL_KEY).get();
if (cause instanceof Error) { if (cause instanceof Error) {
serverTransport.onException(channel, new Exception(cause)); serverTransport.onException(channel, new Exception(cause));

View File

@ -20,10 +20,10 @@
package org.elasticsearch.http.netty4; package org.elasticsearch.http.netty4;
import io.netty.channel.Channel; import io.netty.channel.Channel;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.common.concurrent.CompletableContext;
import org.elasticsearch.http.HttpServerChannel; import org.elasticsearch.http.HttpServerChannel;
import org.elasticsearch.transport.netty4.Netty4Utils;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
@ -40,7 +40,7 @@ public class Netty4HttpServerChannel implements HttpServerChannel {
} else { } else {
Throwable cause = f.cause(); Throwable cause = f.cause();
if (cause instanceof Error) { if (cause instanceof Error) {
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
closeContext.completeExceptionally(new Exception(cause)); closeContext.completeExceptionally(new Exception(cause));
} else { } else {
closeContext.completeExceptionally((Exception) cause); closeContext.completeExceptionally((Exception) cause);


@ -41,6 +41,7 @@ import io.netty.handler.codec.http.HttpResponseEncoder;
import io.netty.handler.timeout.ReadTimeoutException; import io.netty.handler.timeout.ReadTimeoutException;
import io.netty.handler.timeout.ReadTimeoutHandler; import io.netty.handler.timeout.ReadTimeoutHandler;
import io.netty.util.AttributeKey; import io.netty.util.AttributeKey;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.CloseableChannel;
import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkService;
@ -338,7 +339,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
@Override @Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
super.exceptionCaught(ctx, cause); super.exceptionCaught(ctx, cause);
} }
} }
@ -354,7 +355,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
@Override @Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
Netty4HttpServerChannel httpServerChannel = ctx.channel().attr(HTTP_SERVER_CHANNEL_KEY).get(); Netty4HttpServerChannel httpServerChannel = ctx.channel().attr(HTTP_SERVER_CHANNEL_KEY).get();
if (cause instanceof Error) { if (cause instanceof Error) {
transport.onServerException(httpServerChannel, new Exception(cause)); transport.onServerException(httpServerChannel, new Exception(cause));


@ -68,7 +68,7 @@ final class Netty4MessageChannelHandler extends ChannelDuplexHandler {
@Override @Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
final Throwable unwrapped = ExceptionsHelper.unwrap(cause, ElasticsearchException.class); final Throwable unwrapped = ExceptionsHelper.unwrap(cause, ElasticsearchException.class);
final Throwable newCause = unwrapped != null ? unwrapped : cause; final Throwable newCause = unwrapped != null ? unwrapped : cause;
Netty4TcpChannel tcpChannel = ctx.channel().attr(Netty4Transport.CHANNEL_KEY).get(); Netty4TcpChannel tcpChannel = ctx.channel().attr(Netty4Transport.CHANNEL_KEY).get();


@ -22,6 +22,7 @@ package org.elasticsearch.transport.netty4;
import io.netty.channel.Channel; import io.netty.channel.Channel;
import io.netty.channel.ChannelOption; import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelPromise; import io.netty.channel.ChannelPromise;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.common.concurrent.CompletableContext;
@ -45,7 +46,7 @@ public class Netty4TcpChannel implements TcpChannel {
} else { } else {
Throwable cause = f.cause(); Throwable cause = f.cause();
if (cause instanceof Error) { if (cause instanceof Error) {
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
closeContext.completeExceptionally(new Exception(cause)); closeContext.completeExceptionally(new Exception(cause));
} else { } else {
closeContext.completeExceptionally((Exception) cause); closeContext.completeExceptionally((Exception) cause);
@ -97,7 +98,7 @@ public class Netty4TcpChannel implements TcpChannel {
listener.onResponse(null); listener.onResponse(null);
} else { } else {
final Throwable cause = f.cause(); final Throwable cause = f.cause();
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
if (cause instanceof Error) { if (cause instanceof Error) {
listener.onFailure(new Exception(cause)); listener.onFailure(new Exception(cause));
} else { } else {


@ -20,6 +20,7 @@
package org.elasticsearch.transport.netty4; package org.elasticsearch.transport.netty4;
import io.netty.channel.Channel; import io.netty.channel.Channel;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.common.concurrent.CompletableContext;
import org.elasticsearch.transport.TcpServerChannel; import org.elasticsearch.transport.TcpServerChannel;
@ -41,7 +42,7 @@ public class Netty4TcpServerChannel implements TcpServerChannel {
} else { } else {
Throwable cause = f.cause(); Throwable cause = f.cause();
if (cause instanceof Error) { if (cause instanceof Error) {
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
closeContext.completeExceptionally(new Exception(cause)); closeContext.completeExceptionally(new Exception(cause));
} else { } else {
closeContext.completeExceptionally((Exception) cause); closeContext.completeExceptionally((Exception) cause);


@ -38,6 +38,7 @@ import io.netty.util.AttributeKey;
import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Future;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier; import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.SuppressForbidden;
@ -228,7 +229,7 @@ public class Netty4Transport extends TcpTransport {
ChannelFuture channelFuture = bootstrap.connect(address); ChannelFuture channelFuture = bootstrap.connect(address);
Channel channel = channelFuture.channel(); Channel channel = channelFuture.channel();
if (channel == null) { if (channel == null) {
Netty4Utils.maybeDie(channelFuture.cause()); ExceptionsHelper.maybeDieOnAnotherThread(channelFuture.cause());
throw new IOException(channelFuture.cause()); throw new IOException(channelFuture.cause());
} }
addClosedExceptionLogger(channel); addClosedExceptionLogger(channel);
@ -242,7 +243,7 @@ public class Netty4Transport extends TcpTransport {
} else { } else {
Throwable cause = f.cause(); Throwable cause = f.cause();
if (cause instanceof Error) { if (cause instanceof Error) {
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
listener.onFailure(new Exception(cause)); listener.onFailure(new Exception(cause));
} else { } else {
listener.onFailure((Exception) cause); listener.onFailure((Exception) cause);
@ -307,7 +308,7 @@ public class Netty4Transport extends TcpTransport {
@Override @Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
super.exceptionCaught(ctx, cause); super.exceptionCaught(ctx, cause);
} }
} }
@ -333,7 +334,7 @@ public class Netty4Transport extends TcpTransport {
@Override @Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
super.exceptionCaught(ctx, cause); super.exceptionCaught(ctx, cause);
} }
} }
@ -351,7 +352,7 @@ public class Netty4Transport extends TcpTransport {
@Override @Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
Netty4Utils.maybeDie(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
Netty4TcpServerChannel serverChannel = ctx.channel().attr(SERVER_CHANNEL_KEY).get(); Netty4TcpServerChannel serverChannel = ctx.channel().attr(SERVER_CHANNEL_KEY).get();
if (cause instanceof Error) { if (cause instanceof Error) {
onServerException(serverChannel, new Exception(cause)); onServerException(serverChannel, new Exception(cause));


@ -27,20 +27,16 @@ import io.netty.channel.ChannelFuture;
import io.netty.util.NettyRuntime; import io.netty.util.NettyRuntime;
import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory; import io.netty.util.internal.logging.InternalLoggerFactory;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.BytesRefIterator;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.ESLoggerFactory;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.List; import java.util.List;
import java.util.Locale; import java.util.Locale;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
public class Netty4Utils { public class Netty4Utils {
@ -161,34 +157,4 @@ public class Netty4Utils {
} }
} }
/**
* If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be
* caught and bubbles up to the uncaught exception handler.
*
* @param cause the throwable to test
*/
public static void maybeDie(final Throwable cause) {
final Logger logger = ESLoggerFactory.getLogger(Netty4Utils.class);
final Optional<Error> maybeError = ExceptionsHelper.maybeError(cause, logger);
if (maybeError.isPresent()) {
/*
* Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many
* invocations of user-code in try/catch blocks that swallow all throwables. This means that a rethrow here will not bubble up
* to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap
* the exception so as to not lose the original cause during exit.
*/
try {
// try to log the current stack trace
final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace());
logger.error("fatal error on the network layer\n{}", formatted);
} finally {
new Thread(
() -> {
throw maybeError.get();
})
.start();
}
}
}
} }
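
The maybeDie helper removed above is replaced by ExceptionsHelper.maybeDieOnAnotherThread, whose implementation appears later in this diff. The underlying trick is independent of Netty and can be sketched in plain Java; the class name below is made up for illustration and is not part of the change:

public final class RethrowOnAnotherThread {

    private RethrowOnAnotherThread() {}

    static void maybeDieOnAnotherThread(final Throwable cause) {
        if (cause instanceof Error) {
            final Error error = (Error) cause;
            // Netty wraps user code in catch-all blocks, so throwing here could be
            // swallowed. A brand-new thread is outside those blocks, so the Error
            // reaches the uncaught exception handler and the process can exit.
            new Thread(() -> { throw error; }).start();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        maybeDieOnAnotherThread(new OutOfMemoryError("simulated fatal error"));
        Thread.sleep(100); // give the rethrowing thread time to run
    }
}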


@ -1,3 +1,5 @@
import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask
/* /*
* Licensed to Elasticsearch under one or more contributor * Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with * license agreements. See the NOTICE file distributed with
@ -22,7 +24,7 @@ esplugin {
classname 'org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin' classname 'org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin'
} }
forbiddenApis { tasks.withType(ForbiddenApisCliTask) {
signatures += [ signatures += [
"com.ibm.icu.text.Collator#getInstance() @ Don't use default locale, use getInstance(ULocale) instead" "com.ibm.icu.text.Collator#getInstance() @ Don't use default locale, use getInstance(ULocale) instead"
] ]


@ -139,7 +139,7 @@ public class HttpReadWriteHandler implements ReadWriteHandler {
if (request.decoderResult().isFailure()) { if (request.decoderResult().isFailure()) {
Throwable cause = request.decoderResult().cause(); Throwable cause = request.decoderResult().cause();
if (cause instanceof Error) { if (cause instanceof Error) {
ExceptionsHelper.dieOnError(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
transport.incomingRequestError(httpRequest, nioHttpChannel, new Exception(cause)); transport.incomingRequestError(httpRequest, nioHttpChannel, new Exception(cause));
} else { } else {
transport.incomingRequestError(httpRequest, nioHttpChannel, (Exception) cause); transport.incomingRequestError(httpRequest, nioHttpChannel, (Exception) cause);


@ -73,7 +73,7 @@ public class NettyAdaptor implements AutoCloseable {
closeFuture.await(); closeFuture.await();
if (closeFuture.isSuccess() == false) { if (closeFuture.isSuccess() == false) {
Throwable cause = closeFuture.cause(); Throwable cause = closeFuture.cause();
ExceptionsHelper.dieOnError(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
throw (Exception) cause; throw (Exception) cause;
} }
} }
@ -84,7 +84,7 @@ public class NettyAdaptor implements AutoCloseable {
listener.accept(null, null); listener.accept(null, null);
} else { } else {
final Throwable cause = f.cause(); final Throwable cause = f.cause();
ExceptionsHelper.dieOnError(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
assert cause instanceof Exception; assert cause instanceof Exception;
listener.accept(null, (Exception) cause); listener.accept(null, (Exception) cause);
} }


@ -223,7 +223,7 @@ public class NettyListener implements BiConsumer<Void, Exception>, ChannelPromis
biConsumer.accept(null, null); biConsumer.accept(null, null);
} else { } else {
if (cause instanceof Error) { if (cause instanceof Error) {
ExceptionsHelper.dieOnError(cause); ExceptionsHelper.maybeDieOnAnotherThread(cause);
biConsumer.accept(null, new Exception(cause)); biConsumer.accept(null, new Exception(cause));
} else { } else {
biConsumer.accept(null, (Exception) cause); biConsumer.accept(null, (Exception) cause);


@ -21,5 +21,5 @@ apply plugin: 'elasticsearch.rest-test'
apply plugin: 'elasticsearch.test-with-dependencies' apply plugin: 'elasticsearch.test-with-dependencies'
dependencies { dependencies {
testCompile project(path: ':client:rest-high-level', configuration: 'shadow') testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}"
} }


@ -90,14 +90,14 @@ public class DieWithDignityIT extends ESRestTestCase {
final Iterator<String> it = lines.iterator(); final Iterator<String> it = lines.iterator();
boolean fatalErrorOnTheNetworkLayer = false; boolean fatalError = false;
boolean fatalErrorInThreadExiting = false; boolean fatalErrorInThreadExiting = false;
while (it.hasNext() && (fatalErrorOnTheNetworkLayer == false || fatalErrorInThreadExiting == false)) { while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) {
final String line = it.next(); final String line = it.next();
if (line.contains("fatal error on the network layer")) { if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.ExceptionsHelper\\s*\\] \\[node-0\\] fatal error")) {
fatalErrorOnTheNetworkLayer = true; fatalError = true;
} else if (line.matches(".*\\[ERROR\\]\\[o.e.b.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]" } else if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]"
+ " fatal error in thread \\[Thread-\\d+\\], exiting$")) { + " fatal error in thread \\[Thread-\\d+\\], exiting$")) {
fatalErrorInThreadExiting = true; fatalErrorInThreadExiting = true;
assertTrue(it.hasNext()); assertTrue(it.hasNext());
@ -105,7 +105,7 @@ public class DieWithDignityIT extends ESRestTestCase {
} }
} }
assertTrue(fatalErrorOnTheNetworkLayer); assertTrue(fatalError);
assertTrue(fatalErrorInThreadExiting); assertTrue(fatalErrorInThreadExiting);
} }
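
The assertions above now look for the generic fatal-error line logged by ExceptionsHelper rather than the old "fatal error on the network layer" message. The first pattern can be sanity-checked against a hand-written sample line; the log line below is made up for illustration:

import java.util.regex.Pattern;

public class FatalErrorLogPatternDemo {
    public static void main(String[] args) {
        // Hypothetical log line of the shape the updated test expects.
        String line = "[2018-08-23T11:52:59,000][ERROR][o.e.ExceptionsHelper     ] [node-0] fatal error";
        Pattern pattern = Pattern.compile(
            ".*\\[ERROR\\]\\[o\\.e\\.ExceptionsHelper\\s*\\] \\[node-0\\] fatal error");
        System.out.println(pattern.matcher(line).matches()); // prints true
    }
}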


@ -1,5 +1,3 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks
/* /*
* Licensed to Elasticsearch under one or more contributor * Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with * license agreements. See the NOTICE file distributed with
@ -69,9 +67,7 @@ esvagrant {
} }
forbiddenApisMain { forbiddenApisMain {
signaturesURLs = [ replaceSignatureFiles 'jdk-signatures'
PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')
]
} }
// we don't have additional tests for the tests themselves // we don't have additional tests for the tests themselves


@ -33,6 +33,11 @@
"type" : "number", "type" : "number",
"description" : "A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on it's rewrite method ie. if date filters are mandatory to match but the shard bounds and the query are disjoint.", "description" : "A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on it's rewrite method ie. if date filters are mandatory to match but the shard bounds and the query are disjoint.",
"default" : 128 "default" : 128
},
"max_concurrent_shard_requests" : {
"type" : "number",
"description" : "The number of concurrent shard requests each sub search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests",
"default" : "The default grows with the number of nodes in the cluster but is at most 256."
} }
} }
}, },


@ -61,3 +61,35 @@ setup:
- match: { responses.3.error.root_cause.0.reason: "/no.such.index/" } - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" }
- match: { responses.3.error.root_cause.0.index: index_3 } - match: { responses.3.error.root_cause.0.index: index_3 }
- match: { responses.4.hits.total: 4 } - match: { responses.4.hits.total: 4 }
---
"Least impact smoke test":
# only passing these parameters to make sure they are consumed
- do:
max_concurrent_shard_requests: 1
max_concurrent_searches: 1
msearch:
body:
- index: index_*
- query:
match: {foo: foo}
- index: index_2
- query:
match_all: {}
- index: index_1
- query:
match: {foo: foo}
- index: index_3
- query:
match_all: {}
- type: test
- query:
match_all: {}
- match: { responses.0.hits.total: 2 }
- match: { responses.1.hits.total: 1 }
- match: { responses.2.hits.total: 1 }
- match: { responses.3.error.root_cause.0.type: index_not_found_exception }
- match: { responses.3.error.root_cause.0.reason: "/no.such.index/" }
- match: { responses.3.error.root_cause.0.index: index_3 }
- match: { responses.4.hits.total: 4 }


@ -233,3 +233,51 @@
query: query:
match_all: {} match_all: {}
size: 0 size: 0
---
"Scroll max_score is null":
- skip:
version: " - 6.99.99"
reason: max_score was set to 0 rather than null before 7.0
- do:
indices.create:
index: test_scroll
- do:
index:
index: test_scroll
type: test
id: 42
body: { foo: 1 }
- do:
index:
index: test_scroll
type: test
id: 43
body: { foo: 2 }
- do:
indices.refresh: {}
- do:
search:
index: test_scroll
size: 1
scroll: 1m
sort: foo
body:
query:
match_all: {}
- set: {_scroll_id: scroll_id}
- length: {hits.hits: 1 }
- match: { hits.max_score: null }
- do:
scroll:
scroll_id: $scroll_id
scroll: 1m
- length: {hits.hits: 1 }
- match: { hits.max_score: null }


@ -244,6 +244,23 @@ setup:
- match: { hits.total: 6 } - match: { hits.total: 6 }
- length: { hits.hits: 0 } - length: { hits.hits: 0 }
---
"no hits and inner_hits max_score null":
- skip:
version: " - 6.99.99"
reason: max_score was set to 0 rather than null before 7.0
- do:
search:
index: test
body:
size: 0
collapse: { field: numeric_group, inner_hits: { name: sub_hits, size: 1} }
sort: [{ sort: desc }]
- match: { hits.max_score: null }
--- ---
"field collapsing and multiple inner_hits": "field collapsing and multiple inner_hits":


@ -128,7 +128,6 @@ setup:
- match: { hits.total: 2 } - match: { hits.total: 2 }
- match: { aggregations.some_agg.doc_count: 3 } - match: { aggregations.some_agg.doc_count: 3 }
- do: - do:
search: search:
pre_filter_shard_size: 1 pre_filter_shard_size: 1


@ -39,6 +39,7 @@ setup:
df: text df: text
- match: {hits.total: 1} - match: {hits.total: 1}
- match: {hits.max_score: 1}
- match: {hits.hits.0._score: 1} - match: {hits.hits.0._score: 1}
- do: - do:
@ -52,6 +53,7 @@ setup:
boost: 2 boost: 2
- match: {hits.total: 1} - match: {hits.total: 1}
- match: {hits.max_score: 2}
- match: {hits.hits.0._score: 2} - match: {hits.hits.0._score: 2}
- do: - do:
@ -61,6 +63,7 @@ setup:
df: text df: text
- match: {hits.total: 1} - match: {hits.total: 1}
- match: {hits.max_score: 1}
- match: {hits.hits.0._score: 1} - match: {hits.hits.0._score: 1}
--- ---


@ -29,6 +29,7 @@
query_weight: 5 query_weight: 5
rescore_query_weight: 10 rescore_query_weight: 10
- match: {hits.max_score: 15}
- match: { hits.hits.0._score: 15 } - match: { hits.hits.0._score: 15 }
- match: { hits.hits.0._explanation.value: 15 } - match: { hits.hits.0._explanation.value: 15 }


@ -136,42 +136,6 @@ public final class ExceptionsHelper {
return Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n")); return Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n"));
} }
static final int MAX_ITERATIONS = 1024;
/**
* Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable.
*
* @param cause the root throwable
*
* @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable
*/
public static Optional<Error> maybeError(final Throwable cause, final Logger logger) {
// early terminate if the cause is already an error
if (cause instanceof Error) {
return Optional.of((Error) cause);
}
final Queue<Throwable> queue = new LinkedList<>();
queue.add(cause);
int iterations = 0;
while (!queue.isEmpty()) {
iterations++;
if (iterations > MAX_ITERATIONS) {
logger.warn("giving up looking for fatal errors", cause);
break;
}
final Throwable current = queue.remove();
if (current instanceof Error) {
return Optional.of((Error) current);
}
Collections.addAll(queue, current.getSuppressed());
if (current.getCause() != null) {
queue.add(current.getCause());
}
}
return Optional.empty();
}
/** /**
* Rethrows the first exception in the list and adds all remaining to the suppressed list. * Rethrows the first exception in the list and adds all remaining to the suppressed list.
* If the given list is empty no exception is thrown * If the given list is empty no exception is thrown
@ -243,13 +207,50 @@ public final class ExceptionsHelper {
return true; return true;
} }
static final int MAX_ITERATIONS = 1024;
/**
* Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable.
*
* @param cause the root throwable
* @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable
*/
public static Optional<Error> maybeError(final Throwable cause, final Logger logger) {
// early terminate if the cause is already an error
if (cause instanceof Error) {
return Optional.of((Error) cause);
}
final Queue<Throwable> queue = new LinkedList<>();
queue.add(cause);
int iterations = 0;
while (queue.isEmpty() == false) {
iterations++;
// this is a guard against deeply nested or circular chains of exceptions
if (iterations > MAX_ITERATIONS) {
logger.warn("giving up looking for fatal errors", cause);
break;
}
final Throwable current = queue.remove();
if (current instanceof Error) {
return Optional.of((Error) current);
}
Collections.addAll(queue, current.getSuppressed());
if (current.getCause() != null) {
queue.add(current.getCause());
}
}
return Optional.empty();
}
/** /**
* If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be * If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be
* caught and bubbles up to the uncaught exception handler. * caught and bubbles up to the uncaught exception handler. Note that the cause tree is examined for any {@link Error}. See
* {@link #maybeError(Throwable, Logger)} for the semantics.
* *
* @param throwable the throwable to test * @param throwable the throwable to possibly throw on another thread
*/ */
public static void dieOnError(Throwable throwable) { public static void maybeDieOnAnotherThread(final Throwable throwable) {
ExceptionsHelper.maybeError(throwable, logger).ifPresent(error -> { ExceptionsHelper.maybeError(throwable, logger).ifPresent(error -> {
/* /*
* Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, sometimes the stack * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, sometimes the stack
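
maybeError above walks the cause and suppressed chains breadth-first, capped at MAX_ITERATIONS to guard against deeply nested or circular exception graphs. A standalone sketch of the same traversal (the class and method below are illustrative, not Elasticsearch code):

import java.util.ArrayDeque;
import java.util.Optional;
import java.util.Queue;

public class MaybeErrorDemo {

    static Optional<Error> maybeError(Throwable cause) {
        Queue<Throwable> queue = new ArrayDeque<>();
        queue.add(cause);
        int iterations = 0;
        while (queue.isEmpty() == false) {
            if (++iterations > 1024) {
                break; // guard against deeply nested or circular chains
            }
            Throwable current = queue.remove();
            if (current instanceof Error) {
                return Optional.of((Error) current);
            }
            for (Throwable suppressed : current.getSuppressed()) {
                queue.add(suppressed);
            }
            if (current.getCause() != null) {
                queue.add(current.getCause());
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        Exception outer = new Exception("outer");
        outer.addSuppressed(new RuntimeException(new OutOfMemoryError("buried error")));
        System.out.println(maybeError(outer).isPresent()); // prints true
    }
}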


@ -28,6 +28,7 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe; import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.node.NodeValidationException;
@ -393,17 +394,22 @@ final class BootstrapChecks {
static class MaxMapCountCheck implements BootstrapCheck { static class MaxMapCountCheck implements BootstrapCheck {
private static final long LIMIT = 1 << 18; static final long LIMIT = 1 << 18;
@Override @Override
public BootstrapCheckResult check(BootstrapContext context) { public BootstrapCheckResult check(final BootstrapContext context) {
if (getMaxMapCount() != -1 && getMaxMapCount() < LIMIT) { // we only enforce the check if mmapfs is an allowed store type
final String message = String.format( if (IndexModule.NODE_STORE_ALLOW_MMAPFS.get(context.settings)) {
Locale.ROOT, if (getMaxMapCount() != -1 && getMaxMapCount() < LIMIT) {
"max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]", final String message = String.format(
getMaxMapCount(), Locale.ROOT,
LIMIT); "max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]",
return BootstrapCheckResult.failure(message); getMaxMapCount(),
LIMIT);
return BootstrapCheckResult.failure(message);
} else {
return BootstrapCheckResult.success();
}
} else { } else {
return BootstrapCheckResult.success(); return BootstrapCheckResult.success();
} }
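
With this change the vm.max_map_count bootstrap check is only enforced when mmapfs is an allowed store type, i.e. unless node.store.allow_mmapfs is set to false. The limit itself is unchanged; in decimal:

public class MaxMapCountLimitDemo {
    public static void main(String[] args) {
        // 1 << 18 is the LIMIT used by MaxMapCountCheck above.
        System.out.println(1 << 18); // prints 262144
    }
}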


@ -25,6 +25,8 @@ import org.apache.lucene.util.BitUtil;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import static org.apache.lucene.geo.GeoUtils.MAX_LAT_INCL;
/** /**
* Utilities for converting to/from the GeoHash standard * Utilities for converting to/from the GeoHash standard
* *
@ -48,6 +50,8 @@ public class GeoHashUtils {
private static final double LAT_SCALE = (0x1L<<BITS)/180.0D; private static final double LAT_SCALE = (0x1L<<BITS)/180.0D;
private static final double LON_SCALE = (0x1L<<BITS)/360.0D; private static final double LON_SCALE = (0x1L<<BITS)/360.0D;
private static final short MORTON_OFFSET = (BITS<<1) - (PRECISION*5); private static final short MORTON_OFFSET = (BITS<<1) - (PRECISION*5);
/** Bit encoded representation of the latitude of north pole */
private static final long MAX_LAT_BITS = (0x1L << (PRECISION * 5 / 2)) - 1;
// No instance: // No instance:
private GeoHashUtils() { private GeoHashUtils() {
@ -218,12 +222,19 @@ public class GeoHashUtils {
long ghLong = longEncode(geohash, len); long ghLong = longEncode(geohash, len);
// shift away the level // shift away the level
ghLong >>>= 4; ghLong >>>= 4;
// deinterleave and add 1 to lat and lon to get topRight // deinterleave
long lat = BitUtil.deinterleave(ghLong >>> 1) + 1; long lon = BitUtil.deinterleave(ghLong >>> 1);
long lon = BitUtil.deinterleave(ghLong) + 1; long lat = BitUtil.deinterleave(ghLong);
GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)lon, (int)lat) << 4 | len); if (lat < MAX_LAT_BITS) {
// add 1 to lat and lon to get topRight
return new Rectangle(bottomLeft.lat(), topRight.lat(), bottomLeft.lon(), topRight.lon()); GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)(lat + 1), (int)(lon + 1)) << 4 | len);
return new Rectangle(bottomLeft.lat(), topRight.lat(), bottomLeft.lon(), topRight.lon());
} else {
// We cannot go north of north pole, so just using 90 degrees instead of calculating it using
// add 1 to lon to get lon of topRight, we are going to use 90 for lat
GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)lat, (int)(lon + 1)) << 4 | len);
return new Rectangle(bottomLeft.lat(), MAX_LAT_INCL, bottomLeft.lon(), topRight.lon());
}
} }
/** /**


@ -101,7 +101,7 @@ public class Lucene {
public static final ScoreDoc[] EMPTY_SCORE_DOCS = new ScoreDoc[0]; public static final ScoreDoc[] EMPTY_SCORE_DOCS = new ScoreDoc[0];
public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, 0.0f); public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, Float.NaN);
public static Version parseVersion(@Nullable String version, Version defaultVersion, Logger logger) { public static Version parseVersion(@Nullable String version, Version defaultVersion, Logger logger) {
if (version == null) { if (version == null) {
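
Float.NaN now serves as the "max score not tracked" sentinel instead of 0.0f, which is what the max_score: null assertions earlier in this diff rely on. A minimal sketch of how such a sentinel is typically rendered (not the actual Elasticsearch serialization code):

public class MaxScoreSentinelDemo {
    static String renderMaxScore(float maxScore) {
        // NaN means "not tracked" and is rendered as null rather than 0.
        return Float.isNaN(maxScore) ? "\"max_score\":null" : "\"max_score\":" + maxScore;
    }

    public static void main(String[] args) {
        System.out.println(renderMaxScore(Float.NaN)); // "max_score":null
        System.out.println(renderMaxScore(1.5f));      // "max_score":1.5
    }
}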


@ -199,7 +199,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
* Also automatically adds empty consumers for all settings in order to activate logging * Also automatically adds empty consumers for all settings in order to activate logging
*/ */
public synchronized void addSettingsUpdateConsumer(Consumer<Settings> consumer, List<? extends Setting<?>> settings) { public synchronized void addSettingsUpdateConsumer(Consumer<Settings> consumer, List<? extends Setting<?>> settings) {
addSettingsUpdater(Setting.groupedSettingsUpdater(consumer, logger, settings)); addSettingsUpdater(Setting.groupedSettingsUpdater(consumer, settings));
} }
/** /**
@ -208,11 +208,78 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
*/ */
public synchronized <T> void addAffixUpdateConsumer(Setting.AffixSetting<T> setting, BiConsumer<String, T> consumer, public synchronized <T> void addAffixUpdateConsumer(Setting.AffixSetting<T> setting, BiConsumer<String, T> consumer,
BiConsumer<String, T> validator) { BiConsumer<String, T> validator) {
ensureSettingIsRegistered(setting);
addSettingsUpdater(setting.newAffixUpdater(consumer, logger, validator));
}
/**
* Adds a affix settings consumer that accepts the values for two settings. The consumer is only notified if one or both settings change
* and if the provided validator succeeded.
* <p>
* Note: Only settings registered in {@link SettingsModule} can be changed dynamically.
* </p>
* This method registers a compound updater that is useful if two settings are depending on each other.
* The consumer is always provided with both values even if only one of the two changes.
*/
public synchronized <A,B> void addAffixUpdateConsumer(Setting.AffixSetting<A> settingA, Setting.AffixSetting<B> settingB,
BiConsumer<String, Tuple<A, B>> consumer,
BiConsumer<String, Tuple<A, B>> validator) {
// it would be awesome to have a generic way to do that ie. a set of settings that map to an object with a builder
// down the road this would be nice to have!
ensureSettingIsRegistered(settingA);
ensureSettingIsRegistered(settingB);
SettingUpdater<Map<SettingUpdater<A>, A>> affixUpdaterA = settingA.newAffixUpdater((a,b)-> {}, logger, (a,b)-> {});
SettingUpdater<Map<SettingUpdater<B>, B>> affixUpdaterB = settingB.newAffixUpdater((a,b)-> {}, logger, (a,b)-> {});
addSettingsUpdater(new SettingUpdater<Map<String, Tuple<A, B>>>() {
@Override
public boolean hasChanged(Settings current, Settings previous) {
return affixUpdaterA.hasChanged(current, previous) || affixUpdaterB.hasChanged(current, previous);
}
@Override
public Map<String, Tuple<A, B>> getValue(Settings current, Settings previous) {
Map<String, Tuple<A, B>> map = new HashMap<>();
BiConsumer<String, A> aConsumer = (key, value) -> {
assert map.containsKey(key) == false : "duplicate key: " + key;
map.put(key, new Tuple<>(value, settingB.getConcreteSettingForNamespace(key).get(current)));
};
BiConsumer<String, B> bConsumer = (key, value) -> {
Tuple<A, B> abTuple = map.get(key);
if (abTuple != null) {
map.put(key, new Tuple<>(abTuple.v1(), value));
} else {
assert settingA.getConcreteSettingForNamespace(key).get(current).equals(settingA.getConcreteSettingForNamespace
(key).get(previous)) : "expected: " + settingA.getConcreteSettingForNamespace(key).get(current)
+ " but was " + settingA.getConcreteSettingForNamespace(key).get(previous);
map.put(key, new Tuple<>(settingA.getConcreteSettingForNamespace(key).get(current), value));
}
};
SettingUpdater<Map<SettingUpdater<A>, A>> affixUpdaterA = settingA.newAffixUpdater(aConsumer, logger, (a,b) ->{});
SettingUpdater<Map<SettingUpdater<B>, B>> affixUpdaterB = settingB.newAffixUpdater(bConsumer, logger, (a,b) ->{});
affixUpdaterA.apply(current, previous);
affixUpdaterB.apply(current, previous);
for (Map.Entry<String, Tuple<A, B>> entry : map.entrySet()) {
validator.accept(entry.getKey(), entry.getValue());
}
return Collections.unmodifiableMap(map);
}
@Override
public void apply(Map<String, Tuple<A, B>> values, Settings current, Settings previous) {
for (Map.Entry<String, Tuple<A, B>> entry : values.entrySet()) {
consumer.accept(entry.getKey(), entry.getValue());
}
}
});
}
private void ensureSettingIsRegistered(Setting.AffixSetting<?> setting) {
final Setting<?> registeredSetting = this.complexMatchers.get(setting.getKey()); final Setting<?> registeredSetting = this.complexMatchers.get(setting.getKey());
if (setting != registeredSetting) { if (setting != registeredSetting) {
throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]"); throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]");
} }
addSettingsUpdater(setting.newAffixUpdater(consumer, logger, validator));
} }
/** /**
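
The new overload above lets one consumer observe a pair of related affix settings per namespace, with both values delivered even when only one of them changed. A hedged usage sketch, assuming two hypothetical dynamic per-namespace settings registered with a ClusterSettings instance:

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

import java.util.Arrays;
import java.util.HashSet;

public class AffixPairConsumerDemo {
    public static void main(String[] args) {
        // Hypothetical per-namespace settings, e.g. pool.<name>.core and pool.<name>.max.
        Setting.AffixSetting<Integer> core = Setting.affixKeySetting("pool.", "core",
            key -> Setting.intSetting(key, 1, Setting.Property.Dynamic, Setting.Property.NodeScope));
        Setting.AffixSetting<Integer> max = Setting.affixKeySetting("pool.", "max",
            key -> Setting.intSetting(key, 4, Setting.Property.Dynamic, Setting.Property.NodeScope));

        ClusterSettings clusterSettings =
            new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(core, max)));

        clusterSettings.addAffixUpdateConsumer(core, max,
            (namespace, values) ->
                System.out.println(namespace + " -> core=" + values.v1() + ", max=" + values.v2()),
            (namespace, values) -> {
                if (values.v1() > values.v2()) {
                    throw new IllegalArgumentException("core must not exceed max for [" + namespace + "]");
                }
            });

        // Only core changes, but the consumer still receives the current max for the namespace.
        clusterSettings.applySettings(Settings.builder().put("pool.write.core", 2).build());
    }
}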


@ -63,6 +63,7 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.indices.IndexingMemoryController;
import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesQueryCache;
@ -264,6 +265,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING,
IndexModule.NODE_STORE_ALLOW_MMAPFS,
ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS,


@ -547,7 +547,7 @@ public class Setting<T> implements ToXContentObject {
}; };
} }
static AbstractScopedSettings.SettingUpdater<Settings> groupedSettingsUpdater(Consumer<Settings> consumer, Logger logger, static AbstractScopedSettings.SettingUpdater<Settings> groupedSettingsUpdater(Consumer<Settings> consumer,
final List<? extends Setting<?>> configuredSettings) { final List<? extends Setting<?>> configuredSettings) {
return new AbstractScopedSettings.SettingUpdater<Settings>() { return new AbstractScopedSettings.SettingUpdater<Settings>() {


@ -21,10 +21,11 @@ package org.elasticsearch.index;
import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.SetOnce; import org.apache.lucene.util.SetOnce;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.TriFunction;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting;
@ -59,7 +60,6 @@ import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Locale;
import java.util.Map; import java.util.Map;
import java.util.Objects; import java.util.Objects;
import java.util.Set; import java.util.Set;
@ -84,8 +84,10 @@ import java.util.function.Function;
*/ */
public final class IndexModule { public final class IndexModule {
public static final Setting<Boolean> NODE_STORE_ALLOW_MMAPFS = Setting.boolSetting("node.store.allow_mmapfs", true, Property.NodeScope);
public static final Setting<String> INDEX_STORE_TYPE_SETTING = public static final Setting<String> INDEX_STORE_TYPE_SETTING =
new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope); new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope);
/** On which extensions to load data into the file-system cache upon opening of files. /** On which extensions to load data into the file-system cache upon opening of files.
* This only works with the mmap directory, and even in that case is still * This only works with the mmap directory, and even in that case is still
@ -289,7 +291,7 @@ public final class IndexModule {
} }
} }
private static boolean isBuiltinType(String storeType) { public static boolean isBuiltinType(String storeType) {
for (Type type : Type.values()) { for (Type type : Type.values()) {
if (type.match(storeType)) { if (type.match(storeType)) {
return true; return true;
@ -298,21 +300,48 @@ public final class IndexModule {
return false; return false;
} }
public enum Type { public enum Type {
NIOFS, NIOFS("niofs"),
MMAPFS, MMAPFS("mmapfs"),
SIMPLEFS, SIMPLEFS("simplefs"),
FS; FS("fs");
private final String settingsKey;
Type(final String settingsKey) {
this.settingsKey = settingsKey;
}
private static final Map<String, Type> TYPES;
static {
final Map<String, Type> types = new HashMap<>(4);
for (final Type type : values()) {
types.put(type.settingsKey, type);
}
TYPES = Collections.unmodifiableMap(types);
}
public String getSettingsKey() { public String getSettingsKey() {
return this.name().toLowerCase(Locale.ROOT); return this.settingsKey;
} }
public static Type fromSettingsKey(final String key) {
final Type type = TYPES.get(key);
if (type == null) {
throw new IllegalArgumentException("no matching type for [" + key + "]");
}
return type;
}
/** /**
* Returns true iff this settings matches the type. * Returns true iff this settings matches the type.
*/ */
public boolean match(String setting) { public boolean match(String setting) {
return getSettingsKey().equals(setting); return getSettingsKey().equals(setting);
} }
} }
/** /**
@ -325,6 +354,16 @@ public final class IndexModule {
IndexSearcherWrapper newWrapper(IndexService indexService); IndexSearcherWrapper newWrapper(IndexService indexService);
} }
public static Type defaultStoreType(final boolean allowMmapfs) {
if (allowMmapfs && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
return Type.MMAPFS;
} else if (Constants.WINDOWS) {
return Type.SIMPLEFS;
} else {
return Type.NIOFS;
}
}
public IndexService newIndexService( public IndexService newIndexService(
NodeEnvironment environment, NodeEnvironment environment,
NamedXContentRegistry xContentRegistry, NamedXContentRegistry xContentRegistry,
@ -343,20 +382,7 @@ public final class IndexModule {
IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null
? (shard) -> null : indexSearcherWrapper.get(); ? (shard) -> null : indexSearcherWrapper.get();
eventListener.beforeIndexCreated(indexSettings.getIndex(), indexSettings.getSettings()); eventListener.beforeIndexCreated(indexSettings.getIndex(), indexSettings.getSettings());
final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING); final IndexStore store = getIndexStore(indexSettings, indexStoreFactories);
final IndexStore store;
if (Strings.isEmpty(storeType) || isBuiltinType(storeType)) {
store = new IndexStore(indexSettings);
} else {
Function<IndexSettings, IndexStore> factory = indexStoreFactories.get(storeType);
if (factory == null) {
throw new IllegalArgumentException("Unknown store type [" + storeType + "]");
}
store = factory.apply(indexSettings);
if (store == null) {
throw new IllegalStateException("store must not be null");
}
}
final QueryCache queryCache; final QueryCache queryCache;
if (indexSettings.getValue(INDEX_QUERY_CACHE_ENABLED_SETTING)) { if (indexSettings.getValue(INDEX_QUERY_CACHE_ENABLED_SETTING)) {
BiFunction<IndexSettings, IndicesQueryCache, QueryCache> queryCacheProvider = forceQueryCacheProvider.get(); BiFunction<IndexSettings, IndicesQueryCache, QueryCache> queryCacheProvider = forceQueryCacheProvider.get();
@ -375,6 +401,39 @@ public final class IndexModule {
indicesFieldDataCache, searchOperationListeners, indexOperationListeners, namedWriteableRegistry); indicesFieldDataCache, searchOperationListeners, indexOperationListeners, namedWriteableRegistry);
} }
private static IndexStore getIndexStore(
final IndexSettings indexSettings, final Map<String, Function<IndexSettings, IndexStore>> indexStoreFactories) {
final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING);
final Type type;
final Boolean allowMmapfs = NODE_STORE_ALLOW_MMAPFS.get(indexSettings.getNodeSettings());
if (storeType.isEmpty() || Type.FS.getSettingsKey().equals(storeType)) {
type = defaultStoreType(allowMmapfs);
} else {
if (isBuiltinType(storeType)) {
type = Type.fromSettingsKey(storeType);
} else {
type = null;
}
}
if (type != null && type == Type.MMAPFS && allowMmapfs == false) {
throw new IllegalArgumentException("store type [mmapfs] is not allowed");
}
final IndexStore store;
if (storeType.isEmpty() || isBuiltinType(storeType)) {
store = new IndexStore(indexSettings);
} else {
Function<IndexSettings, IndexStore> factory = indexStoreFactories.get(storeType);
if (factory == null) {
throw new IllegalArgumentException("Unknown store type [" + storeType + "]");
}
store = factory.apply(indexSettings);
if (store == null) {
throw new IllegalStateException("store must not be null");
}
}
return store;
}
/** /**
* creates a new mapper service to do administrative work like mapping updates. This *should not* be used for document parsing. * creates a new mapper service to do administrative work like mapping updates. This *should not* be used for document parsing.
* doing so will result in an exception. * doing so will result in an exception.
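
Two of the methods introduced above, Type.fromSettingsKey and defaultStoreType, are public and can be exercised directly; a minimal sketch:

import org.elasticsearch.index.IndexModule;

public class StoreTypeDemo {
    public static void main(String[] args) {
        // Explicit keys map onto the enum via the new lookup table.
        System.out.println(IndexModule.Type.fromSettingsKey("mmapfs")); // MMAPFS

        // When node.store.allow_mmapfs is false the default falls back to
        // SIMPLEFS on Windows and NIOFS elsewhere.
        System.out.println(IndexModule.defaultStoreType(false));
    }
}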


@ -1623,10 +1623,12 @@ public abstract class Engine implements Closeable {
public abstract int fillSeqNoGaps(long primaryTerm) throws IOException; public abstract int fillSeqNoGaps(long primaryTerm) throws IOException;
/** /**
* Performs recovery from the transaction log. * Performs recovery from the transaction log up to {@code recoverUpToSeqNo} (inclusive).
* This operation will close the engine if the recovery fails. * This operation will close the engine if the recovery fails.
*
* @param recoverUpToSeqNo the upper bound, inclusive, of sequence number to be recovered
*/ */
public abstract Engine recoverFromTranslog() throws IOException; public abstract Engine recoverFromTranslog(long recoverUpToSeqNo) throws IOException;
/** /**
* Do not replay translog operations, but make the engine be ready. * Do not replay translog operations, but make the engine be ready.


@ -364,7 +364,7 @@ public class InternalEngine extends Engine {
} }
@Override @Override
public InternalEngine recoverFromTranslog() throws IOException { public InternalEngine recoverFromTranslog(long recoverUpToSeqNo) throws IOException {
flushLock.lock(); flushLock.lock();
try (ReleasableLock lock = readLock.acquire()) { try (ReleasableLock lock = readLock.acquire()) {
ensureOpen(); ensureOpen();
@ -372,7 +372,7 @@ public class InternalEngine extends Engine {
throw new IllegalStateException("Engine has already been recovered"); throw new IllegalStateException("Engine has already been recovered");
} }
try { try {
recoverFromTranslogInternal(); recoverFromTranslogInternal(recoverUpToSeqNo);
} catch (Exception e) { } catch (Exception e) {
try { try {
pendingTranslogRecovery.set(true); // just play safe and never allow commits on this see #ensureCanFlush pendingTranslogRecovery.set(true); // just play safe and never allow commits on this see #ensureCanFlush
@ -394,11 +394,12 @@ public class InternalEngine extends Engine {
pendingTranslogRecovery.set(false); // we are good - now we can commit pendingTranslogRecovery.set(false); // we are good - now we can commit
} }
private void recoverFromTranslogInternal() throws IOException { private void recoverFromTranslogInternal(long recoverUpToSeqNo) throws IOException {
Translog.TranslogGeneration translogGeneration = translog.getGeneration(); Translog.TranslogGeneration translogGeneration = translog.getGeneration();
final int opsRecovered; final int opsRecovered;
final long translogGen = Long.parseLong(lastCommittedSegmentInfos.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); final long translogFileGen = Long.parseLong(lastCommittedSegmentInfos.getUserData().get(Translog.TRANSLOG_GENERATION_KEY));
try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGen)) { try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(
new Translog.TranslogGeneration(translog.getTranslogUUID(), translogFileGen), recoverUpToSeqNo)) {
opsRecovered = config().getTranslogRecoveryRunner().run(this, snapshot); opsRecovered = config().getTranslogRecoveryRunner().run(this, snapshot);
} catch (Exception e) { } catch (Exception e) {
throw new EngineException(shardId, "failed to recover from translog", e); throw new EngineException(shardId, "failed to recover from translog", e);


@ -28,6 +28,7 @@ import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.NormsFieldExistsQuery;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
@ -166,7 +167,7 @@ public final class KeywordFieldMapper extends FieldMapper {
builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1)); builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1));
iterator.remove(); iterator.remove();
} else if (propName.equals("norms")) { } else if (propName.equals("norms")) {
builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode, "norms") == false); TypeParsers.parseNorms(builder, name, propNode);
iterator.remove(); iterator.remove();
} else if (propName.equals("eager_global_ordinals")) { } else if (propName.equals("eager_global_ordinals")) {
builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(propNode, "eager_global_ordinals")); builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(propNode, "eager_global_ordinals"));
@ -256,8 +257,10 @@ public final class KeywordFieldMapper extends FieldMapper {
public Query existsQuery(QueryShardContext context) { public Query existsQuery(QueryShardContext context) {
if (hasDocValues()) { if (hasDocValues()) {
return new DocValuesFieldExistsQuery(name()); return new DocValuesFieldExistsQuery(name());
} else { } else if (omitNorms()) {
return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name())); return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name()));
} else {
return new NormsFieldExistsQuery(name());
} }
} }
@ -366,17 +369,19 @@ public final class KeywordFieldMapper extends FieldMapper {
// convert to utf8 only once before feeding postings/dv/stored fields // convert to utf8 only once before feeding postings/dv/stored fields
final BytesRef binaryValue = new BytesRef(value); final BytesRef binaryValue = new BytesRef(value);
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
Field field = new Field(fieldType().name(), binaryValue, fieldType()); Field field = new Field(fieldType().name(), binaryValue, fieldType());
fields.add(field); fields.add(field);
if (fieldType().hasDocValues() == false && fieldType().omitNorms()) {
createFieldNamesField(context, fields);
}
} }
if (fieldType().hasDocValues()) { if (fieldType().hasDocValues()) {
fields.add(new SortedSetDocValuesField(fieldType().name(), binaryValue)); fields.add(new SortedSetDocValuesField(fieldType().name(), binaryValue));
} else if (fieldType().stored() || fieldType().indexOptions() != IndexOptions.NONE) {
createFieldNamesField(context, fields);
} }
} }
@Override @Override
protected String contentType() { protected String contentType() {
return CONTENT_TYPE; return CONTENT_TYPE;
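
With the change above, an exists query on a keyword field that has neither doc values nor omitted norms is answered from the norms via Lucene's NormsFieldExistsQuery. A small Lucene-only illustration of that query, independent of the mapper code above (field and index names are made up):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.NormsFieldExistsQuery;
import org.apache.lucene.store.RAMDirectory;

public class NormsExistsDemo {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document doc = new Document();
            doc.add(new TextField("title", "hello", Field.Store.NO)); // TextField keeps norms
            writer.addDocument(doc);
            writer.addDocument(new Document()); // second document has no "title" field
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            // Only the document that carries norms for "title" matches.
            System.out.println(searcher.count(new NormsFieldExistsQuery("title"))); // prints 1
        }
    }
}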


@ -35,6 +35,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.ShapeRelation;
import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.DateMathParser;
@ -314,7 +315,13 @@ public abstract class MappedFieldType extends FieldType {
/** Generates a query that will only match documents that contain the given value. /** Generates a query that will only match documents that contain the given value.
* The default implementation returns a {@link TermQuery} over the value bytes, * The default implementation returns a {@link TermQuery} over the value bytes,
* boosted by {@link #boost()}. * boosted by {@link #boost()}.
* @throws IllegalArgumentException if {@code value} cannot be converted to the expected data type */ * @throws IllegalArgumentException if {@code value} cannot be converted to the expected data type or if the field is not searchable
* due to the way it is configured (eg. not indexed)
* @throws ElasticsearchParseException if {@code value} cannot be converted to the expected data type
* @throws UnsupportedOperationException if the field is not searchable regardless of options
* @throws QueryShardException if the field is not searchable regardless of options
*/
// TODO: Standardize exception types
public abstract Query termQuery(Object value, @Nullable QueryShardContext context); public abstract Query termQuery(Object value, @Nullable QueryShardContext context);
/** Build a constant-scoring query that matches all values. The default implementation uses a /** Build a constant-scoring query that matches all values. The default implementation uses a


@ -122,8 +122,7 @@ public class TypeParsers {
} }
} }
public static void parseNorms(FieldMapper.Builder builder, String fieldName, Object propNode, public static void parseNorms(FieldMapper.Builder builder, String fieldName, Object propNode) {
Mapper.TypeParser.ParserContext parserContext) {
builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode, fieldName + ".norms") == false); builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode, fieldName + ".norms") == false);
} }
@ -140,7 +139,7 @@ public class TypeParsers {
final String propName = entry.getKey(); final String propName = entry.getKey();
final Object propNode = entry.getValue(); final Object propNode = entry.getValue();
if ("norms".equals(propName)) { if ("norms".equals(propName)) {
parseNorms(builder, name, propNode, parserContext); parseNorms(builder, name, propNode);
iterator.remove(); iterator.remove();
} }
} }


@ -398,7 +398,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
if (size() == 0) { if (size() == 0) {
TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx); intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx);
result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0); result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, Float.NaN);
} else { } else {
int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
TopDocsCollector<?> topDocsCollector; TopDocsCollector<?> topDocsCollector;


@ -328,8 +328,9 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
/** /**
* @param type Sets how multiple fields should be combined to build textual part queries. * @param type Sets how multiple fields should be combined to build textual part queries.
*/ */
public void type(MultiMatchQueryBuilder.Type type) { public QueryStringQueryBuilder type(MultiMatchQueryBuilder.Type type) {
this.type = type; this.type = type;
return this;
} }
/** /**
@ -388,7 +389,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
this.analyzer = analyzer; this.analyzer = analyzer;
return this; return this;
} }
/** /**
* The optional analyzer used to analyze the query string. Note, if a field has search analyzer * The optional analyzer used to analyze the query string. Note, if a field has search analyzer
* defined for it, then it will be used automatically. Defaults to the smart search analyzer. * defined for it, then it will be used automatically. Defaults to the smart search analyzer.
@ -899,9 +900,9 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
Objects.equals(tieBreaker, other.tieBreaker) && Objects.equals(tieBreaker, other.tieBreaker) &&
Objects.equals(rewrite, other.rewrite) && Objects.equals(rewrite, other.rewrite) &&
Objects.equals(minimumShouldMatch, other.minimumShouldMatch) && Objects.equals(minimumShouldMatch, other.minimumShouldMatch) &&
Objects.equals(lenient, other.lenient) && Objects.equals(lenient, other.lenient) &&
Objects.equals( Objects.equals(
timeZone == null ? null : timeZone.getID(), timeZone == null ? null : timeZone.getID(),
other.timeZone == null ? null : other.timeZone.getID()) && other.timeZone == null ? null : other.timeZone.getID()) &&
Objects.equals(escape, other.escape) && Objects.equals(escape, other.escape) &&
Objects.equals(maxDeterminizedStates, other.maxDeterminizedStates) && Objects.equals(maxDeterminizedStates, other.maxDeterminizedStates) &&
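
Returning this from type() makes the setter chainable like the other setters on this builder; a brief sketch (field and analyzer names are illustrative):

import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryStringQueryBuilder;

public class FluentQueryStringDemo {
    public static void main(String[] args) {
        QueryStringQueryBuilder query = QueryBuilders.queryStringQuery("quick brown fox")
            .field("title")
            .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) // chainable after this change
            .analyzer("standard");
        System.out.println(query);
    }
}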


@ -120,7 +120,7 @@ public class MultiMatchQuery extends MatchQuery {
private Query combineGrouped(List<? extends Query> groupQuery) { private Query combineGrouped(List<? extends Query> groupQuery) {
if (groupQuery == null || groupQuery.isEmpty()) { if (groupQuery == null || groupQuery.isEmpty()) {
return new MatchNoDocsQuery("[multi_match] list of group queries was empty"); return zeroTermsQuery();
} }
if (groupQuery.size() == 1) { if (groupQuery.size() == 1) {
return groupQuery.get(0); return groupQuery.get(0);
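
Returning zeroTermsQuery() instead of a hard-coded MatchNoDocsQuery means a multi_match whose analysis removes every term, for example a query made only of stop words, now presumably honors the zero_terms_query option. A hedged request-building sketch (field names and analyzer are illustrative):

import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.search.MatchQuery;

public class ZeroTermsMultiMatchDemo {
    public static void main(String[] args) {
        // "the" is removed by the stop analyzer, so the analyzed query has zero terms;
        // with ZeroTermsQuery.ALL the query matches all documents instead of none.
        MultiMatchQueryBuilder query = QueryBuilders.multiMatchQuery("the", "title", "body")
            .analyzer("stop")
            .zeroTermsQuery(MatchQuery.ZeroTermsQuery.ALL);
        System.out.println(query);
    }
}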

Some files were not shown because too many files have changed in this diff.