Merge remote-tracking branch 'es/master' into ccr
* es/master: (62 commits)
  [DOCS] Add docs for Application Privileges (#32635)
  Add versions 5.6.12 and 6.4.1
  Do NOT allow termvectors on nested fields (#32728)
  [Rollup] Return empty response when aggs are missing (#32796)
  [TEST] Add some ACL yaml tests for Rollup (#33035)
  Move non duplicated actions back into xpack core (#32952)
  Test fix - GraphExploreResponseTests should not randomise array elements Closes #33086
  Use `addIfAbsent` instead of checking if an element is contained
  TESTS: Fix Random Fail in MockTcpTransportTests (#33061)
  HLRC: Fix Compile Error From Missing Throws (#33083)
  [DOCS] Remove reload password from docs cf. #32889
  HLRC: Add ML Get Buckets API (#33056)
  Watcher: Improve error messages for CronEvalTool (#32800)
  Search: Support of wildcard on docvalue_fields (#32980)
  Change query field expansion (#33020)
  INGEST: Cleanup Redundant Put Method (#33034)
  SQL: skip uppercasing/lowercasing function tests for AZ locales as well (#32910)
  Fix the default pom file name (#33063)
  Switch ml basic tests to new style Requests (#32483)
  Switch some watcher tests to new style Requests (#33044)
  ...
commit 82592dda5a
@@ -325,21 +325,19 @@ common configurations in our build and how we use them:

<dl>
<dt>`compile`</dt><dd>Code that is on the classpath at both compile and
runtime. If the [`shadow`][shadow-plugin] plugin is applied to the project then
this code is bundled into the jar produced by the project.</dd>
runtime.</dd>
<dt>`runtime`</dt><dd>Code that is not on the classpath at compile time but is
on the classpath at runtime. We mostly use this configuration to make sure that
we do not accidentally compile against dependencies of our dependencies also
known as "transitive" dependencies".</dd>
<dt>`compileOnly`</dt><dd>Code that is on the classpath at comile time but that
<dt>`compileOnly`</dt><dd>Code that is on the classpath at compile time but that
should not be shipped with the project because it is "provided" by the runtime
somehow. Elasticsearch plugins use this configuration to include dependencies
that are bundled with Elasticsearch's server.</dd>
<dt>`shadow`</dt><dd>Only available in projects with the shadow plugin. Code
that is on the classpath at both compile and runtime but it *not* bundled into
the jar produced by the project. If you depend on a project with the `shadow`
plugin then you need to depend on this configuration because it will bring
along all of the dependencies you need at runtime.</dd>
<dt>`bundle`</dt><dd>Only available in projects with the shadow plugin,
dependencies with this configuration are bundled into the jar produced by the
build. Since IDEs do not understand this configuration we rig them to treat
dependencies in this configuration as `compile` dependencies.</dd>
<dt>`testCompile`</dt><dd>Code that is on the classpath for compiling tests
that are part of this project but not production code. The canonical example
of this is `junit`.</dd>
</dl>
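As a rough illustration of how these configurations fit together, a plugin-style `build.gradle` might declare dependencies along these lines. This is a minimal sketch only: the project path, artifact coordinates, and versions below are placeholders, not taken from this change.

```gradle
dependencies {
    // bundled into the jar produced by this project (requires the shadow plugin)
    bundle project(':libs:some-shared-lib')               // hypothetical project path
    // on the compile and runtime classpath, shipped alongside the jar
    compile "org.apache.lucene:lucene-core:7.4.0"         // placeholder coordinates
    // provided by the Elasticsearch server at runtime, so not shipped
    compileOnly "org.elasticsearch:elasticsearch:${version}"
    // only needed to compile and run the tests
    testCompile "junit:junit:4.12"
}
```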
build.gradle
@@ -22,6 +22,7 @@ import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionCollection
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.gradle.plugins.ide.eclipse.model.SourceFolder
import org.gradle.util.GradleVersion
import org.gradle.util.DistributionLocator
@@ -304,7 +305,7 @@ subprojects {
// org.elasticsearch:elasticsearch must be the last one or all the links for the
// other packages (e.g org.elasticsearch.client) will point to server rather than
// their own artifacts.
if (project.plugins.hasPlugin(BuildPlugin)) {
if (project.plugins.hasPlugin(BuildPlugin) || project.plugins.hasPlugin(PluginBuildPlugin)) {
String artifactsHost = VersionProperties.elasticsearch.isSnapshot() ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co"
Closure sortClosure = { a, b -> b.group <=> a.group }
Closure depJavadocClosure = { shadowed, dep ->
@@ -322,13 +323,6 @@ subprojects {
*/
project.evaluationDependsOn(upstreamProject.path)
project.javadoc.source += upstreamProject.javadoc.source
/*
* Do not add those projects to the javadoc classpath because
* we are going to resolve them with their source instead.
*/
project.javadoc.classpath = project.javadoc.classpath.filter { f ->
false == upstreamProject.configurations.archives.artifacts.files.files.contains(f)
}
/*
* Instead we need the upstream project's javadoc classpath so
* we don't barf on the classes that it references.
@@ -345,16 +339,16 @@ subprojects {
project.configurations.compile.dependencies
.findAll()
.toSorted(sortClosure)
.each({ c -> depJavadocClosure(hasShadow, c) })
.each({ c -> depJavadocClosure(false, c) })
project.configurations.compileOnly.dependencies
.findAll()
.toSorted(sortClosure)
.each({ c -> depJavadocClosure(hasShadow, c) })
.each({ c -> depJavadocClosure(false, c) })
if (hasShadow) {
project.configurations.shadow.dependencies
project.configurations.bundle.dependencies
.findAll()
.toSorted(sortClosure)
.each({ c -> depJavadocClosure(false, c) })
.each({ c -> depJavadocClosure(true, c) })
}
}
}
@@ -523,25 +517,18 @@ allprojects {
allprojects {
/*
* IntelliJ and Eclipse don't know about the shadow plugin so when we're
* in "IntelliJ mode" or "Eclipse mode" add "runtime" dependencies
* eveywhere where we see a "shadow" dependency which will cause them to
* reference shadowed projects directly rather than rely on the shadowing
* to include them. This is the correct thing for it to do because it
* doesn't run the jar shadowing at all. This isn't needed for the project
* in "IntelliJ mode" or "Eclipse mode" switch "bundle" dependencies into
* regular "compile" dependencies. This isn't needed for the project
* itself because the IDE configuration is done by SourceSets but it is
* *is* needed for projects that depends on the project doing the shadowing.
* Without this they won't properly depend on the shadowed project.
*/
if (isEclipse || isIdea) {
configurations.all { Configuration configuration ->
dependencies.all { Dependency dep ->
if (dep instanceof ProjectDependency) {
if (dep.getTargetConfiguration() == 'shadow') {
configuration.dependencies.add(project.dependencies.project(path: dep.dependencyProject.path, configuration: 'runtime'))
}
}
}
}
project.plugins.withType(ShadowPlugin).whenPluginAdded {
project.afterEvaluate {
project.configurations.compile.extendsFrom project.configurations.bundle
}
}
}
}
@@ -79,8 +79,9 @@ class BuildPlugin implements Plugin<Project> {
}
project.pluginManager.apply('java')
project.pluginManager.apply('carrotsearch.randomized-testing')
// these plugins add lots of info to our jars
configureConfigurations(project)
configureJars(project) // jar config must be added before info broker
// these plugins add lots of info to our jars
project.pluginManager.apply('nebula.info-broker')
project.pluginManager.apply('nebula.info-basic')
project.pluginManager.apply('nebula.info-java')
@@ -91,8 +92,8 @@ class BuildPlugin implements Plugin<Project> {

globalBuildInfo(project)
configureRepositories(project)
configureConfigurations(project)
project.ext.versions = VersionProperties.versions
configureSourceSets(project)
configureCompile(project)
configureJavadoc(project)
configureSourcesJar(project)
@@ -421,8 +422,10 @@ class BuildPlugin implements Plugin<Project> {
project.configurations.compile.dependencies.all(disableTransitiveDeps)
project.configurations.testCompile.dependencies.all(disableTransitiveDeps)
project.configurations.compileOnly.dependencies.all(disableTransitiveDeps)

project.plugins.withType(ShadowPlugin).whenPluginAdded {
project.configurations.shadow.dependencies.all(disableTransitiveDeps)
Configuration bundle = project.configurations.create('bundle')
bundle.dependencies.all(disableTransitiveDeps)
}
}

@@ -528,12 +531,16 @@ class BuildPlugin implements Plugin<Project> {
project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask ->
// The GenerateMavenPom task is aggressive about setting the destination, instead of fighting it,
// just make a copy.
generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-${project.version}.pom"
generatePOMTask.ext.pomFileName = null
doLast {
project.copy {
from generatePOMTask.destination
into "${project.buildDir}/distributions"
rename { generatePOMTask.ext.pomFileName }
rename {
generatePOMTask.ext.pomFileName == null ?
"${project.archivesBaseName}-${project.version}.pom" :
generatePOMTask.ext.pomFileName
}
}
}
// build poms with assemble (if the assemble task exists)
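With `pomFileName` now defaulting to null, the copied pom falls back to `${archivesBaseName}-${version}.pom`; a build that still wants a custom name can set the extra property itself. A minimal, hypothetical sketch (the chosen file name is illustrative, not part of this change):

```gradle
import org.gradle.api.publish.maven.tasks.GenerateMavenPom

// Hypothetical consumer build.gradle snippet: pick a custom name for the pom
// that BuildPlugin copies into build/distributions.
tasks.withType(GenerateMavenPom) { GenerateMavenPom pomTask ->
    pomTask.ext.pomFileName = "my-artifact-${project.version}.pom"
}
```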
@@ -556,30 +563,6 @@ class BuildPlugin implements Plugin<Project> {
publications {
nebula(MavenPublication) {
artifacts = [ project.tasks.shadowJar ]
artifactId = project.archivesBaseName
/*
* Configure the pom to include the "shadow" as compile dependencies
* because that is how we're using them but remove all other dependencies
* because they've been shaded into the jar.
*/
pom.withXml { XmlProvider xml ->
Node root = xml.asNode()
root.remove(root.dependencies)
Node dependenciesNode = root.appendNode('dependencies')
project.configurations.shadow.allDependencies.each {
if (false == it instanceof SelfResolvingDependency) {
Node dependencyNode = dependenciesNode.appendNode('dependency')
dependencyNode.appendNode('groupId', it.group)
dependencyNode.appendNode('artifactId', it.name)
dependencyNode.appendNode('version', it.version)
dependencyNode.appendNode('scope', 'compile')
}
}
// Be tidy and remove the element if it is empty
if (dependenciesNode.children.empty) {
root.remove(dependenciesNode)
}
}
}
}
}
@@ -587,6 +570,20 @@ class BuildPlugin implements Plugin<Project> {
}
}

/**
* Add dependencies that we are going to bundle to the compile classpath.
*/
static void configureSourceSets(Project project) {
project.plugins.withType(ShadowPlugin).whenPluginAdded {
['main', 'test'].each {name ->
SourceSet sourceSet = project.sourceSets.findByName(name)
if (sourceSet != null) {
sourceSet.compileClasspath += project.configurations.bundle
}
}
}
}

/** Adds compiler settings to the project */
static void configureCompile(Project project) {
if (project.compilerJavaVersion < JavaVersion.VERSION_1_10) {
@@ -764,9 +761,16 @@ class BuildPlugin implements Plugin<Project> {
* better to be safe
*/
mergeServiceFiles()
/*
* Bundle dependencies of the "bundled" configuration.
*/
configurations = [project.configurations.bundle]
}
// Make sure we assemble the shadow jar
project.tasks.assemble.dependsOn project.tasks.shadowJar
project.artifacts {
apiElements project.tasks.shadowJar
}
}
}

@@ -873,13 +877,8 @@ class BuildPlugin implements Plugin<Project> {
exclude '**/*$*.class'

project.plugins.withType(ShadowPlugin).whenPluginAdded {
/*
* If we make a shaded jar we test against it.
*/
// Test against a shadow jar if we made one
classpath -= project.tasks.compileJava.outputs.files
classpath -= project.configurations.compile
classpath -= project.configurations.runtime
classpath += project.configurations.shadow
classpath += project.tasks.shadowJar.outputs.files
dependsOn project.tasks.shadowJar
}
@@ -905,26 +904,6 @@ class BuildPlugin implements Plugin<Project> {
additionalTest.dependsOn(project.tasks.testClasses)
project.check.dependsOn(additionalTest)
});

project.plugins.withType(ShadowPlugin).whenPluginAdded {
/*
* We need somewhere to configure dependencies that we don't wish
* to shade into the jar. The shadow plugin creates a "shadow"
* configuration which is *almost* exactly that. It is never
* bundled into the shaded jar but is used for main source
* compilation. Unfortunately, by default it is not used for
* *test* source compilation and isn't used in tests at all. This
* change makes it available for test compilation.
*
* Note that this isn't going to work properly with qa projects
* but they have no business applying the shadow plugin in the
* firstplace.
*/
SourceSet testSourceSet = project.sourceSets.findByName('test')
if (testSourceSet != null) {
testSourceSet.compileClasspath += project.configurations.shadow
}
}
}

private static configurePrecommit(Project project) {
@@ -936,7 +915,7 @@ class BuildPlugin implements Plugin<Project> {
it.group.startsWith('org.elasticsearch') == false
} - project.configurations.compileOnly
project.plugins.withType(ShadowPlugin).whenPluginAdded {
project.dependencyLicenses.dependencies += project.configurations.shadow.fileCollection {
project.dependencyLicenses.dependencies += project.configurations.bundle.fileCollection {
it.group.startsWith('org.elasticsearch') == false
}
}
@@ -947,7 +926,7 @@ class BuildPlugin implements Plugin<Project> {
deps.runtimeConfiguration = project.configurations.runtime
project.plugins.withType(ShadowPlugin).whenPluginAdded {
deps.runtimeConfiguration = project.configurations.create('infoDeps')
deps.runtimeConfiguration.extendsFrom(project.configurations.runtime, project.configurations.shadow)
deps.runtimeConfiguration.extendsFrom(project.configurations.runtime, project.configurations.bundle)
}
deps.compileOnlyConfiguration = project.configurations.compileOnly
project.afterEvaluate {
@@ -157,11 +157,10 @@ public class PluginBuildPlugin extends BuildPlugin {
from pluginMetadata // metadata (eg custom security policy)
/*
* If the plugin is using the shadow plugin then we need to bundle
* "shadow" things rather than the default jar and dependencies so
* we don't hit jar hell.
* that shadow jar.
*/
from { project.plugins.hasPlugin(ShadowPlugin) ? project.shadowJar : project.jar }
from { project.plugins.hasPlugin(ShadowPlugin) ? project.configurations.shadow : project.configurations.runtime - project.configurations.compileOnly }
from project.configurations.runtime - project.configurations.compileOnly
// extra files for the plugin to go into the zip
from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging
from('src/main') {
@@ -19,6 +19,7 @@

package org.elasticsearch.gradle.precommit

import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.OutputFile
@@ -39,6 +40,9 @@ public class JarHellTask extends LoggedExec {
public JarHellTask() {
project.afterEvaluate {
FileCollection classpath = project.sourceSets.test.runtimeClasspath
if (project.plugins.hasPlugin(ShadowPlugin)) {
classpath += project.configurations.bundle
}
inputs.files(classpath)
dependsOn(classpath)
description = "Runs CheckJarHell on ${classpath}"
@@ -18,18 +18,12 @@
*/
package org.elasticsearch.gradle.precommit

import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin
import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask
import org.gradle.api.JavaVersion
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.file.FileCollection
import org.gradle.api.artifacts.Configuration
import org.gradle.api.plugins.JavaBasePlugin
import org.gradle.api.plugins.quality.Checkstyle
import org.gradle.api.tasks.JavaExec
import org.gradle.api.tasks.StopExecutionException

/**
* Validation tasks which should be run before committing. These run before tests.
*/
@@ -38,8 +32,8 @@ class PrecommitTasks {
/** Adds a precommit task, which depends on non-test verification tasks. */
public static Task create(Project project, boolean includeDependencyLicenses) {
List<Task> precommitTasks = [
configureForbiddenApis(project),
configureCheckstyle(project),
configureForbiddenApisCli(project),
configureNamingConventions(project),
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
@@ -48,9 +42,6 @@ class PrecommitTasks {
project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class)
]

// Configure it but don't add it as a dependency yet
configureForbiddenApisCli(project)

// tasks with just tests don't need dependency licenses, so this flag makes adding
// the task optional
if (includeDependencyLicenses) {
@@ -84,77 +75,60 @@ class PrecommitTasks {
return project.tasks.create(precommitOptions)
}

private static Task configureForbiddenApis(Project project) {
project.pluginManager.apply(ForbiddenApisPlugin.class)
project.forbiddenApis {
failOnUnsupportedJava = false
bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-non-portable', 'jdk-system-out']
signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'),
getClass().getResource('/forbidden/es-all-signatures.txt')]
suppressAnnotations = ['**.SuppressForbidden']
}
project.tasks.withType(CheckForbiddenApis) {
// we do not use the += operator to add signatures, as conventionMappings of Gradle do not work when it's configured using withType:
if (name.endsWith('Test')) {
signaturesURLs = project.forbiddenApis.signaturesURLs +
[ getClass().getResource('/forbidden/es-test-signatures.txt'), getClass().getResource('/forbidden/http-signatures.txt') ]
} else {
signaturesURLs = project.forbiddenApis.signaturesURLs +
[ getClass().getResource('/forbidden/es-server-signatures.txt') ]
}
// forbidden apis doesn't support Java 11, so stop at 10
String targetMajorVersion = (project.compilerJavaVersion.compareTo(JavaVersion.VERSION_1_10) > 0 ?
JavaVersion.VERSION_1_10 :
project.compilerJavaVersion).getMajorVersion()
targetCompatibility = Integer.parseInt(targetMajorVersion) >= 9 ?targetMajorVersion : "1.${targetMajorVersion}"
}
Task forbiddenApis = project.tasks.findByName('forbiddenApis')
forbiddenApis.group = "" // clear group, so this does not show up under verification tasks

return forbiddenApis
}

private static Task configureForbiddenApisCli(Project project) {
project.configurations.create("forbiddenApisCliJar")
Configuration forbiddenApisConfiguration = project.configurations.create("forbiddenApisCliJar")
project.dependencies {
forbiddenApisCliJar 'de.thetaphi:forbiddenapis:2.5'
forbiddenApisCliJar ('de.thetaphi:forbiddenapis:2.5')
}
Task forbiddenApisCli = project.tasks.create('forbiddenApisCli')
Task forbiddenApisCli = project.tasks.create('forbiddenApis')

project.sourceSets.forEach { sourceSet ->
forbiddenApisCli.dependsOn(
project.tasks.create(sourceSet.getTaskName('forbiddenApisCli', null), JavaExec) {
project.tasks.create(sourceSet.getTaskName('forbiddenApis', null), ForbiddenApisCliTask) {
ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources')
dependsOn(buildResources)
classpath = project.files(
project.configurations.forbiddenApisCliJar,
execAction = { spec ->
spec.classpath = project.files(
project.configurations.forbiddenApisCliJar,
sourceSet.compileClasspath,
sourceSet.runtimeClasspath
)
spec.executable = "${project.runtimeJavaHome}/bin/java"
}
inputs.files(
forbiddenApisConfiguration,
sourceSet.compileClasspath,
sourceSet.runtimeClasspath
)
main = 'de.thetaphi.forbiddenapis.cli.CliMain'
executable = "${project.runtimeJavaHome}/bin/java"
args "-b", 'jdk-unsafe-1.8'
args "-b", 'jdk-deprecated-1.8'
args "-b", 'jdk-non-portable'
args "-b", 'jdk-system-out'
args "-f", buildResources.copy("forbidden/jdk-signatures.txt")
args "-f", buildResources.copy("forbidden/es-all-signatures.txt")
args "--suppressannotation", '**.SuppressForbidden'

targetCompatibility = project.compilerJavaVersion
bundledSignatures = [
"jdk-unsafe", "jdk-deprecated", "jdk-non-portable", "jdk-system-out"
]
signaturesFiles = project.files(
buildResources.copy("forbidden/jdk-signatures.txt"),
buildResources.copy("forbidden/es-all-signatures.txt")
)
suppressAnnotations = ['**.SuppressForbidden']
if (sourceSet.name == 'test') {
args "-f", buildResources.copy("forbidden/es-test-signatures.txt")
args "-f", buildResources.copy("forbidden/http-signatures.txt")
signaturesFiles += project.files(
buildResources.copy("forbidden/es-test-signatures.txt"),
buildResources.copy("forbidden/http-signatures.txt")
)
} else {
args "-f", buildResources.copy("forbidden/es-server-signatures.txt")
signaturesFiles += project.files(buildResources.copy("forbidden/es-server-signatures.txt"))
}
dependsOn sourceSet.classesTaskName
doFirst {
// Forbidden APIs expects only existing dirs, and requires at least one
FileCollection existingOutputs = sourceSet.output.classesDirs
.filter { it.exists() }
if (existingOutputs.isEmpty()) {
throw new StopExecutionException("${sourceSet.name} has no outputs")
}
existingOutputs.forEach { args "-d", it }
classesDirs = sourceSet.output.classesDirs
ext.replaceSignatureFiles = { String... names ->
signaturesFiles = project.files(
names.collect { buildResources.copy("forbidden/${it}.txt") }
)
}
ext.addSignatureFiles = { String... names ->
signaturesFiles += project.files(
names.collect { buildResources.copy("forbidden/${it}.txt") }
)
}
}
)
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.gradle.precommit;

import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
import org.apache.tools.ant.BuildEvent;
import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.BuildListener;
@@ -82,6 +83,11 @@ public class ThirdPartyAuditTask extends AntTask {
configuration = project.configurations.findByName('testCompile')
}
assert configuration != null
if (project.plugins.hasPlugin(ShadowPlugin)) {
Configuration original = configuration
configuration = project.configurations.create('thirdPartyAudit')
configuration.extendsFrom(original, project.configurations.bundle)
}
if (compileOnly == null) {
classpath = configuration
} else {
@@ -35,6 +35,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
@@ -105,7 +106,7 @@ public class ExportElasticsearchBuildResourcesTask extends DefaultTask {
if (is == null) {
throw new GradleException("Can't export `" + resourcePath + "` from build-tools: not found");
}
Files.copy(is, destination);
Files.copy(is, destination, StandardCopyOption.REPLACE_EXISTING);
} catch (IOException e) {
throw new GradleException("Can't write resource `" + resourcePath + "` to " + destination, e);
}
@@ -0,0 +1,154 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.precommit;

import de.thetaphi.forbiddenapis.cli.CliMain;
import org.gradle.api.Action;
import org.gradle.api.DefaultTask;
import org.gradle.api.JavaVersion;
import org.gradle.api.file.FileCollection;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.InputFiles;
import org.gradle.api.tasks.OutputFile;
import org.gradle.api.tasks.SkipWhenEmpty;
import org.gradle.api.tasks.TaskAction;
import org.gradle.process.JavaExecSpec;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class ForbiddenApisCliTask extends DefaultTask {

private FileCollection signaturesFiles;
private List<String> signatures = new ArrayList<>();
private Set<String> bundledSignatures = new LinkedHashSet<>();
private Set<String> suppressAnnotations = new LinkedHashSet<>();
private JavaVersion targetCompatibility;
private FileCollection classesDirs;
private Action<JavaExecSpec> execAction;

public JavaVersion getTargetCompatibility() {
return targetCompatibility;
}

public void setTargetCompatibility(JavaVersion targetCompatibility) {
this.targetCompatibility = targetCompatibility;
}

public Action<JavaExecSpec> getExecAction() {
return execAction;
}

public void setExecAction(Action<JavaExecSpec> execAction) {
this.execAction = execAction;
}

@OutputFile
public File getMarkerFile() {
return new File(
new File(getProject().getBuildDir(), "precommit"),
getName()
);
}

@InputFiles
@SkipWhenEmpty
public FileCollection getClassesDirs() {
return classesDirs.filter(File::exists);
}

public void setClassesDirs(FileCollection classesDirs) {
this.classesDirs = classesDirs;
}

@InputFiles
public FileCollection getSignaturesFiles() {
return signaturesFiles;
}

public void setSignaturesFiles(FileCollection signaturesFiles) {
this.signaturesFiles = signaturesFiles;
}

@Input
public List<String> getSignatures() {
return signatures;
}

public void setSignatures(List<String> signatures) {
this.signatures = signatures;
}

@Input
public Set<String> getBundledSignatures() {
return bundledSignatures;
}

public void setBundledSignatures(Set<String> bundledSignatures) {
this.bundledSignatures = bundledSignatures;
}

@Input
public Set<String> getSuppressAnnotations() {
return suppressAnnotations;
}

public void setSuppressAnnotations(Set<String> suppressAnnotations) {
this.suppressAnnotations = suppressAnnotations;
}

@TaskAction
public void runForbiddenApisAndWriteMarker() throws IOException {
getProject().javaexec((JavaExecSpec spec) -> {
execAction.execute(spec);
spec.setMain(CliMain.class.getName());
// build the command line
getSignaturesFiles().forEach(file -> spec.args("-f", file.getAbsolutePath()));
getSuppressAnnotations().forEach(annotation -> spec.args("--suppressannotation", annotation));
getBundledSignatures().forEach(bundled -> {
// there's no option for target compatibility so we have to interpret it
final String prefix;
if (bundled.equals("jdk-system-out") ||
bundled.equals("jdk-reflection") ||
bundled.equals("jdk-non-portable")) {
prefix = "";
} else {
prefix = "-" + (
getTargetCompatibility().compareTo(JavaVersion.VERSION_1_9) >= 0 ?
getTargetCompatibility().getMajorVersion() :
"1." + getTargetCompatibility().getMajorVersion())
;
}
spec.args("-b", bundled + prefix);
}
);
getClassesDirs().forEach(dir ->
spec.args("-d", dir)
);
});
Files.write(getMarkerFile().toPath(), Collections.emptyList());
}

}
@@ -16,8 +16,6 @@
* specific language governing permissions and limitations
* under the License.
*/

import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.gradle.api.internal.provider.Providers

@@ -47,13 +45,13 @@ dependencies {
* Everything in the "shadow" configuration is *not* copied into the
* shadowJar.
*/
shadow "org.elasticsearch:elasticsearch:${version}"
shadow "org.elasticsearch.client:elasticsearch-rest-client:${version}"
shadow "org.elasticsearch.plugin:parent-join-client:${version}"
shadow "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}"
shadow "org.elasticsearch.plugin:rank-eval-client:${version}"
shadow "org.elasticsearch.plugin:lang-mustache-client:${version}"
compile project(':x-pack:protocol')
compile "org.elasticsearch:elasticsearch:${version}"
compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
compile "org.elasticsearch.plugin:parent-join-client:${version}"
compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}"
compile "org.elasticsearch.plugin:rank-eval-client:${version}"
compile "org.elasticsearch.plugin:lang-mustache-client:${version}"
bundle project(':x-pack:protocol')

testCompile "org.elasticsearch.client:test:${version}"
testCompile "org.elasticsearch.test:framework:${version}"
@@ -75,8 +73,8 @@ dependencyLicenses {
forbiddenApisMain {
// core does not depend on the httpclient for compile so we add the signatures here. We don't add them for test as they are already
// specified
signaturesURLs += [PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
signaturesURLs += [file('src/main/resources/forbidden/rest-high-level-signatures.txt').toURI().toURL()]
addSignatureFiles 'http-signatures'
signaturesFiles += files('src/main/resources/forbidden/rest-high-level-signatures.txt')
}

integTestCluster {
@@ -0,0 +1,63 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.client;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse;

import java.io.IOException;

import static java.util.Collections.emptySet;

public class GraphClient {
private final RestHighLevelClient restHighLevelClient;

GraphClient(RestHighLevelClient restHighLevelClient) {
this.restHighLevelClient = restHighLevelClient;
}

/**
* Executes an exploration request using the Graph API.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html">Graph API
* on elastic.co</a>.
*/
public final GraphExploreResponse explore(GraphExploreRequest graphExploreRequest,
RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(graphExploreRequest, RequestConverters::xPackGraphExplore,
options, GraphExploreResponse::fromXContext, emptySet());
}

/**
* Asynchronously executes an exploration request using the Graph API.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html">Graph API
* on elastic.co</a>.
*/
public final void exploreAsync(GraphExploreRequest graphExploreRequest,
RequestOptions options,
ActionListener<GraphExploreResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(graphExploreRequest, RequestConverters::xPackGraphExplore,
options, GraphExploreResponse::fromXContext, listener, emptySet());
}

}
@@ -20,10 +20,15 @@
package org.elasticsearch.client;

import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.client.RequestConverters.EndpointBuilder;
import org.elasticsearch.common.Strings;
import org.elasticsearch.protocol.xpack.ml.CloseJobRequest;
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
import org.elasticsearch.protocol.xpack.ml.GetJobRequest;
import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest;
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;

@@ -48,6 +53,23 @@ final class MLRequestConverters {
return request;
}

static Request getJob(GetJobRequest getJobRequest) {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("ml")
.addPathPartAsIs("anomaly_detectors")
.addPathPart(Strings.collectionToCommaDelimitedString(getJobRequest.getJobIds()))
.build();
Request request = new Request(HttpGet.METHOD_NAME, endpoint);

RequestConverters.Params params = new RequestConverters.Params(request);
if (getJobRequest.isAllowNoJobs() != null) {
params.putParam("allow_no_jobs", Boolean.toString(getJobRequest.isAllowNoJobs()));
}

return request;
}

static Request openJob(OpenJobRequest openJobRequest) throws IOException {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
@@ -57,7 +79,20 @@ final class MLRequestConverters {
.addPathPartAsIs("_open")
.build();
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
request.setJsonEntity(openJobRequest.toString());
request.setEntity(createEntity(openJobRequest, REQUEST_BODY_CONTENT_TYPE));
return request;
}

static Request closeJob(CloseJobRequest closeJobRequest) throws IOException {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("ml")
.addPathPartAsIs("anomaly_detectors")
.addPathPart(Strings.collectionToCommaDelimitedString(closeJobRequest.getJobIds()))
.addPathPartAsIs("_close")
.build();
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
request.setEntity(createEntity(closeJobRequest, REQUEST_BODY_CONTENT_TYPE));
return request;
}

@@ -75,4 +110,18 @@ final class MLRequestConverters {

return request;
}

static Request getBuckets(GetBucketsRequest getBucketsRequest) throws IOException {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("ml")
.addPathPartAsIs("anomaly_detectors")
.addPathPart(getBucketsRequest.getJobId())
.addPathPartAsIs("results")
.addPathPartAsIs("buckets")
.build();
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
request.setEntity(createEntity(getBucketsRequest, REQUEST_BODY_CONTENT_TYPE));
return request;
}
}
@@ -19,8 +19,14 @@
package org.elasticsearch.client;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.protocol.xpack.ml.CloseJobRequest;
import org.elasticsearch.protocol.xpack.ml.CloseJobResponse;
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse;
import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest;
import org.elasticsearch.protocol.xpack.ml.GetBucketsResponse;
import org.elasticsearch.protocol.xpack.ml.GetJobRequest;
import org.elasticsearch.protocol.xpack.ml.GetJobResponse;
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
import org.elasticsearch.protocol.xpack.ml.OpenJobResponse;
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
@@ -50,7 +56,7 @@ public final class MachineLearningClient {
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html">ML PUT job documentation</a>
*
* @param request the PutJobRequest containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings
* @param request The PutJobRequest containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return PutJobResponse with enclosed {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} object
* @throws IOException when there is a serialization issue sending the request or receiving the response
@@ -69,7 +75,7 @@ public final class MachineLearningClient {
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html">ML PUT job documentation</a>
*
* @param request the request containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings
* @param request The request containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified upon request completion
*/
@@ -82,13 +88,54 @@ public final class MachineLearningClient {
Collections.emptySet());
}

/**
* Gets one or more Machine Learning job configuration info.
*
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html"></a>
* </p>
* @param request {@link GetJobRequest} Request containing a list of jobId(s) and additional options
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return {@link GetJobResponse} response object containing
* the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} objects and the number of jobs found
* @throws IOException when there is a serialization issue sending the request or receiving the response
*/
public GetJobResponse getJob(GetJobRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request,
MLRequestConverters::getJob,
options,
GetJobResponse::fromXContent,
Collections.emptySet());
}

/**
* Gets one or more Machine Learning job configuration info, asynchronously.
*
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html"></a>
* </p>
* @param request {@link GetJobRequest} Request containing a list of jobId(s) and additional options
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified with {@link GetJobResponse} upon request completion
*/
public void getJobAsync(GetJobRequest request, RequestOptions options, ActionListener<GetJobResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request,
MLRequestConverters::getJob,
options,
GetJobResponse::fromXContent,
listener,
Collections.emptySet());
}

/**
* Deletes the given Machine Learning Job
* <p>
* For additional info
* see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html">ML Delete Job documentation</a>
* </p>
* @param request the request to delete the job
* @param request The request to delete the job
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return action acknowledgement
* @throws IOException when there is a serialization issue sending the request or receiving the response
@@ -107,7 +154,7 @@ public final class MachineLearningClient {
* For additional info
* see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html">ML Delete Job documentation</a>
* </p>
* @param request the request to delete the job
* @param request The request to delete the job
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified upon request completion
*/
@@ -131,7 +178,7 @@ public final class MachineLearningClient {
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html"></a>
* </p>
* @param request request containing job_id and additional optional options
* @param request Request containing job_id and additional optional options
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return response containing if the job was successfully opened or not.
* @throws IOException when there is a serialization issue sending the request or receiving the response
@@ -154,7 +201,7 @@ public final class MachineLearningClient {
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html"></a>
* </p>
* @param request request containing job_id and additional optional options
* @param request Request containing job_id and additional optional options
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified upon request completion
*/
@@ -166,4 +213,76 @@ public final class MachineLearningClient {
listener,
Collections.emptySet());
}

/**
* Closes one or more Machine Learning Jobs. A job can be opened and closed multiple times throughout its lifecycle.
*
* A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
*
* @param request Request containing job_ids and additional options. See {@link CloseJobRequest}
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return response containing if the job was successfully closed or not.
* @throws IOException when there is a serialization issue sending the request or receiving the response
*/
public CloseJobResponse closeJob(CloseJobRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request,
MLRequestConverters::closeJob,
options,
CloseJobResponse::fromXContent,
Collections.emptySet());
}

/**
* Closes one or more Machine Learning Jobs asynchronously, notifies listener on completion
*
* A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
*
* @param request Request containing job_ids and additional options. See {@link CloseJobRequest}
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified upon request completion
*/
public void closeJobAsync(CloseJobRequest request, RequestOptions options, ActionListener<CloseJobResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request,
MLRequestConverters::closeJob,
options,
CloseJobResponse::fromXContent,
listener,
Collections.emptySet());
}

/**
* Gets the buckets for a Machine Learning Job.
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html">ML GET buckets documentation</a>
*
* @param request The request
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
*/
public GetBucketsResponse getBuckets(GetBucketsRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request,
MLRequestConverters::getBuckets,
options,
GetBucketsResponse::fromXContent,
Collections.emptySet());
}

/**
* Gets the buckets for a Machine Learning Job, notifies listener once the requested buckets are retrieved.
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html">ML GET buckets documentation</a>
*
* @param request The request
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified upon request completion
*/
public void getBucketsAsync(GetBucketsRequest request, RequestOptions options, ActionListener<GetBucketsResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request,
MLRequestConverters::getBuckets,
options,
GetBucketsResponse::fromXContent,
listener,
Collections.emptySet());
}
}
@@ -114,6 +114,7 @@ import org.elasticsearch.protocol.xpack.license.PutLicenseRequest;
import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest;
import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.script.mustache.MultiSearchTemplateRequest;
import org.elasticsearch.script.mustache.SearchTemplateRequest;
@@ -1124,6 +1125,13 @@ final class RequestConverters {
return request;
}

static Request xPackGraphExplore(GraphExploreRequest exploreRequest) throws IOException {
String endpoint = endpoint(exploreRequest.indices(), exploreRequest.types(), "_xpack/graph/_explore");
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
request.setEntity(createEntity(exploreRequest, REQUEST_BODY_CONTENT_TYPE));
return request;
}

static Request xPackWatcherPutWatch(PutWatchRequest putWatchRequest) {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
@@ -209,6 +209,7 @@ public class RestHighLevelClient implements Closeable {
private final TasksClient tasksClient = new TasksClient(this);
private final XPackClient xPackClient = new XPackClient(this);
private final WatcherClient watcherClient = new WatcherClient(this);
private final GraphClient graphClient = new GraphClient(this);
private final LicenseClient licenseClient = new LicenseClient(this);
private final MigrationClient migrationClient = new MigrationClient(this);
private final MachineLearningClient machineLearningClient = new MachineLearningClient(this);
@@ -324,6 +325,16 @@ public class RestHighLevelClient implements Closeable {
* Watcher APIs on elastic.co</a> for more information.
*/
public WatcherClient watcher() { return watcherClient; }

/**
* Provides methods for accessing the Elastic Licensed Graph explore API that
* is shipped with the default distribution of Elasticsearch. All of
* these APIs will 404 if run against the OSS distribution of Elasticsearch.
* <p>
* See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html">
* Graph API on elastic.co</a> for more information.
*/
public GraphClient graph() { return graphClient; }

/**
* Provides methods for accessing the Elastic Licensed Licensing APIs that
@@ -949,6 +960,11 @@ public class RestHighLevelClient implements Closeable {
FieldCapabilitiesResponse::fromXContent, listener, emptySet());
}

/**
* @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
* layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
*/
@Deprecated
protected final <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
@@ -958,15 +974,58 @@ public class RestHighLevelClient implements Closeable {
response -> parseEntity(response.getEntity(), entityParser), ignores);
}

/**
* Defines a helper method for performing a request and then parsing the returned entity using the provided entityParser.
*/
protected final <Req extends Validatable, Resp> Resp performRequestAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<XContentParser, Resp, IOException> entityParser,
Set<Integer> ignores) throws IOException {
return performRequest(request, requestConverter, options,
response -> parseEntity(response.getEntity(), entityParser), ignores);
}

/**
* @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
* layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
*/
@Deprecated
protected final <Req extends ActionRequest, Resp> Resp performRequest(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter,
Set<Integer> ignores) throws IOException {
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter,
Set<Integer> ignores) throws IOException {
ActionRequestValidationException validationException = request.validate();
if (validationException != null) {
if (validationException != null && validationException.validationErrors().isEmpty() == false) {
throw validationException;
}
return internalPerformRequest(request, requestConverter, options, responseConverter, ignores);
}

/**
* Defines a helper method for performing a request.
*/
protected final <Req extends Validatable, Resp> Resp performRequest(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter,
Set<Integer> ignores) throws IOException {
ValidationException validationException = request.validate();
if (validationException != null && validationException.validationErrors().isEmpty() == false) {
throw validationException;
}
return internalPerformRequest(request, requestConverter, options, responseConverter, ignores);
}

/**
* Provides common functionality for performing a request.
*/
private <Req, Resp> Resp internalPerformRequest(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter,
Set<Integer> ignores) throws IOException {
Request req = requestConverter.apply(request);
req.setOptions(options);
Response response;
@@ -994,25 +1053,75 @@ public class RestHighLevelClient implements Closeable {
}
}

/**
* @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
* layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
*/
@Deprecated
protected final <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<XContentParser, Resp, IOException> entityParser,
ActionListener<Resp> listener, Set<Integer> ignores) {
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<XContentParser, Resp, IOException> entityParser,
ActionListener<Resp> listener, Set<Integer> ignores) {
performRequestAsync(request, requestConverter, options,
response -> parseEntity(response.getEntity(), entityParser), listener, ignores);
}

/**
* Defines a helper method for asynchronously performing a request.
*/
protected final <Req extends Validatable, Resp> void performRequestAsyncAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<XContentParser, Resp, IOException> entityParser,
ActionListener<Resp> listener, Set<Integer> ignores) {
performRequestAsync(request, requestConverter, options,
response -> parseEntity(response.getEntity(), entityParser), listener, ignores);
}

/**
* @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
* layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
*/
@Deprecated
protected final <Req extends ActionRequest, Resp> void performRequestAsync(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter,
ActionListener<Resp> listener, Set<Integer> ignores) {
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter,
ActionListener<Resp> listener, Set<Integer> ignores) {
ActionRequestValidationException validationException = request.validate();
if (validationException != null) {
if (validationException != null && validationException.validationErrors().isEmpty() == false) {
listener.onFailure(validationException);
return;
}
internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores);
}

/**
* Defines a helper method for asynchronously performing a request.
*/
protected final <Req extends Validatable, Resp> void performRequestAsync(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
CheckedFunction<Response, Resp, IOException> responseConverter,
|
||||
ActionListener<Resp> listener, Set<Integer> ignores) {
|
||||
ValidationException validationException = request.validate();
|
||||
if (validationException != null && validationException.validationErrors().isEmpty() == false) {
|
||||
listener.onFailure(validationException);
|
||||
return;
|
||||
}
|
||||
internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores);
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides common functionality for asynchronously performing a request.
|
||||
*/
|
||||
private <Req, Resp> void internalPerformRequestAsync(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
RequestOptions options,
|
||||
CheckedFunction<Response, Resp, IOException> responseConverter,
|
||||
ActionListener<Resp> listener, Set<Integer> ignores) {
|
||||
Request req;
|
||||
try {
|
||||
req = requestConverter.apply(request);
|
||||
@ -1026,6 +1135,7 @@ public class RestHighLevelClient implements Closeable {
|
||||
client.performRequestAsync(req, responseListener);
|
||||
}
|
||||
|
||||
|
||||
final <Resp> ResponseListener wrapResponseListener(CheckedFunction<Response, Resp, IOException> responseConverter,
|
||||
ActionListener<Resp> actionListener, Set<Integer> ignores) {
|
||||
return new ResponseListener() {
|
||||
|
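
For orientation, here is a minimal sketch (not part of the commit) of how a subclient class in the same `org.elasticsearch.client` package could route a request through the new `Validatable`-based helper above. `ExampleClient`, `ExampleRequest`, `ExampleResponse`, and `ExampleRequestConverters` are assumed, illustrative names; only `performRequestAndParseEntity`, `Request`, `RequestOptions`, `Validatable`, and `XContentParser` come from the surrounding code.

```java
package org.elasticsearch.client;

import java.io.IOException;
import java.util.Collections;

// Hypothetical subclient; ExampleRequest, ExampleResponse and ExampleRequestConverters are assumed
// illustrative types, not classes added by this commit.
public final class ExampleClient {

    private final RestHighLevelClient restHighLevelClient;

    ExampleClient(RestHighLevelClient restHighLevelClient) {
        this.restHighLevelClient = restHighLevelClient;
    }

    public ExampleResponse get(ExampleRequest request, RequestOptions options) throws IOException {
        // request.validate() runs inside performRequestAndParseEntity before any HTTP call is sent;
        // a non-empty ValidationException is thrown back to the caller.
        return restHighLevelClient.performRequestAndParseEntity(request,
                ExampleRequestConverters::get,     // CheckedFunction<ExampleRequest, Request, IOException>
                options,
                ExampleResponse::fromXContent,     // CheckedFunction<XContentParser, ExampleResponse, IOException>
                Collections.emptySet());           // HTTP status codes to tolerate instead of failing
    }
}
```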
@ -0,0 +1,41 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client;

/**
 * Defines a validation layer for Requests.
 */
public interface Validatable {
    ValidationException EMPTY_VALIDATION = new ValidationException() {
        @Override
        public void addValidationError(String error) {
            throw new UnsupportedOperationException("Validation messages should not be added to the empty validation");
        }
    };

    /**
     * Perform validation. This method does not have to be overridden in the event that no validation needs to be done.
     *
     * @return potentially null, in the event of older actions, an empty {@link ValidationException} in newer actions, or finally a
     * {@link ValidationException} that contains a list of all failed validation.
     */
    default ValidationException validate() {
        return EMPTY_VALIDATION;
    }
}
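
A minimal sketch of a request type implementing the interface above, assuming a single required `id` field; `ExampleRequest` is illustrative and not part of the commit, only `Validatable` and `ValidationException` are.

```java
package org.elasticsearch.client;

// Illustrative request; only Validatable and ValidationException come from this commit.
public class ExampleRequest implements Validatable {

    private final String id;

    public ExampleRequest(String id) {
        this.id = id;
    }

    public String getId() {
        return id;
    }

    @Override
    public ValidationException validate() {
        // Accumulate every problem instead of failing on the first one.
        ValidationException validationException = new ValidationException();
        if (id == null || id.isEmpty()) {
            validationException.addValidationError("id must not be null or empty");
        }
        // An exception with no errors passes the validationErrors().isEmpty() == false check
        // in performRequest/performRequestAsync, so the call proceeds.
        return validationException;
    }
}
```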
@ -0,0 +1,55 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client;

import java.util.ArrayList;
import java.util.List;

/**
 * Encapsulates an accumulation of validation errors
 */
public class ValidationException extends IllegalArgumentException {
    private final List<String> validationErrors = new ArrayList<>();

    /**
     * Add a new validation error to the accumulating validation errors
     * @param error the error to add
     */
    public void addValidationError(String error) {
        validationErrors.add(error);
    }

    /**
     * Returns the validation errors accumulated
     */
    public final List<String> validationErrors() {
        return validationErrors;
    }

    @Override
    public final String getMessage() {
        StringBuilder sb = new StringBuilder();
        sb.append("Validation Failed: ");
        int index = 0;
        for (String error : validationErrors) {
            sb.append(++index).append(": ").append(error).append(";");
        }
        return sb.toString();
    }
}
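
A short, assumed usage example showing how errors accumulate and how `getMessage()` renders them; the demo class is not part of the commit.

```java
package org.elasticsearch.client;

// Throwaway demo of error accumulation and the rendered message format.
public class ValidationExceptionDemo {
    public static void main(String[] args) {
        ValidationException e = new ValidationException();
        e.addValidationError("job id must not be null");
        e.addValidationError("page size must be positive");

        // Prints: Validation Failed: 1: job id must not be null;2: page size must be positive;
        System.out.println(e.getMessage());

        // The client helpers treat only a non-empty exception as a validation failure.
        System.out.println(e.validationErrors().isEmpty() == false); // true
    }
}
```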
@ -0,0 +1,139 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.client.methods.HttpPost;
|
||||
import org.apache.http.client.methods.HttpPut;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
import org.elasticsearch.index.query.TermQueryBuilder;
|
||||
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
|
||||
import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse;
|
||||
import org.elasticsearch.protocol.xpack.graph.Hop;
|
||||
import org.elasticsearch.protocol.xpack.graph.Vertex;
|
||||
import org.elasticsearch.protocol.xpack.graph.VertexRequest;
|
||||
import org.hamcrest.Matchers;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
public class GraphIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@Before
|
||||
public void indexDocuments() throws IOException {
|
||||
// Create chain of doc IDs across indices 1->2->3
|
||||
Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/type/1");
|
||||
doc1.setJsonEntity("{ \"num\":[1], \"const\":\"start\"}");
|
||||
client().performRequest(doc1);
|
||||
|
||||
Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/type/1");
|
||||
doc2.setJsonEntity("{\"num\":[1,2], \"const\":\"foo\"}");
|
||||
client().performRequest(doc2);
|
||||
|
||||
Request doc3 = new Request(HttpPut.METHOD_NAME, "/index2/type/2");
|
||||
doc3.setJsonEntity("{\"num\":[2,3], \"const\":\"foo\"}");
|
||||
client().performRequest(doc3);
|
||||
|
||||
Request doc4 = new Request(HttpPut.METHOD_NAME, "/index_no_field_data/type/2");
|
||||
doc4.setJsonEntity("{\"num\":\"string\", \"const\":\"foo\"}");
|
||||
client().performRequest(doc4);
|
||||
|
||||
Request doc5 = new Request(HttpPut.METHOD_NAME, "/index_no_field_data/type/2");
|
||||
doc5.setJsonEntity("{\"num\":[2,4], \"const\":\"foo\"}");
|
||||
client().performRequest(doc5);
|
||||
|
||||
|
||||
client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh"));
|
||||
}
|
||||
|
||||
public void testCleanExplore() throws Exception {
|
||||
GraphExploreRequest graphExploreRequest = new GraphExploreRequest();
|
||||
graphExploreRequest.indices("index1", "index2");
|
||||
graphExploreRequest.useSignificance(false);
|
||||
int numHops = 3;
|
||||
for (int i = 0; i < numHops; i++) {
|
||||
QueryBuilder guidingQuery = null;
|
||||
if (i == 0) {
|
||||
guidingQuery = new TermQueryBuilder("const.keyword", "start");
|
||||
} else if (randomBoolean()){
|
||||
guidingQuery = new TermQueryBuilder("const.keyword", "foo");
|
||||
}
|
||||
Hop hop = graphExploreRequest.createNextHop(guidingQuery);
|
||||
VertexRequest vr = hop.addVertexRequest("num");
|
||||
vr.minDocCount(1);
|
||||
}
|
||||
Map<String, Integer> expectedTermsAndDepths = new HashMap<>();
|
||||
expectedTermsAndDepths.put("1", 0);
|
||||
expectedTermsAndDepths.put("2", 1);
|
||||
expectedTermsAndDepths.put("3", 2);
|
||||
|
||||
GraphExploreResponse exploreResponse = highLevelClient().graph().explore(graphExploreRequest, RequestOptions.DEFAULT);
|
||||
Map<String, Integer> actualTermsAndDepths = new HashMap<>();
|
||||
Collection<Vertex> v = exploreResponse.getVertices();
|
||||
for (Vertex vertex : v) {
|
||||
actualTermsAndDepths.put(vertex.getTerm(), vertex.getHopDepth());
|
||||
}
|
||||
assertEquals(expectedTermsAndDepths, actualTermsAndDepths);
|
||||
assertThat(exploreResponse.isTimedOut(), Matchers.is(false));
|
||||
ShardOperationFailedException[] failures = exploreResponse.getShardFailures();
|
||||
assertThat(failures.length, Matchers.equalTo(0));
|
||||
|
||||
}
|
||||
|
||||
public void testBadExplore() throws Exception {
|
||||
//Explore indices where lack of fielddata=true on one index leads to partial failures
|
||||
GraphExploreRequest graphExploreRequest = new GraphExploreRequest();
|
||||
graphExploreRequest.indices("index1", "index2", "index_no_field_data");
|
||||
graphExploreRequest.useSignificance(false);
|
||||
int numHops = 3;
|
||||
for (int i = 0; i < numHops; i++) {
|
||||
QueryBuilder guidingQuery = null;
|
||||
if (i == 0) {
|
||||
guidingQuery = new TermQueryBuilder("const.keyword", "start");
|
||||
} else if (randomBoolean()){
|
||||
guidingQuery = new TermQueryBuilder("const.keyword", "foo");
|
||||
}
|
||||
Hop hop = graphExploreRequest.createNextHop(guidingQuery);
|
||||
VertexRequest vr = hop.addVertexRequest("num");
|
||||
vr.minDocCount(1);
|
||||
}
|
||||
Map<String, Integer> expectedTermsAndDepths = new HashMap<>();
|
||||
expectedTermsAndDepths.put("1", 0);
|
||||
expectedTermsAndDepths.put("2", 1);
|
||||
expectedTermsAndDepths.put("3", 2);
|
||||
|
||||
GraphExploreResponse exploreResponse = highLevelClient().graph().explore(graphExploreRequest, RequestOptions.DEFAULT);
|
||||
Map<String, Integer> actualTermsAndDepths = new HashMap<>();
|
||||
Collection<Vertex> v = exploreResponse.getVertices();
|
||||
for (Vertex vertex : v) {
|
||||
actualTermsAndDepths.put(vertex.getTerm(), vertex.getHopDepth());
|
||||
}
|
||||
assertEquals(expectedTermsAndDepths, actualTermsAndDepths);
|
||||
assertThat(exploreResponse.isTimedOut(), Matchers.is(false));
|
||||
ShardOperationFailedException[] failures = exploreResponse.getShardFailures();
|
||||
assertThat(failures.length, Matchers.equalTo(1));
|
||||
assertTrue(failures[0].reason().contains("Fielddata is disabled"));
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
@ -20,16 +20,22 @@
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.client.methods.HttpDelete;
|
||||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.apache.http.client.methods.HttpPost;
|
||||
import org.apache.http.client.methods.HttpPut;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.protocol.xpack.ml.CloseJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.GetJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.config.Detector;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.config.Job;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.util.PageParams;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
@ -46,6 +52,7 @@ public class MLRequestConvertersTests extends ESTestCase {
|
||||
|
||||
Request request = MLRequestConverters.putJob(putJobRequest);
|
||||
|
||||
assertEquals(HttpPut.METHOD_NAME, request.getMethod());
|
||||
assertThat(request.getEndpoint(), equalTo("/_xpack/ml/anomaly_detectors/foo"));
|
||||
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
|
||||
Job parsedJob = Job.PARSER.apply(parser, null).build();
|
||||
@ -53,6 +60,23 @@ public class MLRequestConvertersTests extends ESTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetJob() {
|
||||
GetJobRequest getJobRequest = new GetJobRequest();
|
||||
|
||||
Request request = MLRequestConverters.getJob(getJobRequest);
|
||||
|
||||
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/_xpack/ml/anomaly_detectors", request.getEndpoint());
|
||||
assertFalse(request.getParameters().containsKey("allow_no_jobs"));
|
||||
|
||||
getJobRequest = new GetJobRequest("job1", "jobs*");
|
||||
getJobRequest.setAllowNoJobs(true);
|
||||
request = MLRequestConverters.getJob(getJobRequest);
|
||||
|
||||
assertEquals("/_xpack/ml/anomaly_detectors/job1,jobs*", request.getEndpoint());
|
||||
assertEquals(Boolean.toString(true), request.getParameters().get("allow_no_jobs"));
|
||||
}
|
||||
|
||||
public void testOpenJob() throws Exception {
|
||||
String jobId = "some-job-id";
|
||||
OpenJobRequest openJobRequest = new OpenJobRequest(jobId);
|
||||
@ -61,9 +85,27 @@ public class MLRequestConvertersTests extends ESTestCase {
|
||||
Request request = MLRequestConverters.openJob(openJobRequest);
|
||||
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_open", request.getEndpoint());
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream();
|
||||
request.getEntity().writeTo(bos);
|
||||
assertEquals(bos.toString("UTF-8"), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}");
|
||||
assertEquals(requestEntityToString(request), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}");
|
||||
}
|
||||
|
||||
public void testCloseJob() throws Exception {
|
||||
String jobId = "somejobid";
|
||||
CloseJobRequest closeJobRequest = new CloseJobRequest(jobId);
|
||||
|
||||
Request request = MLRequestConverters.closeJob(closeJobRequest);
|
||||
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_close", request.getEndpoint());
|
||||
assertEquals("{\"job_id\":\"somejobid\"}", requestEntityToString(request));
|
||||
|
||||
closeJobRequest = new CloseJobRequest(jobId, "otherjobs*");
|
||||
closeJobRequest.setForce(true);
|
||||
closeJobRequest.setAllowNoJobs(false);
|
||||
closeJobRequest.setTimeout(TimeValue.timeValueMinutes(10));
|
||||
request = MLRequestConverters.closeJob(closeJobRequest);
|
||||
|
||||
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + ",otherjobs*/_close", request.getEndpoint());
|
||||
assertEquals("{\"job_id\":\"somejobid,otherjobs*\",\"timeout\":\"10m\",\"force\":true,\"allow_no_jobs\":false}",
|
||||
requestEntityToString(request));
|
||||
}
|
||||
|
||||
public void testDeleteJob() {
|
||||
@ -80,6 +122,23 @@ public class MLRequestConvertersTests extends ESTestCase {
|
||||
assertEquals(Boolean.toString(true), request.getParameters().get("force"));
|
||||
}
|
||||
|
||||
public void testGetBuckets() throws IOException {
|
||||
String jobId = randomAlphaOfLength(10);
|
||||
GetBucketsRequest getBucketsRequest = new GetBucketsRequest(jobId);
|
||||
getBucketsRequest.setPageParams(new PageParams(100, 300));
|
||||
getBucketsRequest.setAnomalyScore(75.0);
|
||||
getBucketsRequest.setSort("anomaly_score");
|
||||
getBucketsRequest.setDescending(true);
|
||||
|
||||
Request request = MLRequestConverters.getBuckets(getBucketsRequest);
|
||||
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/results/buckets", request.getEndpoint());
|
||||
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
|
||||
GetBucketsRequest parsedRequest = GetBucketsRequest.PARSER.apply(parser, null);
|
||||
assertThat(parsedRequest, equalTo(getBucketsRequest));
|
||||
}
|
||||
}
|
||||
|
||||
private static Job createValidJob(String jobId) {
|
||||
AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList(
|
||||
Detector.builder().setFunction("count").build()));
|
||||
@ -87,4 +146,10 @@ public class MLRequestConvertersTests extends ESTestCase {
|
||||
jobBuilder.setAnalysisConfig(analysisConfig);
|
||||
return jobBuilder.build();
|
||||
}
|
||||
}
|
||||
|
||||
private static String requestEntityToString(Request request) throws Exception {
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream();
|
||||
request.getEntity().writeTo(bos);
|
||||
return bos.toString("UTF-8");
|
||||
}
|
||||
}
|
||||
|
@ -0,0 +1,217 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.GetBucketsResponse;
|
||||
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.config.Job;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.results.Bucket;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.util.PageParams;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.lessThanOrEqualTo;
|
||||
|
||||
public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
private static final String RESULTS_INDEX = ".ml-anomalies-shared";
|
||||
private static final String DOC = "doc";
|
||||
|
||||
private static final String JOB_ID = "get-results-it-job";
|
||||
|
||||
// 2018-08-01T00:00:00Z
|
||||
private static final long START_TIME_EPOCH_MS = 1533081600000L;
|
||||
|
||||
private BucketStats bucketStats = new BucketStats();
|
||||
|
||||
@Before
|
||||
public void createJobAndIndexResults() throws IOException {
|
||||
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
|
||||
Job job = MachineLearningIT.buildJob(JOB_ID);
|
||||
machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
|
||||
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
|
||||
|
||||
long time = START_TIME_EPOCH_MS;
|
||||
long endTime = time + 3600000L * 24 * 10; // 10 days of hourly buckets
|
||||
while (time < endTime) {
|
||||
addBucketIndexRequest(time, false, bulkRequest);
|
||||
addRecordIndexRequests(time, false, bulkRequest);
|
||||
time += 3600000L;
|
||||
}
|
||||
|
||||
// Also index an interim bucket
|
||||
addBucketIndexRequest(time, true, bulkRequest);
|
||||
addRecordIndexRequests(time, true, bulkRequest);
|
||||
|
||||
highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT);
|
||||
}
|
||||
|
||||
private void addBucketIndexRequest(long timestamp, boolean isInterim, BulkRequest bulkRequest) {
|
||||
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC);
|
||||
double bucketScore = randomDoubleBetween(0.0, 100.0, true);
|
||||
bucketStats.report(bucketScore);
|
||||
indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"bucket\", \"timestamp\": " + timestamp + "," +
|
||||
"\"bucket_span\": 3600,\"is_interim\": " + isInterim + ", \"anomaly_score\": " + bucketScore +
|
||||
", \"bucket_influencers\":[{\"job_id\": \"" + JOB_ID + "\", \"result_type\":\"bucket_influencer\", " +
|
||||
"\"influencer_field_name\": \"bucket_time\", \"timestamp\": " + timestamp + ", \"bucket_span\": 3600, " +
|
||||
"\"is_interim\": " + isInterim + "}]}", XContentType.JSON);
|
||||
bulkRequest.add(indexRequest);
|
||||
}
|
||||
|
||||
private void addRecordIndexRequests(long timestamp, boolean isInterim, BulkRequest bulkRequest) {
|
||||
if (randomBoolean()) {
|
||||
return;
|
||||
}
|
||||
int recordCount = randomIntBetween(1, 3);
|
||||
for (int i = 0; i < recordCount; ++i) {
|
||||
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC);
|
||||
double recordScore = randomDoubleBetween(0.0, 100.0, true);
|
||||
double p = randomDoubleBetween(0.0, 0.05, false);
|
||||
indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"record\", \"timestamp\": " + timestamp + "," +
|
||||
"\"bucket_span\": 3600,\"is_interim\": " + isInterim + ", \"record_score\": " + recordScore + ", \"probability\": "
|
||||
+ p + "}", XContentType.JSON);
|
||||
bulkRequest.add(indexRequest);
|
||||
}
|
||||
}
|
||||
|
||||
@After
|
||||
public void deleteJob() throws IOException {
|
||||
new MlRestTestStateCleaner(logger, client()).clearMlMetadata();
|
||||
}
|
||||
|
||||
public void testGetBuckets() throws IOException {
|
||||
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
|
||||
|
||||
{
|
||||
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
|
||||
|
||||
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
|
||||
|
||||
assertThat(response.count(), equalTo(241L));
|
||||
assertThat(response.buckets().size(), equalTo(100));
|
||||
assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS));
|
||||
}
|
||||
{
|
||||
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
|
||||
request.setTimestamp("1533081600000");
|
||||
|
||||
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
|
||||
|
||||
assertThat(response.count(), equalTo(1L));
|
||||
assertThat(response.buckets().size(), equalTo(1));
|
||||
assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS));
|
||||
}
|
||||
{
|
||||
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
|
||||
request.setAnomalyScore(75.0);
|
||||
|
||||
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
|
||||
|
||||
assertThat(response.count(), equalTo(bucketStats.criticalCount));
|
||||
assertThat(response.buckets().size(), equalTo((int) Math.min(100, bucketStats.criticalCount)));
|
||||
assertThat(response.buckets().stream().anyMatch(b -> b.getAnomalyScore() < 75.0), is(false));
|
||||
}
|
||||
{
|
||||
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
|
||||
request.setExcludeInterim(true);
|
||||
|
||||
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
|
||||
|
||||
assertThat(response.count(), equalTo(240L));
|
||||
}
|
||||
{
|
||||
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
|
||||
request.setStart("1533081600000");
|
||||
request.setEnd("1533092400000");
|
||||
|
||||
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
|
||||
|
||||
assertThat(response.count(), equalTo(3L));
|
||||
assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS));
|
||||
assertThat(response.buckets().get(1).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 3600000L));
|
||||
assertThat(response.buckets().get(2).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 2 * 3600000L));
|
||||
}
|
||||
{
|
||||
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
|
||||
request.setPageParams(new PageParams(3, 3));
|
||||
|
||||
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
|
||||
|
||||
assertThat(response.buckets().size(), equalTo(3));
|
||||
assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 3 * 3600000L));
|
||||
assertThat(response.buckets().get(1).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 4 * 3600000L));
|
||||
assertThat(response.buckets().get(2).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 5 * 3600000L));
|
||||
}
|
||||
{
|
||||
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
|
||||
request.setSort("anomaly_score");
|
||||
request.setDescending(true);
|
||||
|
||||
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
|
||||
|
||||
double previousScore = 100.0;
|
||||
for (Bucket bucket : response.buckets()) {
|
||||
assertThat(bucket.getAnomalyScore(), lessThanOrEqualTo(previousScore));
|
||||
previousScore = bucket.getAnomalyScore();
|
||||
}
|
||||
}
|
||||
{
|
||||
GetBucketsRequest request = new GetBucketsRequest(JOB_ID);
|
||||
// Make sure we get all buckets
|
||||
request.setPageParams(new PageParams(0, 10000));
|
||||
request.setExpand(true);
|
||||
|
||||
GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync);
|
||||
|
||||
assertThat(response.buckets().stream().anyMatch(b -> b.getRecords().size() > 0), is(true));
|
||||
}
|
||||
}
|
||||
|
||||
private static class BucketStats {
|
||||
// score < 50.0
|
||||
private long minorCount;
|
||||
|
||||
// score < 75.0
|
||||
private long majorCount;
|
||||
|
||||
// score > 75.0
|
||||
private long criticalCount;
|
||||
|
||||
private void report(double anomalyScore) {
|
||||
if (anomalyScore < 50.0) {
|
||||
minorCount++;
|
||||
} else if (anomalyScore < 75.0) {
|
||||
majorCount++;
|
||||
} else {
|
||||
criticalCount++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -19,10 +19,13 @@
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
|
||||
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.protocol.xpack.ml.CloseJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.CloseJobResponse;
|
||||
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse;
|
||||
import org.elasticsearch.protocol.xpack.ml.GetJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.GetJobResponse;
|
||||
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.OpenJobResponse;
|
||||
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
|
||||
@ -31,15 +34,25 @@ import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.config.Detector;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.config.Job;
|
||||
import org.junit.After;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.hamcrest.CoreMatchers.hasItems;
|
||||
import static org.hamcrest.Matchers.containsInAnyOrder;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32993")
|
||||
public class MachineLearningIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@After
|
||||
public void cleanUp() throws IOException {
|
||||
new MlRestTestStateCleaner(logger, client()).clearMlMetadata();
|
||||
}
|
||||
|
||||
public void testPutJob() throws Exception {
|
||||
String jobId = randomValidJobId();
|
||||
Job job = buildJob(jobId);
|
||||
@ -52,6 +65,41 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
|
||||
assertThat(createdJob.getJobType(), is(Job.ANOMALY_DETECTOR_JOB_TYPE));
|
||||
}
|
||||
|
||||
public void testGetJob() throws Exception {
|
||||
String jobId1 = randomValidJobId();
|
||||
String jobId2 = randomValidJobId();
|
||||
|
||||
Job job1 = buildJob(jobId1);
|
||||
Job job2 = buildJob(jobId2);
|
||||
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
|
||||
machineLearningClient.putJob(new PutJobRequest(job1), RequestOptions.DEFAULT);
|
||||
machineLearningClient.putJob(new PutJobRequest(job2), RequestOptions.DEFAULT);
|
||||
|
||||
GetJobRequest request = new GetJobRequest(jobId1, jobId2);
|
||||
|
||||
// Test getting specific jobs
|
||||
GetJobResponse response = execute(request, machineLearningClient::getJob, machineLearningClient::getJobAsync);
|
||||
|
||||
assertEquals(2, response.count());
|
||||
assertThat(response.jobs(), hasSize(2));
|
||||
assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), containsInAnyOrder(jobId1, jobId2));
|
||||
|
||||
// Test getting all jobs explicitly
|
||||
request = GetJobRequest.getAllJobsRequest();
|
||||
response = execute(request, machineLearningClient::getJob, machineLearningClient::getJobAsync);
|
||||
|
||||
assertTrue(response.count() >= 2L);
|
||||
assertTrue(response.jobs().size() >= 2L);
|
||||
assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), hasItems(jobId1, jobId2));
|
||||
|
||||
// Test getting all jobs implicitly
|
||||
response = execute(new GetJobRequest(), machineLearningClient::getJob, machineLearningClient::getJobAsync);
|
||||
|
||||
assertTrue(response.count() >= 2L);
|
||||
assertTrue(response.jobs().size() >= 2L);
|
||||
assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), hasItems(jobId1, jobId2));
|
||||
}
|
||||
|
||||
public void testDeleteJob() throws Exception {
|
||||
String jobId = randomValidJobId();
|
||||
Job job = buildJob(jobId);
|
||||
@ -77,6 +125,19 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
|
||||
assertTrue(response.isOpened());
|
||||
}
|
||||
|
||||
public void testCloseJob() throws Exception {
|
||||
String jobId = randomValidJobId();
|
||||
Job job = buildJob(jobId);
|
||||
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
|
||||
machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
|
||||
machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT);
|
||||
|
||||
CloseJobResponse response = execute(new CloseJobRequest(jobId),
|
||||
machineLearningClient::closeJob,
|
||||
machineLearningClient::closeJobAsync);
|
||||
assertTrue(response.isClosed());
|
||||
}
|
||||
|
||||
public static String randomValidJobId() {
|
||||
CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray());
|
||||
return generator.ofCodePointsLength(random(), 10, 10);
|
||||
|
@ -0,0 +1,109 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.common.xcontent.support.XContentMapValues;
|
||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* This is temporarily duplicated from the server side.
|
||||
* @TODO Replace with an implementation using the HLRC once
|
||||
* the APIs for managing datafeeds are implemented.
|
||||
*/
|
||||
public class MlRestTestStateCleaner {
|
||||
|
||||
private final Logger logger;
|
||||
private final RestClient adminClient;
|
||||
|
||||
public MlRestTestStateCleaner(Logger logger, RestClient adminClient) {
|
||||
this.logger = logger;
|
||||
this.adminClient = adminClient;
|
||||
}
|
||||
|
||||
public void clearMlMetadata() throws IOException {
|
||||
deleteAllDatafeeds();
|
||||
deleteAllJobs();
|
||||
// indices will be deleted by the ESRestTestCase class
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private void deleteAllDatafeeds() throws IOException {
|
||||
final Request datafeedsRequest = new Request("GET", "/_xpack/ml/datafeeds");
|
||||
datafeedsRequest.addParameter("filter_path", "datafeeds");
|
||||
final Response datafeedsResponse = adminClient.performRequest(datafeedsRequest);
|
||||
final List<Map<String, Object>> datafeeds =
|
||||
(List<Map<String, Object>>) XContentMapValues.extractValue("datafeeds", ESRestTestCase.entityAsMap(datafeedsResponse));
|
||||
if (datafeeds == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
adminClient.performRequest(new Request("POST", "/_xpack/ml/datafeeds/_all/_stop"));
|
||||
} catch (Exception e1) {
|
||||
logger.warn("failed to stop all datafeeds. Forcing stop", e1);
|
||||
try {
|
||||
adminClient.performRequest(new Request("POST", "/_xpack/ml/datafeeds/_all/_stop?force=true"));
|
||||
} catch (Exception e2) {
|
||||
logger.warn("Force-closing all data feeds failed", e2);
|
||||
}
|
||||
throw new RuntimeException(
|
||||
"Had to resort to force-stopping datafeeds, something went wrong?", e1);
|
||||
}
|
||||
|
||||
for (Map<String, Object> datafeed : datafeeds) {
|
||||
String datafeedId = (String) datafeed.get("datafeed_id");
|
||||
adminClient.performRequest(new Request("DELETE", "/_xpack/ml/datafeeds/" + datafeedId));
|
||||
}
|
||||
}
|
||||
|
||||
private void deleteAllJobs() throws IOException {
|
||||
final Request jobsRequest = new Request("GET", "/_xpack/ml/anomaly_detectors");
|
||||
jobsRequest.addParameter("filter_path", "jobs");
|
||||
final Response response = adminClient.performRequest(jobsRequest);
|
||||
@SuppressWarnings("unchecked")
|
||||
final List<Map<String, Object>> jobConfigs =
|
||||
(List<Map<String, Object>>) XContentMapValues.extractValue("jobs", ESRestTestCase.entityAsMap(response));
|
||||
if (jobConfigs == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
adminClient.performRequest(new Request("POST", "/_xpack/ml/anomaly_detectors/_all/_close"));
|
||||
} catch (Exception e1) {
|
||||
logger.warn("failed to close all jobs. Forcing closed", e1);
|
||||
try {
|
||||
adminClient.performRequest(new Request("POST", "/_xpack/ml/anomaly_detectors/_all/_close?force=true"));
|
||||
} catch (Exception e2) {
|
||||
logger.warn("Force-closing all jobs failed", e2);
|
||||
}
|
||||
throw new RuntimeException("Had to resort to force-closing jobs, something went wrong?",
|
||||
e1);
|
||||
}
|
||||
|
||||
for (Map<String, Object> jobConfig : jobConfigs) {
|
||||
String jobId = (String) jobConfig.get("job_id");
|
||||
adminClient.performRequest(new Request("DELETE", "/_xpack/ml/anomaly_detectors/" + jobId));
|
||||
}
|
||||
}
|
||||
}
|
@ -118,6 +118,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.RandomCreateIndexGenerator;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.index.query.TermQueryBuilder;
|
||||
import org.elasticsearch.index.rankeval.PrecisionAtK;
|
||||
@ -128,6 +129,8 @@ import org.elasticsearch.index.rankeval.RestRankEvalAction;
|
||||
import org.elasticsearch.protocol.xpack.XPackInfoRequest;
|
||||
import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest;
|
||||
import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
|
||||
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
|
||||
import org.elasticsearch.protocol.xpack.graph.Hop;
|
||||
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
|
||||
import org.elasticsearch.repositories.fs.FsRepository;
|
||||
import org.elasticsearch.rest.action.search.RestSearchAction;
|
||||
@ -2598,6 +2601,35 @@ public class RequestConvertersTests extends ESTestCase {
|
||||
request.getEntity().writeTo(bos);
|
||||
assertThat(bos.toString("UTF-8"), is(body));
|
||||
}
|
||||
|
||||
public void testGraphExplore() throws Exception {
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
|
||||
GraphExploreRequest graphExploreRequest = new GraphExploreRequest();
|
||||
graphExploreRequest.sampleDiversityField("diversity");
|
||||
graphExploreRequest.indices("index1", "index2");
|
||||
graphExploreRequest.types("type1", "type2");
|
||||
int timeout = randomIntBetween(10000, 20000);
|
||||
graphExploreRequest.timeout(TimeValue.timeValueMillis(timeout));
|
||||
graphExploreRequest.useSignificance(randomBoolean());
|
||||
int numHops = randomIntBetween(1, 5);
|
||||
for (int i = 0; i < numHops; i++) {
|
||||
int hopNumber = i + 1;
|
||||
QueryBuilder guidingQuery = null;
|
||||
if (randomBoolean()) {
|
||||
guidingQuery = new TermQueryBuilder("field" + hopNumber, "value" + hopNumber);
|
||||
}
|
||||
Hop hop = graphExploreRequest.createNextHop(guidingQuery);
|
||||
hop.addVertexRequest("field" + hopNumber);
|
||||
hop.getVertexRequest(0).addInclude("value" + hopNumber, hopNumber);
|
||||
}
|
||||
Request request = RequestConverters.xPackGraphExplore(graphExploreRequest);
|
||||
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/index1,index2/type1,type2/_xpack/graph/_explore", request.getEndpoint());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters()));
|
||||
assertToXContentBody(graphExploreRequest, request.getEntity());
|
||||
}
|
||||
|
||||
public void testXPackDeleteWatch() {
|
||||
DeleteWatchRequest deleteWatchRequest = new DeleteWatchRequest();
|
||||
|
@ -758,6 +758,7 @@ public class RestHighLevelClientTests extends ESTestCase {
                    apiName.startsWith("license.") == false &&
                    apiName.startsWith("machine_learning.") == false &&
                    apiName.startsWith("watcher.") == false &&
                    apiName.startsWith("graph.") == false &&
                    apiName.startsWith("migration.") == false) {
                apiNotFound.add(apiName);
            }
@ -256,7 +256,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
assertNull(searchResponse.getSuggest());
|
||||
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||
assertEquals(0, searchResponse.getHits().getHits().length);
|
||||
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);
|
||||
assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f);
|
||||
Terms termsAgg = searchResponse.getAggregations().get("agg1");
|
||||
assertEquals("agg1", termsAgg.getName());
|
||||
assertEquals(2, termsAgg.getBuckets().size());
|
||||
@ -293,7 +293,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||
assertEquals(5, searchResponse.getHits().totalHits);
|
||||
assertEquals(0, searchResponse.getHits().getHits().length);
|
||||
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);
|
||||
assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f);
|
||||
Range rangeAgg = searchResponse.getAggregations().get("agg1");
|
||||
assertEquals("agg1", rangeAgg.getName());
|
||||
assertEquals(2, rangeAgg.getBuckets().size());
|
||||
@ -323,7 +323,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
assertNull(searchResponse.getSuggest());
|
||||
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||
assertEquals(0, searchResponse.getHits().getHits().length);
|
||||
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);
|
||||
assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f);
|
||||
Terms termsAgg = searchResponse.getAggregations().get("agg1");
|
||||
assertEquals("agg1", termsAgg.getName());
|
||||
assertEquals(2, termsAgg.getBuckets().size());
|
||||
@ -375,7 +375,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||
assertEquals(5, searchResponse.getHits().totalHits);
|
||||
assertEquals(0, searchResponse.getHits().getHits().length);
|
||||
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);
|
||||
assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f);
|
||||
assertEquals(1, searchResponse.getAggregations().asList().size());
|
||||
MatrixStats matrixStats = searchResponse.getAggregations().get("agg1");
|
||||
assertEquals(5, matrixStats.getFieldCount("num"));
|
||||
@ -474,7 +474,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||
assertEquals(3, searchResponse.getHits().totalHits);
|
||||
assertEquals(0, searchResponse.getHits().getHits().length);
|
||||
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);
|
||||
assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f);
|
||||
assertEquals(1, searchResponse.getAggregations().asList().size());
|
||||
Terms terms = searchResponse.getAggregations().get("top-tags");
|
||||
assertEquals(0, terms.getDocCountError());
|
||||
@ -513,7 +513,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
assertNull(searchResponse.getAggregations());
|
||||
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||
assertEquals(0, searchResponse.getHits().totalHits);
|
||||
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);
|
||||
assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f);
|
||||
assertEquals(0, searchResponse.getHits().getHits().length);
|
||||
assertEquals(1, searchResponse.getSuggest().size());
|
||||
|
||||
|
@ -0,0 +1,125 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.apache.http.client.methods.HttpPost;
|
||||
import org.apache.http.client.methods.HttpPut;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.index.query.TermQueryBuilder;
|
||||
import org.elasticsearch.protocol.xpack.graph.Connection;
|
||||
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
|
||||
import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse;
|
||||
import org.elasticsearch.protocol.xpack.graph.Hop;
|
||||
import org.elasticsearch.protocol.xpack.graph.Vertex;
|
||||
import org.elasticsearch.protocol.xpack.graph.VertexRequest;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
|
||||
public class GraphDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
|
||||
@Before
|
||||
public void indexDocuments() throws IOException {
|
||||
// Create chain of doc IDs across indices 1->2->3
|
||||
Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/type/1");
|
||||
doc1.setJsonEntity("{ \"participants\":[1,2], \"text\":\"let's start projectx\", \"attachment_md5\":\"324FHDGHFDG4564\"}");
|
||||
client().performRequest(doc1);
|
||||
|
||||
Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/type/2");
|
||||
doc2.setJsonEntity("{\"participants\":[2,3,4], \"text\":\"got something you both may be interested in\"}");
|
||||
client().performRequest(doc2);
|
||||
|
||||
client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh"));
|
||||
}
|
||||
|
||||
@SuppressForbidden(reason = "system out is ok for a documentation example")
|
||||
public void testExplore() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
|
||||
|
||||
// tag::x-pack-graph-explore-request
|
||||
GraphExploreRequest request = new GraphExploreRequest();
|
||||
request.indices("index1", "index2");
|
||||
request.useSignificance(false);
|
||||
TermQueryBuilder startingQuery = new TermQueryBuilder("text", "projectx");
|
||||
|
||||
Hop hop1 = request.createNextHop(startingQuery); // <1>
|
||||
VertexRequest people = hop1.addVertexRequest("participants"); // <2>
|
||||
people.minDocCount(1);
|
||||
VertexRequest files = hop1.addVertexRequest("attachment_md5");
|
||||
files.minDocCount(1);
|
||||
|
||||
Hop hop2 = request.createNextHop(null); // <3>
|
||||
VertexRequest vr2 = hop2.addVertexRequest("participants");
|
||||
vr2.minDocCount(5);
|
||||
|
||||
GraphExploreResponse exploreResponse = client.graph().explore(request, RequestOptions.DEFAULT); // <4>
|
||||
// end::x-pack-graph-explore-request
|
||||
|
||||
|
||||
// tag::x-pack-graph-explore-response
|
||||
Collection<Vertex> v = exploreResponse.getVertices();
|
||||
Collection<Connection> c = exploreResponse.getConnections();
|
||||
for (Vertex vertex : v) {
|
||||
System.out.println(vertex.getField() + ":" + vertex.getTerm() + // <1>
|
||||
" discovered at hop depth " + vertex.getHopDepth());
|
||||
}
|
||||
for (Connection link : c) {
|
||||
System.out.println(link.getFrom() + " -> " + link.getTo() // <2>
|
||||
+ " evidenced by " + link.getDocCount() + " docs");
|
||||
}
|
||||
// end::x-pack-graph-explore-response
|
||||
|
||||
|
||||
Collection<Vertex> initialVertices = exploreResponse.getVertices();
|
||||
|
||||
// tag::x-pack-graph-explore-expand
|
||||
GraphExploreRequest expandRequest = new GraphExploreRequest();
|
||||
expandRequest.indices("index1", "index2");
|
||||
|
||||
|
||||
Hop expandHop1 = expandRequest.createNextHop(null); // <1>
|
||||
VertexRequest fromPeople = expandHop1.addVertexRequest("participants"); // <2>
|
||||
for (Vertex vertex : initialVertices) {
|
||||
if (vertex.getField().equals("participants")) {
|
||||
fromPeople.addInclude(vertex.getTerm(), 1f);
|
||||
}
|
||||
}
|
||||
|
||||
Hop expandHop2 = expandRequest.createNextHop(null);
|
||||
VertexRequest newPeople = expandHop2.addVertexRequest("participants"); // <3>
|
||||
for (Vertex vertex : initialVertices) {
|
||||
if (vertex.getField().equals("participants")) {
|
||||
newPeople.addExclude(vertex.getTerm());
|
||||
}
|
||||
}
|
||||
|
||||
GraphExploreResponse expandResponse = client.graph().explore(expandRequest, RequestOptions.DEFAULT);
|
||||
// end::x-pack-graph-explore-expand
|
||||
|
||||
}
|
||||
|
||||
}
|
@ -20,13 +20,23 @@ package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.MachineLearningIT;
|
||||
import org.elasticsearch.client.MlRestTestStateCleaner;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.protocol.xpack.ml.CloseJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.CloseJobResponse;
|
||||
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse;
|
||||
import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.GetBucketsResponse;
|
||||
import org.elasticsearch.protocol.xpack.ml.GetJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.GetJobResponse;
|
||||
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
|
||||
import org.elasticsearch.protocol.xpack.ml.OpenJobResponse;
|
||||
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
|
||||
@ -35,17 +45,29 @@ import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.config.Detector;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.config.Job;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.results.Bucket;
|
||||
import org.elasticsearch.protocol.xpack.ml.job.util.PageParams;
|
||||
import org.junit.After;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.hamcrest.Matchers.containsInAnyOrder;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
|
||||
public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@After
|
||||
public void cleanUp() throws IOException {
|
||||
new MlRestTestStateCleaner(logger, client()).clearMlMetadata();
|
||||
}
|
||||
|
||||
public void testCreateJob() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
@ -124,6 +146,63 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetJob() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
String jobId = "get-machine-learning-job1";
|
||||
|
||||
Job job = MachineLearningIT.buildJob("get-machine-learning-job1");
|
||||
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
|
||||
|
||||
Job secondJob = MachineLearningIT.buildJob("get-machine-learning-job2");
|
||||
client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT);
|
||||
|
||||
{
|
||||
//tag::x-pack-ml-get-job-request
|
||||
GetJobRequest request = new GetJobRequest("get-machine-learning-job1", "get-machine-learning-job*"); //<1>
|
||||
request.setAllowNoJobs(true); //<2>
|
||||
//end::x-pack-ml-get-job-request
|
||||
|
||||
//tag::x-pack-ml-get-job-execute
|
||||
GetJobResponse response = client.machineLearning().getJob(request, RequestOptions.DEFAULT);
|
||||
long numberOfJobs = response.count(); //<1>
|
||||
List<Job> jobs = response.jobs(); //<2>
|
||||
//end::x-pack-ml-get-job-execute
|
||||
|
||||
assertEquals(2, response.count());
|
||||
assertThat(response.jobs(), hasSize(2));
|
||||
assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()),
|
||||
containsInAnyOrder(job.getId(), secondJob.getId()));
|
||||
}
|
||||
{
|
||||
GetJobRequest request = new GetJobRequest("get-machine-learning-job1", "get-machine-learning-job*");
|
||||
|
||||
// tag::x-pack-ml-get-job-listener
|
||||
ActionListener<GetJobResponse> listener = new ActionListener<GetJobResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetJobResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-ml-get-job-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-ml-get-job-execute-async
|
||||
client.machineLearning().getJobAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-ml-get-job-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testDeleteJob() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
@ -221,4 +300,158 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testCloseJob() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
Job job = MachineLearningIT.buildJob("closing-my-first-machine-learning-job");
|
||||
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
|
||||
client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT);
|
||||
|
||||
//tag::x-pack-ml-close-job-request
|
||||
CloseJobRequest closeJobRequest = new CloseJobRequest("closing-my-first-machine-learning-job", "otherjobs*"); //<1>
|
||||
closeJobRequest.setForce(false); //<2>
|
||||
closeJobRequest.setAllowNoJobs(true); //<3>
|
||||
closeJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); //<4>
|
||||
//end::x-pack-ml-close-job-request
|
||||
|
||||
//tag::x-pack-ml-close-job-execute
|
||||
CloseJobResponse closeJobResponse = client.machineLearning().closeJob(closeJobRequest, RequestOptions.DEFAULT);
|
||||
boolean isClosed = closeJobResponse.isClosed(); //<1>
|
||||
//end::x-pack-ml-close-job-execute
|
||||
|
||||
}
|
||||
{
|
||||
Job job = MachineLearningIT.buildJob("closing-my-second-machine-learning-job");
|
||||
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
|
||||
client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT);
|
||||
|
||||
//tag::x-pack-ml-close-job-listener
|
||||
ActionListener<CloseJobResponse> listener = new ActionListener<CloseJobResponse>() {
|
||||
@Override
|
||||
public void onResponse(CloseJobResponse closeJobResponse) {
|
||||
//<1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
//end::x-pack-ml-close-job-listener
|
||||
CloseJobRequest closeJobRequest = new CloseJobRequest("closing-my-second-machine-learning-job");
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-ml-close-job-execute-async
|
||||
client.machineLearning().closeJobAsync(closeJobRequest, RequestOptions.DEFAULT, listener); //<1>
|
||||
// end::x-pack-ml-close-job-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetBuckets() throws IOException, InterruptedException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
String jobId = "test-get-buckets";
|
||||
Job job = MachineLearningIT.buildJob(jobId);
|
||||
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
|
||||
|
||||
// Let us index a bucket
|
||||
IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc");
|
||||
indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
|
||||
indexRequest.source("{\"job_id\":\"test-get-buckets\", \"result_type\":\"bucket\", \"timestamp\": 1533081600000," +
|
||||
"\"bucket_span\": 600,\"is_interim\": false, \"anomaly_score\": 80.0}", XContentType.JSON);
|
||||
client.index(indexRequest, RequestOptions.DEFAULT);
|
||||
|
||||
{
|
||||
// tag::x-pack-ml-get-buckets-request
|
||||
GetBucketsRequest request = new GetBucketsRequest(jobId); // <1>
|
||||
// end::x-pack-ml-get-buckets-request
|
||||
|
||||
// tag::x-pack-ml-get-buckets-timestamp
|
||||
request.setTimestamp("2018-08-17T00:00:00Z"); // <1>
|
||||
// end::x-pack-ml-get-buckets-timestamp
|
||||
|
||||
// Set timestamp to null as it is incompatible with other args
|
||||
request.setTimestamp(null);
|
||||
|
||||
// tag::x-pack-ml-get-buckets-anomaly-score
|
||||
request.setAnomalyScore(75.0); // <1>
|
||||
// end::x-pack-ml-get-buckets-anomaly-score
|
||||
|
||||
// tag::x-pack-ml-get-buckets-desc
|
||||
request.setDescending(true); // <1>
|
||||
// end::x-pack-ml-get-buckets-desc
|
||||
|
||||
// tag::x-pack-ml-get-buckets-end
|
||||
request.setEnd("2018-08-21T00:00:00Z"); // <1>
|
||||
// end::x-pack-ml-get-buckets-end
|
||||
|
||||
// tag::x-pack-ml-get-buckets-exclude-interim
|
||||
request.setExcludeInterim(true); // <1>
|
||||
// end::x-pack-ml-get-buckets-exclude-interim
|
||||
|
||||
// tag::x-pack-ml-get-buckets-expand
|
||||
request.setExpand(true); // <1>
|
||||
// end::x-pack-ml-get-buckets-expand
|
||||
|
||||
// tag::x-pack-ml-get-buckets-page
|
||||
request.setPageParams(new PageParams(100, 200)); // <1>
|
||||
// end::x-pack-ml-get-buckets-page
|
||||
|
||||
// Set page params back to null so the response contains the bucket we indexed
|
||||
request.setPageParams(null);
|
||||
|
||||
// tag::x-pack-ml-get-buckets-sort
|
||||
request.setSort("anomaly_score"); // <1>
|
||||
// end::x-pack-ml-get-buckets-sort
|
||||
|
||||
// tag::x-pack-ml-get-buckets-start
|
||||
request.setStart("2018-08-01T00:00:00Z"); // <1>
|
||||
// end::x-pack-ml-get-buckets-start
|
||||
|
||||
// tag::x-pack-ml-get-buckets-execute
|
||||
GetBucketsResponse response = client.machineLearning().getBuckets(request, RequestOptions.DEFAULT);
|
||||
// end::x-pack-ml-get-buckets-execute
|
||||
|
||||
// tag::x-pack-ml-get-buckets-response
|
||||
long count = response.count(); // <1>
|
||||
List<Bucket> buckets = response.buckets(); // <2>
|
||||
// end::x-pack-ml-get-buckets-response
|
||||
assertEquals(1, buckets.size());
|
||||
}
|
||||
{
|
||||
GetBucketsRequest request = new GetBucketsRequest(jobId);
|
||||
|
||||
// tag::x-pack-ml-get-buckets-listener
|
||||
ActionListener<GetBucketsResponse> listener =
|
||||
new ActionListener<GetBucketsResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetBucketsResponse getBucketsResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-ml-get-buckets-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-ml-get-buckets-execute-async
|
||||
client.machineLearning().getBucketsAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-ml-get-buckets-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,3 +1,5 @@
|
||||
import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
@ -16,9 +18,6 @@
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
apply plugin: 'nebula.maven-base-publish'
|
||||
apply plugin: 'nebula.maven-scm'
|
||||
@ -53,10 +52,9 @@ dependencies {
|
||||
testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}"
|
||||
}
|
||||
|
||||
forbiddenApisMain {
|
||||
tasks.withType(ForbiddenApisCliTask) {
|
||||
//client does not depend on server, so only jdk and http signatures should be checked
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
|
||||
PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
|
||||
replaceSignatureFiles ('jdk-signatures', 'http-signatures')
|
||||
}
|
||||
|
||||
forbiddenPatterns {
|
||||
@ -67,9 +65,6 @@ forbiddenApisTest {
|
||||
//we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
|
||||
bundledSignatures -= 'jdk-non-portable'
|
||||
bundledSignatures += 'jdk-internal'
|
||||
//client does not depend on server, so only jdk signatures should be checked
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
|
||||
PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
|
||||
}
|
||||
|
||||
// JarHell is part of es server, which we don't want to pull in
|
||||
|
@ -16,9 +16,6 @@
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
apply plugin: 'nebula.maven-base-publish'
|
||||
apply plugin: 'nebula.maven-scm'
|
||||
@ -55,7 +52,7 @@ dependencies {
|
||||
|
||||
forbiddenApisMain {
|
||||
//client does not depend on server, so only jdk signatures should be checked
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
replaceSignatureFiles 'jdk-signatures'
|
||||
}
|
||||
|
||||
forbiddenApisTest {
|
||||
@ -63,7 +60,7 @@ forbiddenApisTest {
|
||||
bundledSignatures -= 'jdk-non-portable'
|
||||
bundledSignatures += 'jdk-internal'
|
||||
//client does not depend on server, so only jdk signatures should be checked
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
replaceSignatureFiles 'jdk-signatures'
|
||||
}
|
||||
|
||||
dependencyLicenses {
|
||||
|
@ -16,10 +16,6 @@
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
import org.gradle.api.JavaVersion
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
|
||||
targetCompatibility = JavaVersion.VERSION_1_7
|
||||
@ -36,7 +32,7 @@ dependencies {
|
||||
|
||||
forbiddenApisMain {
|
||||
//client does not depend on core, so only jdk signatures should be checked
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
replaceSignatureFiles 'jdk-signatures'
|
||||
}
|
||||
|
||||
forbiddenApisTest {
|
||||
@ -44,7 +40,7 @@ forbiddenApisTest {
|
||||
bundledSignatures -= 'jdk-non-portable'
|
||||
bundledSignatures += 'jdk-internal'
|
||||
//client does not depend on core, so only jdk signatures should be checked
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
replaceSignatureFiles 'jdk-signatures'
|
||||
}
|
||||
|
||||
// JarHell is part of es server, which we don't want to pull in
|
||||
|
@ -16,9 +16,6 @@
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
apply plugin: 'nebula.maven-base-publish'
|
||||
apply plugin: 'nebula.maven-scm'
|
||||
@ -47,8 +44,7 @@ dependencyLicenses {
|
||||
forbiddenApisTest {
|
||||
// we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to
|
||||
// be pulled in
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
|
||||
PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')]
|
||||
replaceSignatureFiles 'jdk-signatures', 'es-all-signatures'
|
||||
}
|
||||
|
||||
namingConventions {
|
||||
|
@ -1,11 +1,11 @@
|
||||
import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
|
||||
targetCompatibility = JavaVersion.VERSION_1_7
|
||||
|
||||
// java_version_checker do not depend on core so only JDK signatures should be checked
|
||||
forbiddenApisMain.signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
forbiddenApisMain {
|
||||
replaceSignatureFiles 'jdk-signatures'
|
||||
}
|
||||
|
||||
test.enabled = false
|
||||
namingConventions.enabled = false
|
||||
|
@ -17,8 +17,9 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
import org.gradle.api.JavaVersion
|
||||
|
||||
|
||||
import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
|
||||
@ -31,10 +32,9 @@ dependencies {
|
||||
|
||||
archivesBaseName = 'elasticsearch-launchers'
|
||||
|
||||
// java_version_checker do not depend on core so only JDK signatures should be checked
|
||||
List jdkSignatures = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
forbiddenApisMain.signaturesURLs = jdkSignatures
|
||||
forbiddenApisTest.signaturesURLs = jdkSignatures
|
||||
tasks.withType(ForbiddenApisCliTask) {
|
||||
replaceSignatureFiles 'jdk-signatures'
|
||||
}
|
||||
|
||||
namingConventions {
|
||||
testClass = 'org.elasticsearch.tools.launchers.LaunchersTestCase'
|
||||
|
docs/java-rest/high-level/graph/explore.asciidoc (new file, 53 lines)
@ -0,0 +1,53 @@
|
||||
[[java-rest-high-x-pack-graph-explore]]
|
||||
=== X-Pack Graph explore API
|
||||
|
||||
[[java-rest-high-x-pack-graph-explore-execution]]
|
||||
==== Initial request
|
||||
|
||||
Graph queries are executed using the `explore()` method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-request]
|
||||
--------------------------------------------------
|
||||
<1> In this example we seed the exploration with a query to find messages mentioning the mysterious `projectx`
|
||||
<2> What we want to discover in these messages are the ids of `participants` in the communications and the md5 hashes
|
||||
of any attached files. In each case, we want to find people or files that have had at least one document connecting them
|
||||
to projectx.
|
||||
<3> The next "hop" in the graph exploration is to find the people who have shared several messages with the people or files
|
||||
discovered in the previous hop (the projectx conspirators). The `minDocCount` control is used here to ensure the people
|
||||
discovered have had at least 5 communications with projectx entities. Note we could also supply a "guiding query" here e.g. a
|
||||
date range to consider only recent communications but we pass null to consider all connections.
|
||||
<4> Finally we call the graph explore API with the GraphExploreRequest object.
|
||||
|
||||
|
||||
==== Response
|
||||
|
||||
Graph responses consist of Vertex and Connection objects (aka "nodes" and "edges" respectively):
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-response]
|
||||
--------------------------------------------------
|
||||
<1> Each Vertex is a unique term (a combination of fieldname and term value). The "hopDepth" property tells us at which point in the
|
||||
requested exploration this term was first discovered.
|
||||
<2> Each Connection is a pair of Vertex objects and includes a docCount property telling us how many times these two
|
||||
Vertex terms have been sighted together.
|
||||
|
||||
|
||||
[[java-rest-high-x-pack-graph-expand-execution]]
|
||||
==== Expanding a client-side Graph
|
||||
|
||||
Typically, once an application has rendered an initial GraphExploreResponse as a collection of vertices and connecting lines (graph visualization toolkits such as D3, sigma.js or Keylines help here), the next step a user may want to do is "expand". This involves finding new vertices that might be connected to the existing ones currently shown.
|
||||
|
||||
To do this we use the same `explore` method but our request contains details about which vertices to expand from and which vertices to avoid re-discovering.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-expand]
|
||||
--------------------------------------------------
|
||||
<1> Unlike the initial request we do not need to pass a starting query
|
||||
<2> In the first hop which represents our "from" vertices we explicitly list the terms that we already have on-screen and want to expand by using the `addInclude` filter.
|
||||
We can supply a boost for those terms that are considered more important to follow than others but here we select a common value of 1 for all.
|
||||
<3> When defining the second hop which represents the "to" vertices we hope to discover we explicitly list the terms that we already know about using the `addExclude` filter
|
||||
|
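As a rough illustration of the expand flow described above, here is a minimal sketch. It assumes a `RestHighLevelClient` named `client`, a hypothetical collection `alreadyDisplayedVertices` holding the `Vertex` objects currently rendered on screen, a `GraphExploreRequest` constructor taking an index name, and an `addInclude(term, boost)` method on `VertexRequest` (the constructor and `addInclude` signature are assumptions, not shown in this change; the remaining calls mirror the snippet referenced above):

["source","java"]
--------------------------------------------------
GraphExploreRequest expandRequest = new GraphExploreRequest("index");  // assumed constructor
Hop fromHop = expandRequest.createNextHop(null);                       // no guiding query
VertexRequest fromPeople = fromHop.addVertexRequest("participants");
for (Vertex vertex : alreadyDisplayedVertices) {
    fromPeople.addInclude(vertex.getTerm(), 1f);                       // expand from what is on screen (assumed signature)
}
Hop toHop = expandRequest.createNextHop(null);
VertexRequest newPeople = toHop.addVertexRequest("participants");
for (Vertex vertex : alreadyDisplayedVertices) {
    newPeople.addExclude(vertex.getTerm());                            // avoid re-discovering terms we already know
}
GraphExploreResponse expandResponse = client.graph().explore(expandRequest, RequestOptions.DEFAULT);
--------------------------------------------------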
docs/java-rest/high-level/ml/close-job.asciidoc (new file, 59 lines)
@ -0,0 +1,59 @@
|
||||
[[java-rest-high-x-pack-ml-close-job]]
|
||||
=== Close Job API
|
||||
|
||||
The Close Job API provides the ability to close {ml} jobs in the cluster.
|
||||
It accepts a `CloseJobRequest` object and responds
|
||||
with a `CloseJobResponse` object.
|
||||
|
||||
[[java-rest-high-x-pack-ml-close-job-request]]
|
||||
==== Close Job Request
|
||||
|
||||
A `CloseJobRequest` object gets created with an existing non-null `jobId`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-request]
|
||||
--------------------------------------------------
|
||||
<1> Constructing a new request referencing existing job IDs
|
||||
<2> Optionally used to close a failed job, or to forcefully close a job
|
||||
which has not responded to its initial close request.
|
||||
<3> Optionally set to ignore if a wildcard expression matches no jobs.
|
||||
(This includes the `_all` string or when no jobs have been specified.)
|
||||
<4> Optionally setting the `timeout` value for how long the
|
||||
execution should wait for the job to be closed.
|
||||
|
||||
[[java-rest-high-x-pack-ml-close-job-execution]]
|
||||
==== Execution
|
||||
|
||||
The request can be executed through the `MachineLearningClient` contained
|
||||
in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-execute]
|
||||
--------------------------------------------------
|
||||
<1> `isClosed()` from the `CloseJobResponse` indicates if the job was successfully
|
||||
closed or not.
|
||||
|
||||
[[java-rest-high-x-pack-ml-close-job-execution-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The request can also be executed asynchronously:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `CloseJobRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The method does not block and returns immediately. The passed `ActionListener` is used
|
||||
to notify the caller of completion. A typical `ActionListener` for `CloseJobResponse` may
|
||||
look like
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-listener]
|
||||
--------------------------------------------------
|
||||
<1> `onResponse` is called back when the action is completed successfully
|
||||
<2> `onFailure` is called back when some unexpected error occurs
|
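Putting the pieces above together, a minimal synchronous sketch might look like the following, assuming a `RestHighLevelClient` named `client` and an open job with the hypothetical id `my-job`:

["source","java"]
--------------------------------------------------
CloseJobRequest closeJobRequest = new CloseJobRequest("my-job");
closeJobRequest.setTimeout(TimeValue.timeValueMinutes(10));    // wait up to ten minutes for the close to complete
CloseJobResponse closeJobResponse = client.machineLearning().closeJob(closeJobRequest, RequestOptions.DEFAULT);
if (closeJobResponse.isClosed()) {
    // the job was closed within the timeout
}
--------------------------------------------------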
docs/java-rest/high-level/ml/get-buckets.asciidoc (new file, 125 lines)
@ -0,0 +1,125 @@
|
||||
[[java-rest-high-x-pack-ml-get-buckets]]
|
||||
=== Get Buckets API
|
||||
|
||||
The Get Buckets API retrieves one or more bucket results.
|
||||
It accepts a `GetBucketsRequest` object and responds
|
||||
with a `GetBucketsResponse` object.
|
||||
|
||||
[[java-rest-high-x-pack-ml-get-buckets-request]]
|
||||
==== Get Buckets Request
|
||||
|
||||
A `GetBucketsRequest` object gets created with an existing non-null `jobId`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-request]
|
||||
--------------------------------------------------
|
||||
<1> Constructing a new request referencing an existing `jobId`
|
||||
|
||||
==== Optional Arguments
|
||||
The following arguments are optional:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-timestamp]
|
||||
--------------------------------------------------
|
||||
<1> The timestamp of the bucket to get. If not set, all buckets will be returned.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-anomaly-score]
|
||||
--------------------------------------------------
|
||||
<1> Buckets with anomaly scores greater than or equal to this value will be returned.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-desc]
|
||||
--------------------------------------------------
|
||||
<1> If `true`, the buckets are sorted in descending order. Defaults to `false`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-end]
|
||||
--------------------------------------------------
|
||||
<1> Buckets with timestamps earlier than this time will be returned.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-exclude-interim]
|
||||
--------------------------------------------------
|
||||
<1> If `true`, interim results will be excluded. Defaults to `false`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-expand]
|
||||
--------------------------------------------------
|
||||
<1> If `true`, buckets will include their anomaly records. Defaults to `false`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-page]
|
||||
--------------------------------------------------
|
||||
<1> The page parameters `from` and `size`. `from` specifies the number of buckets to skip.
|
||||
`size` specifies the maximum number of buckets to get. Defaults to `0` and `100` respectively.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-sort]
|
||||
--------------------------------------------------
|
||||
<1> The field to sort buckets on. Defaults to `timestamp`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-start]
|
||||
--------------------------------------------------
|
||||
<1> Buckets with timestamps on or after this time will be returned.
|
||||
|
||||
[[java-rest-high-x-pack-ml-get-buckets-execution]]
|
||||
==== Execution
|
||||
|
||||
The request can be executed through the `MachineLearningClient` contained
|
||||
in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
|
||||
[[java-rest-high-x-pack-ml-get-buckets-execution-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The request can also be executed asynchronously:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `GetBucketsRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back with the `onResponse` method
|
||||
if the execution is successful or the `onFailure` method if the execution
|
||||
failed.
|
||||
|
||||
A typical listener for `GetBucketsResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-listener]
|
||||
--------------------------------------------------
|
||||
<1> `onResponse` is called back when the action is completed successfully
|
||||
<2> `onFailure` is called back when some unexpected error occurs
|
||||
|
||||
[[java-rest-high-snapshot-ml-get-buckets-response]]
|
||||
==== Get Buckets Response
|
||||
|
||||
The returned `GetBucketsResponse` contains the requested buckets:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-response]
|
||||
--------------------------------------------------
|
||||
<1> The count of buckets that were matched
|
||||
<2> The buckets retrieved
|
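As a quick orientation, the options above can be combined into a single request. The sketch below assumes a `RestHighLevelClient` named `client` and a job with the hypothetical id `my-job` that has already produced bucket results:

["source","java"]
--------------------------------------------------
GetBucketsRequest request = new GetBucketsRequest("my-job");
request.setAnomalyScore(75.0);                  // only buckets with a score of at least 75
request.setSort("anomaly_score");               // sort on the anomaly score...
request.setDescending(true);                    // ...highest scores first
request.setPageParams(new PageParams(0, 10));   // first page of at most 10 buckets
GetBucketsResponse response = client.machineLearning().getBuckets(request, RequestOptions.DEFAULT);
long matched = response.count();                // total number of matching buckets
List<Bucket> buckets = response.buckets();      // the page of buckets that was returned
--------------------------------------------------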
docs/java-rest/high-level/ml/get-job.asciidoc (new file, 57 lines)
@ -0,0 +1,57 @@
|
||||
[[java-rest-high-x-pack-ml-get-job]]
|
||||
=== Get Job API
|
||||
|
||||
The Get Job API provides the ability to get {ml} jobs in the cluster.
|
||||
It accepts a `GetJobRequest` object and responds
|
||||
with a `GetJobResponse` object.
|
||||
|
||||
[[java-rest-high-x-pack-ml-get-job-request]]
|
||||
==== Get Job Request
|
||||
|
||||
A `GetJobRequest` object can have any number of `jobId` or `groupName`
|
||||
entries. However, they all must be non-null. An empty list is the same as
|
||||
requesting all jobs.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-request]
|
||||
--------------------------------------------------
|
||||
<1> Constructing a new request referencing existing `jobIds`; it can contain wildcards
|
||||
<2> Whether to ignore if a wildcard expression matches no jobs.
|
||||
(This includes the `_all` string or when no jobs have been specified.)
|
||||
|
||||
[[java-rest-high-x-pack-ml-get-job-execution]]
|
||||
==== Execution
|
||||
|
||||
The request can be executed through the `MachineLearningClient` contained
|
||||
in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-execute]
|
||||
--------------------------------------------------
|
||||
<1> `count()` from the `GetJobResponse` indicates the number of jobs found
|
||||
<2> `jobs()` is the collection of {ml} `Job` objects found
|
||||
|
||||
[[java-rest-high-x-pack-ml-get-job-execution-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The request can also be executed asynchronously:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `GetJobRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The method does not block and returns immediately. The passed `ActionListener` is used
|
||||
to notify the caller of completion. A typical `ActionListener` for `GetJobResponse` may
|
||||
look like
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-listener]
|
||||
--------------------------------------------------
|
||||
<1> `onResponse` is called back when the action is completed successfully
|
||||
<2> `onFailure` is called back when some unexpected error occurs
|
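For orientation, here is a minimal synchronous sketch combining the request options above. It assumes a `RestHighLevelClient` named `client` and jobs whose ids start with the hypothetical prefix `my-jobs-`:

["source","java"]
--------------------------------------------------
GetJobRequest request = new GetJobRequest("my-jobs-*");    // wildcard expression matching several jobs
request.setAllowNoJobs(true);                              // do not fail if the expression matches nothing
GetJobResponse response = client.machineLearning().getJob(request, RequestOptions.DEFAULT);
long numberOfJobs = response.count();                      // how many jobs matched
for (Job job : response.jobs()) {
    String id = job.getId();                               // e.g. log or display each job id
}
--------------------------------------------------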
@ -44,7 +44,7 @@ include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-open-job-exec
|
||||
the execution completes
|
||||
|
||||
The method does not block and returns immediately. The passed `ActionListener` is used
|
||||
to notify the caller of completion. A typical `ActionListner` for `OpenJobResponse` may
|
||||
to notify the caller of completion. A typical `ActionListener` for `OpenJobResponse` may
|
||||
look like
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
|
@ -205,12 +205,18 @@ include::licensing/delete-license.asciidoc[]
|
||||
The Java High Level REST Client supports the following Machine Learning APIs:
|
||||
|
||||
* <<java-rest-high-x-pack-ml-put-job>>
|
||||
* <<java-rest-high-x-pack-ml-get-job>>
|
||||
* <<java-rest-high-x-pack-ml-delete-job>>
|
||||
* <<java-rest-high-x-pack-ml-open-job>>
|
||||
* <<java-rest-high-x-pack-ml-close-job>>
|
||||
* <<java-rest-high-x-pack-ml-get-buckets>>
|
||||
|
||||
include::ml/put-job.asciidoc[]
|
||||
include::ml/get-job.asciidoc[]
|
||||
include::ml/delete-job.asciidoc[]
|
||||
include::ml/open-job.asciidoc[]
|
||||
include::ml/close-job.asciidoc[]
|
||||
include::ml/get-buckets.asciidoc[]
|
||||
|
||||
== Migration APIs
|
||||
|
||||
@ -229,3 +235,11 @@ The Java High Level REST Client supports the following Watcher APIs:
|
||||
|
||||
include::watcher/put-watch.asciidoc[]
|
||||
include::watcher/delete-watch.asciidoc[]
|
||||
|
||||
== Graph APIs
|
||||
|
||||
The Java High Level REST Client supports the following Graph APIs:
|
||||
|
||||
* <<java-rest-high-x-pack-graph-explore>>
|
||||
|
||||
include::graph/explore.asciidoc[]
|
||||
|
@ -26,7 +26,7 @@ The only variable that is available is `params`, which can be used to access use
|
||||
The result of the script is always converted to a string.
|
||||
If no context is specified then this context is used by default.
|
||||
|
||||
====== Example
|
||||
*Example*
|
||||
|
||||
Request:
|
||||
|
||||
@ -67,7 +67,7 @@ The following parameters may be specified in `context_setup` for a filter contex
|
||||
document:: Contains the document that will be temporarily indexed in-memory and is accessible from the script.
|
||||
index:: The name of an index containing a mapping that is compatible with the document being indexed.
|
||||
|
||||
====== Example
|
||||
*Example*
|
||||
|
||||
[source,js]
|
||||
----------------------------------------------------------------
|
||||
@ -125,7 +125,7 @@ document:: Contains the document that will be temporarily indexed in-memory and
|
||||
index:: The name of an index containing a mapping that is compatible with the document being indexed.
|
||||
query:: If `_score` is used in the script then a query can specified that will be used to compute a score.
|
||||
|
||||
====== Example
|
||||
*Example*
|
||||
|
||||
[source,js]
|
||||
----------------------------------------------------------------
|
||||
|
@ -144,7 +144,7 @@ Possible response:
|
||||
},
|
||||
"hits": {
|
||||
"total": 3,
|
||||
"max_score": 0.0,
|
||||
"max_score": null,
|
||||
"hits": []
|
||||
},
|
||||
"aggregations": {
|
||||
|
@ -25,7 +25,7 @@ the configured remote cluster alias.
|
||||
`num_nodes_connected`::
|
||||
The number of connected nodes in the remote cluster.
|
||||
|
||||
`max_connection_per_cluster`::
|
||||
`max_connections_per_cluster`::
|
||||
The maximum number of connections maintained for the remote cluster.
|
||||
|
||||
`initial_connect_timeout`::
|
||||
|
@ -30,6 +30,10 @@ in similar way to the <<query-dsl-multi-match-query,multi match query>>
|
||||
[WARNING]
|
||||
Note that the usage of `/_termvector` is deprecated in 2.0, and replaced by `/_termvectors`.
|
||||
|
||||
[WARNING]
|
||||
Term Vectors API doesn't work on nested fields. `/_termvectors` on a nested
|
||||
field and any sub-fields of a nested field returns empty results.
|
||||
|
||||
[float]
|
||||
=== Return values
|
||||
|
||||
|
@ -1141,7 +1141,7 @@ And the response (partially shown):
|
||||
},
|
||||
"hits" : {
|
||||
"total" : 1000,
|
||||
"max_score" : 0.0,
|
||||
"max_score" : null,
|
||||
"hits" : [ ]
|
||||
},
|
||||
"aggregations" : {
|
||||
|
@ -67,6 +67,13 @@ process equal to the size of the file being mapped. Before using this
|
||||
class, be sure you have allowed plenty of
|
||||
<<vm-max-map-count,virtual address space>>.
|
||||
|
||||
[[allow-mmapfs]]
|
||||
You can restrict the use of the `mmapfs` store type via the setting
|
||||
`node.store.allow_mmapfs`. This is a boolean setting indicating whether or not
|
||||
`mmapfs` is allowed. The default is to allow `mmapfs`. This setting is useful,
|
||||
for example, if you are in an environment where you cannot control the ability
|
||||
to create a lot of memory maps and so need to disable the ability to use `mmapfs`.
|
||||
|
||||
=== Pre-loading data into the file system cache
|
||||
|
||||
NOTE: This is an expert setting, the details of which may change in the future.
|
||||
|
@ -2,6 +2,9 @@
|
||||
=== `ignore_above`
|
||||
|
||||
Strings longer than the `ignore_above` setting will not be indexed or stored.
|
||||
For arrays of strings, `ignore_above` will be applied for each array element separately, and string elements longer than `ignore_above` will not be indexed or stored.
|
||||
|
||||
NOTE: All strings/array elements will still be present in the `_source` field, if the latter is enabled, which is the default in Elasticsearch.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
|
@ -151,7 +151,7 @@ returns
|
||||
},
|
||||
"hits": {
|
||||
"total": 3,
|
||||
"max_score": 0.0,
|
||||
"max_score": null,
|
||||
"hits": []
|
||||
},
|
||||
"aggregations": {
|
||||
|
@ -100,3 +100,8 @@ and the context is only accepted if `path` points to a field with `geo_point` ty
|
||||
`max_concurrent_shard_requests` used to limit the total number of concurrent shard
|
||||
requests a single high level search request can execute. In 7.0 this changed to be the
|
||||
max number of concurrent shard requests per node. The default is now `5`.
|
||||
|
||||
==== `max_score` set to `null` when scores are not tracked
|
||||
|
||||
`max_score` used to be set to `0` whenever scores are not tracked. `null` is now used
|
||||
instead which is a more appropriate value for a scenario where scores are not available.
|
||||
|
@ -531,3 +531,16 @@ native realm:
|
||||
* <<security-api-enable-user,Enable users>>, <<security-api-disable-user,Disable users>>
|
||||
* <<security-api-change-password,Change passwords>>
|
||||
* <<security-api-get-user,Get users>>
|
||||
|
||||
[role="exclude",id="security-api-role-mapping"]
|
||||
=== Role mapping APIs
|
||||
|
||||
You can use the following APIs to add, remove, and retrieve role mappings:
|
||||
|
||||
* <<security-api-put-role-mapping,Add role mappings>>, <<security-api-delete-role-mapping,Delete role mappings>>
|
||||
* <<security-api-get-role-mapping,Get role mappings>>
|
||||
|
||||
[role="exclude",id="security-api-privileges"]
|
||||
=== Privilege APIs
|
||||
|
||||
See <<security-api-has-privileges>>.
|
||||
|
@ -86,6 +86,16 @@ The msearch's `max_concurrent_searches` request parameter can be used to control
|
||||
the maximum number of concurrent searches the multi search api will execute.
|
||||
This default is based on the number of data nodes and the default search thread pool size.
|
||||
|
||||
The request parameter `max_concurrent_shard_requests` can be used to control the
|
||||
maximum number of concurrent shard requests that each sub search request will execute.
|
||||
This parameter should be used to protect a single request from overloading a cluster
|
||||
(e.g., a default request will hit all indices in a cluster which could cause shard request rejections
|
||||
if the number of shards per node is high). This default is based on the number of
|
||||
data nodes in the cluster but at most `256`. In certain scenarios parallelism isn't achieved through
|
||||
concurrent requests, such that this protection will result in poor performance. For
|
||||
instance in an environment where only a very low number of concurrent search requests are expected
|
||||
it might help to increase this value to a higher number.
|
||||
|
||||
[float]
|
||||
[[msearch-security]]
|
||||
=== Security
|
||||
|
@ -161,7 +161,7 @@ be set to `true` in the response.
|
||||
},
|
||||
"hits": {
|
||||
"total": 1,
|
||||
"max_score": 0.0,
|
||||
"max_score": null,
|
||||
"hits": []
|
||||
}
|
||||
}
|
||||
|
@ -30,6 +30,27 @@ GET /_search
|
||||
|
||||
Doc value fields can work on fields that are not stored.
|
||||
|
||||
`*` can be used as a wild card, for example:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_search
|
||||
{
|
||||
"query" : {
|
||||
"match_all": {}
|
||||
},
|
||||
"docvalue_fields" : [
|
||||
{
|
||||
"field": "*field", <1>
|
||||
"format": "use_field_mapping" <2>
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
<1> Match all fields ending with `field`
|
||||
<2> Format to be applied to all matching fields.
|
||||
|
||||
Note that if the fields parameter specifies fields without docvalues, it will try to load the value from the fielddata cache,
|
||||
causing the terms for that field to be loaded to memory (cached), which will result in more memory consumption.
|
||||
|
||||
|
@ -258,7 +258,7 @@ Which should look like:
|
||||
},
|
||||
"hits": {
|
||||
"total" : 0,
|
||||
"max_score" : 0.0,
|
||||
"max_score" : null,
|
||||
"hits" : []
|
||||
},
|
||||
"suggest": {
|
||||
|
docs/reference/settings/security-hash-settings.asciidoc (new file, 84 lines)
@ -0,0 +1,84 @@
|
||||
[float]
|
||||
[[hashing-settings]]
|
||||
==== User cache and password hash algorithms
|
||||
|
||||
Certain realms store user credentials in memory. To limit exposure
|
||||
to credential theft and mitigate credential compromise, the cache only stores
|
||||
a hashed version of the user credentials in memory. By default, the user cache
|
||||
is hashed with a salted `sha-256` hash algorithm. You can use a different
|
||||
hashing algorithm by setting the `cache.hash_algo` realm setting to any of the
|
||||
following values:
|
||||
|
||||
[[cache-hash-algo]]
|
||||
.Cache hash algorithms
|
||||
|=======================
|
||||
| Algorithm | | | Description
|
||||
| `ssha256` | | | Uses a salted `sha-256` algorithm (default).
|
||||
| `md5` | | | Uses `MD5` algorithm.
|
||||
| `sha1` | | | Uses `SHA1` algorithm.
|
||||
| `bcrypt` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds.
|
||||
| `bcrypt4` | | | Uses `bcrypt` algorithm with salt generated in 16 rounds.
|
||||
| `bcrypt5` | | | Uses `bcrypt` algorithm with salt generated in 32 rounds.
|
||||
| `bcrypt6` | | | Uses `bcrypt` algorithm with salt generated in 64 rounds.
|
||||
| `bcrypt7` | | | Uses `bcrypt` algorithm with salt generated in 128 rounds.
|
||||
| `bcrypt8` | | | Uses `bcrypt` algorithm with salt generated in 256 rounds.
|
||||
| `bcrypt9` | | | Uses `bcrypt` algorithm with salt generated in 512 rounds.
|
||||
| `pbkdf2` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 10000 iterations.
|
||||
| `pbkdf2_1000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 1000 iterations.
|
||||
| `pbkdf2_10000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 10000 iterations.
|
||||
| `pbkdf2_50000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 50000 iterations.
|
||||
| `pbkdf2_100000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 100000 iterations.
|
||||
| `pbkdf2_500000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 500000 iterations.
|
||||
| `pbkdf2_1000000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 1000000 iterations.
|
||||
| `noop`,`clear_text` | | | Doesn't hash the credentials and keeps them in clear text in
|
||||
memory. CAUTION: keeping clear text is considered insecure
|
||||
and can be compromised at the OS level (for example through
|
||||
memory dumps and using `ptrace`).
|
||||
|=======================
|
||||
|
||||
Likewise, realms that store passwords hash them using cryptographically strong
|
||||
and password-specific salt values. You can configure the algorithm for password
|
||||
hashing by setting the `xpack.security.authc.password_hashing.algorithm` setting
|
||||
to one of the following:
|
||||
|
||||
[[password-hashing-algorithms]]
|
||||
.Password hashing algorithms
|
||||
|=======================
|
||||
| Algorithm | | | Description
|
||||
|
||||
| `bcrypt` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds. (default)
|
||||
| `bcrypt4` | | | Uses `bcrypt` algorithm with salt generated in 16 rounds.
|
||||
| `bcrypt5` | | | Uses `bcrypt` algorithm with salt generated in 32 rounds.
|
||||
| `bcrypt6` | | | Uses `bcrypt` algorithm with salt generated in 64 rounds.
|
||||
| `bcrypt7` | | | Uses `bcrypt` algorithm with salt generated in 128 rounds.
|
||||
| `bcrypt8` | | | Uses `bcrypt` algorithm with salt generated in 256 rounds.
|
||||
| `bcrypt9` | | | Uses `bcrypt` algorithm with salt generated in 512 rounds.
|
||||
| `bcrypt10` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds.
|
||||
| `bcrypt11` | | | Uses `bcrypt` algorithm with salt generated in 2048 rounds.
|
||||
| `bcrypt12` | | | Uses `bcrypt` algorithm with salt generated in 4096 rounds.
|
||||
| `bcrypt13` | | | Uses `bcrypt` algorithm with salt generated in 8192 rounds.
|
||||
| `bcrypt14` | | | Uses `bcrypt` algorithm with salt generated in 16384 rounds.
|
||||
| `pbkdf2` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 10000 iterations.
|
||||
| `pbkdf2_1000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 1000 iterations.
|
||||
| `pbkdf2_10000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 10000 iterations.
|
||||
| `pbkdf2_50000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 50000 iterations.
|
||||
| `pbkdf2_100000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 100000 iterations.
|
||||
| `pbkdf2_500000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 500000 iterations.
|
||||
| `pbkdf2_1000000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a
|
||||
pseudorandom function using 1000000 iterations.
|
||||
|=======================
|
||||
|
||||
|
@ -46,12 +46,21 @@ settings for the ad1 realm: `xpack.security.authc.realms.ad1.*`. The API already
|
||||
omits all `ssl` settings, `bind_dn`, and `bind_password` due to the
|
||||
sensitive nature of the information.
|
||||
|
||||
`xpack.security.fips_mode.enabled`::
|
||||
Enables fips mode of operation. Set this to `true` if you run this {es} instance in a FIPS 140-2 enabled JVM. For more information, see <<fips-140-compliance>>. Defaults to `false`.
|
||||
|
||||
[float]
|
||||
[[password-security-settings]]
|
||||
==== Default password security settings
|
||||
`xpack.security.authc.accept_default_password`::
|
||||
In `elasticsearch.yml`, set this to `false` to disable support for the default "changeme" password.
|
||||
|
||||
[[password-hashing-settings]]
|
||||
==== Password hashing settings
|
||||
`xpack.security.authc.password_hashing.algorithm`::
|
||||
Specifies the hashing algorithm that is used for secure user credential storage.
|
||||
See <<password-hashing-algorithms>>. Defaults to `bcrypt`.
|
||||
|
||||
[float]
|
||||
[[anonymous-access-settings]]
|
||||
==== Anonymous access settings
|
||||
@ -164,9 +173,8 @@ the standard {es} <<time-units,time units>>. Defaults to `20m`.
|
||||
cache at any given time. Defaults to 100,000.
|
||||
|
||||
`cache.hash_algo`:: (Expert Setting) The hashing algorithm that is used for the
|
||||
in-memory cached user credentials. For possible values, see
|
||||
{xpack-ref}/controlling-user-cache.html[Cache hash algorithms]. Defaults to
|
||||
`ssha256`.
|
||||
in-memory cached user credentials. For possible values, see <<cache-hash-algo>>.
|
||||
Defaults to `ssha256`.
|
||||
|
||||
|
||||
[[ref-users-settings]]
|
||||
@ -190,8 +198,7 @@ Defaults to 100,000.
|
||||
|
||||
`cache.hash_algo`::
|
||||
(Expert Setting) The hashing algorithm that is used for the in-memory cached
|
||||
user credentials. See the {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] table for
|
||||
all possible values. Defaults to `ssha256`.
|
||||
user credentials. See <<cache-hash-algo>>. Defaults to `ssha256`.
|
||||
|
||||
[[ref-ldap-settings]]
|
||||
[float]
|
||||
@ -444,8 +451,7 @@ Defaults to `100000`.
|
||||
|
||||
`cache.hash_algo`::
|
||||
(Expert Setting) Specifies the hashing algorithm that is used for the
|
||||
in-memory cached user credentials. See {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms]
|
||||
table for all possible values. Defaults to `ssha256`.
|
||||
in-memory cached user credentials. See <<cache-hash-algo>>. Defaults to `ssha256`.
|
||||
|
||||
[[ref-ad-settings]]
|
||||
[float]
|
||||
@ -684,7 +690,7 @@ Defaults to `100000`.
|
||||
|
||||
`cache.hash_algo`::
|
||||
(Expert Setting) Specifies the hashing algorithm that is used for
|
||||
the in-memory cached user credentials (see {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] table for all possible values). Defaults to `ssha256`.
|
||||
the in-memory cached user credentials. See <<cache-hash-algo>>. Defaults to `ssha256`.
|
||||
|
||||
`follow_referrals`::
|
||||
If set to `true` {security} follows referrals returned by the LDAP server.
|
||||
@ -855,6 +861,15 @@ The maximum amount of skew that can be tolerated between the IdP's clock and the
|
||||
{es} node's clock.
|
||||
Defaults to `3m` (3 minutes).
|
||||
|
||||
`req_authn_context_class_ref`::
|
||||
A comma separated list of Authentication Context Class Reference values to be
|
||||
included in the Requested Authentication Context when requesting the IdP to
|
||||
authenticate the current user. The Authentication Context of the corresponding
|
||||
authentication response should contain at least one of the requested values.
|
||||
+
|
||||
For more information, see
|
||||
{stack-ov}/saml-guide-authentication.html#req-authn-context[Requesting specific authentication methods].
|
||||
|
||||
[float]
|
||||
[[ref-saml-signing-settings]]
|
||||
===== SAML realm signing settings
|
||||
@ -1121,7 +1136,12 @@ settings such as those for HTTP or Transport.
|
||||
`xpack.ssl.supported_protocols`::
|
||||
Supported protocols with versions. Valid protocols: `SSLv2Hello`,
|
||||
`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`,
|
||||
`TLSv1`.
|
||||
`TLSv1`.
|
||||
+
|
||||
--
|
||||
NOTE: If `xpack.security.fips_mode.enabled` is `true`, you cannot use `SSLv2Hello`
|
||||
or `SSLv3`. See <<fips-140-compliance>>.
|
||||
--
|
||||
|
||||
`xpack.ssl.client_authentication`::
|
||||
Controls the server's behavior in regard to requesting a certificate
|
||||
@ -1220,6 +1240,9 @@ Password to the truststore.
|
||||
`xpack.ssl.truststore.secure_password` (<<secure-settings,Secure>>)::
|
||||
Password to the truststore.
|
||||
|
||||
WARNING: If `xpack.security.fips_mode.enabled` is `true`, you cannot use Java
|
||||
keystore files. See <<fips-140-compliance>>.
|
||||
|
||||
[float]
|
||||
===== PKCS#12 files
|
||||
|
||||
@ -1258,6 +1281,9 @@ Password to the truststore.
|
||||
`xpack.ssl.truststore.secure_password` (<<secure-settings,Secure>>)::
|
||||
Password to the truststore.
|
||||
|
||||
WARNING: If `xpack.security.fips_mode.enabled` is `true`, you cannot use PKCS#12
|
||||
keystore files. See <<fips-140-compliance>>.
|
||||
|
||||
[[pkcs12-truststore-note]]
|
||||
[NOTE]
|
||||
Storing trusted certificates in a PKCS#12 file, although supported, is
|
||||
@ -1335,3 +1361,5 @@ List of IP addresses to allow for this profile.
|
||||
|
||||
`transport.profiles.$PROFILE.xpack.security.filter.deny`::
|
||||
List of IP addresses to deny for this profile.
|
||||
|
||||
include::security-hash-settings.asciidoc[]
|
@ -155,6 +155,11 @@ the kernel allows a process to have at least 262,144 memory-mapped areas
|
||||
and is enforced on Linux only. To pass the maximum map count check, you
|
||||
must configure `vm.max_map_count` via `sysctl` to be at least `262144`.
|
||||
|
||||
Alternatively, the maximum map count check is only needed if you are using
|
||||
`mmapfs` as the <<index-modules-store,store type>> for your indices. If you
|
||||
<<allow-mmapfs,do not allow>> the use of `mmapfs` then this bootstrap check will
|
||||
not be enforced.
|
||||
|
||||
=== Client JVM check
|
||||
|
||||
There are two different JVMs provided by OpenJDK-derived JVMs: the
|
||||
|
@ -41,6 +41,8 @@ Elasticsearch website or from our RPM repository.
|
||||
|
||||
`msi`::
|
||||
|
||||
beta[]
|
||||
+
|
||||
The `msi` package is suitable for installation on Windows 64-bit systems with at least
|
||||
.NET 4.5 framework installed, and is the easiest choice for getting started with
|
||||
Elasticsearch on Windows. MSIs may be downloaded from the Elasticsearch website.
|
||||
|
@ -91,9 +91,6 @@ using the `bin/elasticsearch-keystore add` command, call:
|
||||
[source,js]
|
||||
----
|
||||
POST _nodes/reload_secure_settings
|
||||
{
|
||||
"secure_settings_password": ""
|
||||
}
|
||||
----
|
||||
// CONSOLE
|
||||
This API will decrypt and re-read the entire keystore, on every cluster node,
|
||||
|
@@ -16,9 +16,6 @@
 * specific language governing permissions and limitations
 * under the License.
 */

import org.elasticsearch.gradle.precommit.PrecommitTasks

apply plugin: 'elasticsearch.build'
apply plugin: 'nebula.optional-base'
apply plugin: 'nebula.maven-base-publish'
@@ -34,5 +31,5 @@ test.enabled = false
jarHell.enabled = false

forbiddenApisMain {
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
replaceSignatureFiles 'jdk-signatures'
}

@@ -1,5 +1,3 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
@@ -91,7 +89,7 @@ dependencies {
forbiddenApisMain {
// :libs:core does not depend on server
// TODO: Need to decide how we want to handle for forbidden signatures with the changes to server
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
replaceSignatureFiles 'jdk-signatures'
}

if (isEclipse) {

@@ -1,5 +1,3 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
@@ -33,7 +31,7 @@ dependencies {
}

forbiddenApisMain {
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
replaceSignatureFiles 'jdk-signatures'
}

if (isEclipse) {

@@ -1,5 +1,3 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
@@ -34,7 +32,7 @@ dependencies {
}

forbiddenApisMain {
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
replaceSignatureFiles 'jdk-signatures'
}

if (isEclipse) {

@@ -16,9 +16,6 @@
 * specific language governing permissions and limitations
 * under the License.
 */

import org.elasticsearch.gradle.precommit.PrecommitTasks

apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'

@@ -62,5 +59,5 @@ if (isEclipse) {
forbiddenApisMain {
// nio does not depend on core, so only jdk signatures should be checked
// es-all is not checked as we connect and accept sockets
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
replaceSignatureFiles 'jdk-signatures'
}

@@ -16,9 +16,6 @@
 * specific language governing permissions and limitations
 * under the License.
 */

import org.elasticsearch.gradle.precommit.PrecommitTasks

apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'

@@ -47,7 +44,7 @@ dependencies {
}

forbiddenApisMain {
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
replaceSignatureFiles 'jdk-signatures'
}

if (isEclipse) {

@@ -1,5 +1,3 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
@@ -57,7 +55,7 @@ dependencies {
forbiddenApisMain {
// x-content does not depend on server
// TODO: Need to decide how we want to handle for forbidden signatures with the changes to core
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
replaceSignatureFiles 'jdk-signatures'
}

if (isEclipse) {

@ -131,7 +131,7 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder {
|
||||
for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) {
|
||||
intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx);
|
||||
}
|
||||
result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0);
|
||||
result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, Float.NaN);
|
||||
} else {
|
||||
int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
|
||||
TopDocsCollector<?> topDocsCollector;
|
||||
|
@ -21,11 +21,11 @@ package org.elasticsearch.http.netty4;
|
||||
|
||||
import io.netty.channel.Channel;
|
||||
import io.netty.channel.ChannelPromise;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.common.concurrent.CompletableContext;
|
||||
import org.elasticsearch.http.HttpChannel;
|
||||
import org.elasticsearch.http.HttpResponse;
|
||||
import org.elasticsearch.transport.netty4.Netty4Utils;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
|
||||
@ -42,7 +42,7 @@ public class Netty4HttpChannel implements HttpChannel {
|
||||
} else {
|
||||
Throwable cause = f.cause();
|
||||
if (cause instanceof Error) {
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
closeContext.completeExceptionally(new Exception(cause));
|
||||
} else {
|
||||
closeContext.completeExceptionally((Exception) cause);
|
||||
@ -59,7 +59,7 @@ public class Netty4HttpChannel implements HttpChannel {
|
||||
listener.onResponse(null);
|
||||
} else {
|
||||
final Throwable cause = f.cause();
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
if (cause instanceof Error) {
|
||||
listener.onFailure(new Exception(cause));
|
||||
} else {
|
||||
|
@ -27,7 +27,6 @@ import io.netty.handler.codec.http.DefaultFullHttpRequest;
|
||||
import io.netty.handler.codec.http.FullHttpRequest;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.http.HttpPipelinedRequest;
|
||||
import org.elasticsearch.transport.netty4.Netty4Utils;
|
||||
|
||||
@ChannelHandler.Sharable
|
||||
class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<FullHttpRequest>> {
|
||||
@ -58,7 +57,7 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelined
|
||||
if (request.decoderResult().isFailure()) {
|
||||
Throwable cause = request.decoderResult().cause();
|
||||
if (cause instanceof Error) {
|
||||
ExceptionsHelper.dieOnError(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
serverTransport.incomingRequestError(httpRequest, channel, new Exception(cause));
|
||||
} else {
|
||||
serverTransport.incomingRequestError(httpRequest, channel, (Exception) cause);
|
||||
@ -74,7 +73,7 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelined
|
||||
|
||||
@Override
|
||||
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
Netty4HttpChannel channel = ctx.channel().attr(Netty4HttpServerTransport.HTTP_CHANNEL_KEY).get();
|
||||
if (cause instanceof Error) {
|
||||
serverTransport.onException(channel, new Exception(cause));
|
||||
|
@ -20,10 +20,10 @@
|
||||
package org.elasticsearch.http.netty4;
|
||||
|
||||
import io.netty.channel.Channel;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.common.concurrent.CompletableContext;
|
||||
import org.elasticsearch.http.HttpServerChannel;
|
||||
import org.elasticsearch.transport.netty4.Netty4Utils;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
|
||||
@ -40,7 +40,7 @@ public class Netty4HttpServerChannel implements HttpServerChannel {
|
||||
} else {
|
||||
Throwable cause = f.cause();
|
||||
if (cause instanceof Error) {
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
closeContext.completeExceptionally(new Exception(cause));
|
||||
} else {
|
||||
closeContext.completeExceptionally((Exception) cause);
|
||||
|
@ -41,6 +41,7 @@ import io.netty.handler.codec.http.HttpResponseEncoder;
|
||||
import io.netty.handler.timeout.ReadTimeoutException;
|
||||
import io.netty.handler.timeout.ReadTimeoutHandler;
|
||||
import io.netty.util.AttributeKey;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.network.CloseableChannel;
|
||||
import org.elasticsearch.common.network.NetworkService;
|
||||
@ -338,7 +339,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
|
||||
|
||||
@Override
|
||||
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
super.exceptionCaught(ctx, cause);
|
||||
}
|
||||
}
|
||||
@ -354,7 +355,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
|
||||
|
||||
@Override
|
||||
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
Netty4HttpServerChannel httpServerChannel = ctx.channel().attr(HTTP_SERVER_CHANNEL_KEY).get();
|
||||
if (cause instanceof Error) {
|
||||
transport.onServerException(httpServerChannel, new Exception(cause));
|
||||
|
@ -68,7 +68,7 @@ final class Netty4MessageChannelHandler extends ChannelDuplexHandler {
|
||||
|
||||
@Override
|
||||
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
final Throwable unwrapped = ExceptionsHelper.unwrap(cause, ElasticsearchException.class);
|
||||
final Throwable newCause = unwrapped != null ? unwrapped : cause;
|
||||
Netty4TcpChannel tcpChannel = ctx.channel().attr(Netty4Transport.CHANNEL_KEY).get();
|
||||
|
@ -22,6 +22,7 @@ package org.elasticsearch.transport.netty4;
|
||||
import io.netty.channel.Channel;
|
||||
import io.netty.channel.ChannelOption;
|
||||
import io.netty.channel.ChannelPromise;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.concurrent.CompletableContext;
|
||||
@ -45,7 +46,7 @@ public class Netty4TcpChannel implements TcpChannel {
|
||||
} else {
|
||||
Throwable cause = f.cause();
|
||||
if (cause instanceof Error) {
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
closeContext.completeExceptionally(new Exception(cause));
|
||||
} else {
|
||||
closeContext.completeExceptionally((Exception) cause);
|
||||
@ -97,7 +98,7 @@ public class Netty4TcpChannel implements TcpChannel {
|
||||
listener.onResponse(null);
|
||||
} else {
|
||||
final Throwable cause = f.cause();
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
if (cause instanceof Error) {
|
||||
listener.onFailure(new Exception(cause));
|
||||
} else {
|
||||
|
@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.transport.netty4;
|
||||
|
||||
import io.netty.channel.Channel;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.common.concurrent.CompletableContext;
|
||||
import org.elasticsearch.transport.TcpServerChannel;
|
||||
@ -41,7 +42,7 @@ public class Netty4TcpServerChannel implements TcpServerChannel {
|
||||
} else {
|
||||
Throwable cause = f.cause();
|
||||
if (cause instanceof Error) {
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
closeContext.completeExceptionally(new Exception(cause));
|
||||
} else {
|
||||
closeContext.completeExceptionally((Exception) cause);
|
||||
|
@ -38,6 +38,7 @@ import io.netty.util.AttributeKey;
|
||||
import io.netty.util.concurrent.Future;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
@ -228,7 +229,7 @@ public class Netty4Transport extends TcpTransport {
|
||||
ChannelFuture channelFuture = bootstrap.connect(address);
|
||||
Channel channel = channelFuture.channel();
|
||||
if (channel == null) {
|
||||
Netty4Utils.maybeDie(channelFuture.cause());
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(channelFuture.cause());
|
||||
throw new IOException(channelFuture.cause());
|
||||
}
|
||||
addClosedExceptionLogger(channel);
|
||||
@ -242,7 +243,7 @@ public class Netty4Transport extends TcpTransport {
|
||||
} else {
|
||||
Throwable cause = f.cause();
|
||||
if (cause instanceof Error) {
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
listener.onFailure(new Exception(cause));
|
||||
} else {
|
||||
listener.onFailure((Exception) cause);
|
||||
@ -307,7 +308,7 @@ public class Netty4Transport extends TcpTransport {
|
||||
|
||||
@Override
|
||||
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
super.exceptionCaught(ctx, cause);
|
||||
}
|
||||
}
|
||||
@ -333,7 +334,7 @@ public class Netty4Transport extends TcpTransport {
|
||||
|
||||
@Override
|
||||
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
super.exceptionCaught(ctx, cause);
|
||||
}
|
||||
}
|
||||
@ -351,7 +352,7 @@ public class Netty4Transport extends TcpTransport {
|
||||
|
||||
@Override
|
||||
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
|
||||
Netty4Utils.maybeDie(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
Netty4TcpServerChannel serverChannel = ctx.channel().attr(SERVER_CHANNEL_KEY).get();
|
||||
if (cause instanceof Error) {
|
||||
onServerException(serverChannel, new Exception(cause));
|
||||
|
@ -27,20 +27,16 @@ import io.netty.channel.ChannelFuture;
|
||||
import io.netty.util.NettyRuntime;
|
||||
import io.netty.util.internal.logging.InternalLogger;
|
||||
import io.netty.util.internal.logging.InternalLoggerFactory;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.BytesRefIterator;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.common.Booleans;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.logging.ESLoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
public class Netty4Utils {
|
||||
@ -161,34 +157,4 @@ public class Netty4Utils {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be
|
||||
* caught and bubbles up to the uncaught exception handler.
|
||||
*
|
||||
* @param cause the throwable to test
|
||||
*/
|
||||
public static void maybeDie(final Throwable cause) {
|
||||
final Logger logger = ESLoggerFactory.getLogger(Netty4Utils.class);
|
||||
final Optional<Error> maybeError = ExceptionsHelper.maybeError(cause, logger);
|
||||
if (maybeError.isPresent()) {
|
||||
/*
|
||||
* Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many
|
||||
* invocations of user-code in try/catch blocks that swallow all throwables. This means that a rethrow here will not bubble up
|
||||
* to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap
|
||||
* the exception so as to not lose the original cause during exit.
|
||||
*/
|
||||
try {
|
||||
// try to log the current stack trace
|
||||
final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace());
|
||||
logger.error("fatal error on the network layer\n{}", formatted);
|
||||
} finally {
|
||||
new Thread(
|
||||
() -> {
|
||||
throw maybeError.get();
|
||||
})
|
||||
.start();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -1,3 +1,5 @@
import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
@@ -22,7 +24,7 @@ esplugin {
classname 'org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin'
}

forbiddenApis {
tasks.withType(ForbiddenApisCliTask) {
signatures += [
"com.ibm.icu.text.Collator#getInstance() @ Don't use default locale, use getInstance(ULocale) instead"
]

@ -139,7 +139,7 @@ public class HttpReadWriteHandler implements ReadWriteHandler {
|
||||
if (request.decoderResult().isFailure()) {
|
||||
Throwable cause = request.decoderResult().cause();
|
||||
if (cause instanceof Error) {
|
||||
ExceptionsHelper.dieOnError(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
transport.incomingRequestError(httpRequest, nioHttpChannel, new Exception(cause));
|
||||
} else {
|
||||
transport.incomingRequestError(httpRequest, nioHttpChannel, (Exception) cause);
|
||||
|
@ -73,7 +73,7 @@ public class NettyAdaptor implements AutoCloseable {
|
||||
closeFuture.await();
|
||||
if (closeFuture.isSuccess() == false) {
|
||||
Throwable cause = closeFuture.cause();
|
||||
ExceptionsHelper.dieOnError(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
throw (Exception) cause;
|
||||
}
|
||||
}
|
||||
@ -84,7 +84,7 @@ public class NettyAdaptor implements AutoCloseable {
|
||||
listener.accept(null, null);
|
||||
} else {
|
||||
final Throwable cause = f.cause();
|
||||
ExceptionsHelper.dieOnError(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
assert cause instanceof Exception;
|
||||
listener.accept(null, (Exception) cause);
|
||||
}
|
||||
|
@ -223,7 +223,7 @@ public class NettyListener implements BiConsumer<Void, Exception>, ChannelPromis
|
||||
biConsumer.accept(null, null);
|
||||
} else {
|
||||
if (cause instanceof Error) {
|
||||
ExceptionsHelper.dieOnError(cause);
|
||||
ExceptionsHelper.maybeDieOnAnotherThread(cause);
|
||||
biConsumer.accept(null, new Exception(cause));
|
||||
} else {
|
||||
biConsumer.accept(null, (Exception) cause);
|
||||
|
@@ -21,5 +21,5 @@ apply plugin: 'elasticsearch.rest-test'
apply plugin: 'elasticsearch.test-with-dependencies'

dependencies {
testCompile project(path: ':client:rest-high-level', configuration: 'shadow')
testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}"
}

@@ -90,14 +90,14 @@ public class DieWithDignityIT extends ESRestTestCase {

final Iterator<String> it = lines.iterator();

boolean fatalErrorOnTheNetworkLayer = false;
boolean fatalError = false;
boolean fatalErrorInThreadExiting = false;

while (it.hasNext() && (fatalErrorOnTheNetworkLayer == false || fatalErrorInThreadExiting == false)) {
while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) {
final String line = it.next();
if (line.contains("fatal error on the network layer")) {
fatalErrorOnTheNetworkLayer = true;
} else if (line.matches(".*\\[ERROR\\]\\[o.e.b.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]"
if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.ExceptionsHelper\\s*\\] \\[node-0\\] fatal error")) {
fatalError = true;
} else if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]"
+ " fatal error in thread \\[Thread-\\d+\\], exiting$")) {
fatalErrorInThreadExiting = true;
assertTrue(it.hasNext());
@@ -105,7 +105,7 @@ public class DieWithDignityIT extends ESRestTestCase {
}
}

assertTrue(fatalErrorOnTheNetworkLayer);
assertTrue(fatalError);
assertTrue(fatalErrorInThreadExiting);
}

@ -1,5 +1,3 @@
|
||||
import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
@ -69,9 +67,7 @@ esvagrant {
|
||||
}
|
||||
|
||||
forbiddenApisMain {
|
||||
signaturesURLs = [
|
||||
PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')
|
||||
]
|
||||
replaceSignatureFiles 'jdk-signatures'
|
||||
}
|
||||
|
||||
// we don't have additional tests for the tests themselves
|
||||
|
@@ -33,6 +33,11 @@
"type" : "number",
"description" : "A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on it's rewrite method ie. if date filters are mandatory to match but the shard bounds and the query are disjoint.",
"default" : 128
},
"max_concurrent_shard_requests" : {
"type" : "number",
"description" : "The number of concurrent shard requests each sub search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests",
"default" : "The default grows with the number of nodes in the cluster but is at most 256."
}
}
},

@ -61,3 +61,35 @@ setup:
|
||||
- match: { responses.3.error.root_cause.0.reason: "/no.such.index/" }
|
||||
- match: { responses.3.error.root_cause.0.index: index_3 }
|
||||
- match: { responses.4.hits.total: 4 }
|
||||
|
||||
---
|
||||
"Least impact smoke test":
|
||||
# only passing these parameters to make sure they are consumed
|
||||
- do:
|
||||
max_concurrent_shard_requests: 1
|
||||
max_concurrent_searches: 1
|
||||
msearch:
|
||||
body:
|
||||
- index: index_*
|
||||
- query:
|
||||
match: {foo: foo}
|
||||
- index: index_2
|
||||
- query:
|
||||
match_all: {}
|
||||
- index: index_1
|
||||
- query:
|
||||
match: {foo: foo}
|
||||
- index: index_3
|
||||
- query:
|
||||
match_all: {}
|
||||
- type: test
|
||||
- query:
|
||||
match_all: {}
|
||||
|
||||
- match: { responses.0.hits.total: 2 }
|
||||
- match: { responses.1.hits.total: 1 }
|
||||
- match: { responses.2.hits.total: 1 }
|
||||
- match: { responses.3.error.root_cause.0.type: index_not_found_exception }
|
||||
- match: { responses.3.error.root_cause.0.reason: "/no.such.index/" }
|
||||
- match: { responses.3.error.root_cause.0.index: index_3 }
|
||||
- match: { responses.4.hits.total: 4 }
|
||||
|
@ -233,3 +233,51 @@
|
||||
query:
|
||||
match_all: {}
|
||||
size: 0
|
||||
|
||||
---
|
||||
"Scroll max_score is null":
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: max_score was set to 0 rather than null before 7.0
|
||||
|
||||
- do:
|
||||
indices.create:
|
||||
index: test_scroll
|
||||
- do:
|
||||
index:
|
||||
index: test_scroll
|
||||
type: test
|
||||
id: 42
|
||||
body: { foo: 1 }
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: test_scroll
|
||||
type: test
|
||||
id: 43
|
||||
body: { foo: 2 }
|
||||
|
||||
- do:
|
||||
indices.refresh: {}
|
||||
|
||||
- do:
|
||||
search:
|
||||
index: test_scroll
|
||||
size: 1
|
||||
scroll: 1m
|
||||
sort: foo
|
||||
body:
|
||||
query:
|
||||
match_all: {}
|
||||
|
||||
- set: {_scroll_id: scroll_id}
|
||||
- length: {hits.hits: 1 }
|
||||
- match: { hits.max_score: null }
|
||||
|
||||
- do:
|
||||
scroll:
|
||||
scroll_id: $scroll_id
|
||||
scroll: 1m
|
||||
|
||||
- length: {hits.hits: 1 }
|
||||
- match: { hits.max_score: null }
|
||||
|
@ -244,6 +244,23 @@ setup:
|
||||
- match: { hits.total: 6 }
|
||||
- length: { hits.hits: 0 }
|
||||
|
||||
---
|
||||
"no hits and inner_hits max_score null":
|
||||
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: max_score was set to 0 rather than null before 7.0
|
||||
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
size: 0
|
||||
collapse: { field: numeric_group, inner_hits: { name: sub_hits, size: 1} }
|
||||
sort: [{ sort: desc }]
|
||||
|
||||
- match: { hits.max_score: null }
|
||||
|
||||
---
|
||||
"field collapsing and multiple inner_hits":
|
||||
|
||||
|
@ -128,7 +128,6 @@ setup:
|
||||
- match: { hits.total: 2 }
|
||||
- match: { aggregations.some_agg.doc_count: 3 }
|
||||
|
||||
|
||||
- do:
|
||||
search:
|
||||
pre_filter_shard_size: 1
|
||||
|
@ -39,6 +39,7 @@ setup:
|
||||
df: text
|
||||
|
||||
- match: {hits.total: 1}
|
||||
- match: {hits.max_score: 1}
|
||||
- match: {hits.hits.0._score: 1}
|
||||
|
||||
- do:
|
||||
@ -52,6 +53,7 @@ setup:
|
||||
boost: 2
|
||||
|
||||
- match: {hits.total: 1}
|
||||
- match: {hits.max_score: 2}
|
||||
- match: {hits.hits.0._score: 2}
|
||||
|
||||
- do:
|
||||
@ -61,6 +63,7 @@ setup:
|
||||
df: text
|
||||
|
||||
- match: {hits.total: 1}
|
||||
- match: {hits.max_score: 1}
|
||||
- match: {hits.hits.0._score: 1}
|
||||
|
||||
---
|
||||
|
@ -29,6 +29,7 @@
|
||||
query_weight: 5
|
||||
rescore_query_weight: 10
|
||||
|
||||
- match: {hits.max_score: 15}
|
||||
- match: { hits.hits.0._score: 15 }
|
||||
- match: { hits.hits.0._explanation.value: 15 }
|
||||
|
||||
|
@ -0,0 +1,49 @@
|
||||
setup:
|
||||
- do:
|
||||
indices.create:
|
||||
index: testidx
|
||||
body:
|
||||
mappings:
|
||||
_doc:
|
||||
properties:
|
||||
nested1:
|
||||
type : nested
|
||||
properties:
|
||||
nested1-text:
|
||||
type: text
|
||||
object1:
|
||||
properties:
|
||||
object1-text:
|
||||
type: text
|
||||
object1-nested1:
|
||||
type: nested
|
||||
properties:
|
||||
object1-nested1-text:
|
||||
type: text
|
||||
- do:
|
||||
index:
|
||||
index: testidx
|
||||
type: _doc
|
||||
id: 1
|
||||
body:
|
||||
"nested1" : [{ "nested1-text": "text1" }]
|
||||
"object1" : [{ "object1-text": "text2" }, "object1-nested1" : [{"object1-nested1-text" : "text3"}]]
|
||||
|
||||
- do:
|
||||
indices.refresh: {}
|
||||
|
||||
---
|
||||
"Termvectors on nested fields should return empty results":
|
||||
|
||||
- do:
|
||||
termvectors:
|
||||
index: testidx
|
||||
type: _doc
|
||||
id: 1
|
||||
fields: ["nested1", "nested1.nested1-text", "object1.object1-nested1", "object1.object1-nested1.object1-nested1-text", "object1.object1-text"]
|
||||
|
||||
- is_false: term_vectors.nested1
|
||||
- is_false: term_vectors.nested1\.nested1-text # escaping as the field name contains dot
|
||||
- is_false: term_vectors.object1\.object1-nested1
|
||||
- is_false: term_vectors.object1\.object1-nested1\.object1-nested1-text
|
||||
- is_true: term_vectors.object1\.object1-text
|
@ -136,42 +136,6 @@ public final class ExceptionsHelper {
|
||||
return Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n"));
|
||||
}
|
||||
|
||||
static final int MAX_ITERATIONS = 1024;
|
||||
|
||||
/**
|
||||
* Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable.
|
||||
*
|
||||
* @param cause the root throwable
|
||||
*
|
||||
* @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable
|
||||
*/
|
||||
public static Optional<Error> maybeError(final Throwable cause, final Logger logger) {
|
||||
// early terminate if the cause is already an error
|
||||
if (cause instanceof Error) {
|
||||
return Optional.of((Error) cause);
|
||||
}
|
||||
|
||||
final Queue<Throwable> queue = new LinkedList<>();
|
||||
queue.add(cause);
|
||||
int iterations = 0;
|
||||
while (!queue.isEmpty()) {
|
||||
iterations++;
|
||||
if (iterations > MAX_ITERATIONS) {
|
||||
logger.warn("giving up looking for fatal errors", cause);
|
||||
break;
|
||||
}
|
||||
final Throwable current = queue.remove();
|
||||
if (current instanceof Error) {
|
||||
return Optional.of((Error) current);
|
||||
}
|
||||
Collections.addAll(queue, current.getSuppressed());
|
||||
if (current.getCause() != null) {
|
||||
queue.add(current.getCause());
|
||||
}
|
||||
}
|
||||
return Optional.empty();
|
||||
}
|
||||
|
||||
/**
|
||||
* Rethrows the first exception in the list and adds all remaining to the suppressed list.
|
||||
* If the given list is empty no exception is thrown
|
||||
@ -243,20 +207,57 @@ public final class ExceptionsHelper {
|
||||
return true;
|
||||
}
|
||||
|
||||
static final int MAX_ITERATIONS = 1024;
|
||||
|
||||
/**
|
||||
* Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable.
|
||||
*
|
||||
* @param cause the root throwable
|
||||
* @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable
|
||||
*/
|
||||
public static Optional<Error> maybeError(final Throwable cause, final Logger logger) {
|
||||
// early terminate if the cause is already an error
|
||||
if (cause instanceof Error) {
|
||||
return Optional.of((Error) cause);
|
||||
}
|
||||
|
||||
final Queue<Throwable> queue = new LinkedList<>();
|
||||
queue.add(cause);
|
||||
int iterations = 0;
|
||||
while (queue.isEmpty() == false) {
|
||||
iterations++;
|
||||
// this is a guard against deeply nested or circular chains of exceptions
|
||||
if (iterations > MAX_ITERATIONS) {
|
||||
logger.warn("giving up looking for fatal errors", cause);
|
||||
break;
|
||||
}
|
||||
final Throwable current = queue.remove();
|
||||
if (current instanceof Error) {
|
||||
return Optional.of((Error) current);
|
||||
}
|
||||
Collections.addAll(queue, current.getSuppressed());
|
||||
if (current.getCause() != null) {
|
||||
queue.add(current.getCause());
|
||||
}
|
||||
}
|
||||
return Optional.empty();
|
||||
}
|
||||
|
||||
/**
|
||||
* If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be
|
||||
* caught and bubbles up to the uncaught exception handler.
|
||||
* caught and bubbles up to the uncaught exception handler. Note that the cause tree is examined for any {@link Error}. See
|
||||
* {@link #maybeError(Throwable, Logger)} for the semantics.
|
||||
*
|
||||
* @param throwable the throwable to test
|
||||
* @param throwable the throwable to possibly throw on another thread
|
||||
*/
|
||||
public static void dieOnError(Throwable throwable) {
|
||||
final Optional<Error> maybeError = ExceptionsHelper.maybeError(throwable, logger);
|
||||
if (maybeError.isPresent()) {
|
||||
public static void maybeDieOnAnotherThread(final Throwable throwable) {
|
||||
ExceptionsHelper.maybeError(throwable, logger).ifPresent(error -> {
|
||||
/*
|
||||
* Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many
|
||||
* invocations of user-code in try/catch blocks that swallow all throwables. This means that a rethrow here will not bubble up
|
||||
* to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap
|
||||
* the exception so as to not lose the original cause during exit.
|
||||
* Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, sometimes the stack
|
||||
* contains statements that catch any throwable (e.g., Netty, and the JDK futures framework). This means that a rethrow here
|
||||
* will not bubble up to where we want it to. So, we fork a thread and throw the exception from there where we are sure the
|
||||
* stack does not contain statements that catch any throwable. We do not wrap the exception so as to not lose the original cause
|
||||
* during exit.
|
||||
*/
|
||||
try {
|
||||
// try to log the current stack trace
|
||||
@ -264,12 +265,12 @@ public final class ExceptionsHelper {
|
||||
logger.error("fatal error\n{}", formatted);
|
||||
} finally {
|
||||
new Thread(
|
||||
() -> {
|
||||
throw maybeError.get();
|
||||
})
|
||||
.start();
|
||||
() -> {
|
||||
throw error;
|
||||
})
|
||||
.start();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -122,6 +122,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
|
||||
public static final Version V_5_6_10 = new Version(V_5_6_10_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
|
||||
public static final int V_5_6_11_ID = 5061199;
|
||||
public static final Version V_5_6_11 = new Version(V_5_6_11_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
|
||||
public static final int V_5_6_12_ID = 5061299;
|
||||
public static final Version V_5_6_12 = new Version(V_5_6_12_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
|
||||
public static final int V_6_0_0_alpha1_ID = 6000001;
|
||||
public static final Version V_6_0_0_alpha1 =
|
||||
new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
|
||||
@ -174,10 +176,10 @@ public class Version implements Comparable<Version>, ToXContentFragment {
|
||||
public static final Version V_6_3_1 = new Version(V_6_3_1_ID, org.apache.lucene.util.Version.LUCENE_7_3_1);
|
||||
public static final int V_6_3_2_ID = 6030299;
|
||||
public static final Version V_6_3_2 = new Version(V_6_3_2_ID, org.apache.lucene.util.Version.LUCENE_7_3_1);
|
||||
public static final int V_6_3_3_ID = 6030399;
|
||||
public static final Version V_6_3_3 = new Version(V_6_3_3_ID, org.apache.lucene.util.Version.LUCENE_7_3_1);
|
||||
public static final int V_6_4_0_ID = 6040099;
|
||||
public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0);
|
||||
public static final int V_6_4_1_ID = 6040199;
|
||||
public static final Version V_6_4_1 = new Version(V_6_4_1_ID, org.apache.lucene.util.Version.LUCENE_7_4_0);
|
||||
public static final int V_6_5_0_ID = 6050099;
|
||||
public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0);
|
||||
public static final int V_7_0_0_alpha1_ID = 7000001;
|
||||
@ -200,10 +202,10 @@ public class Version implements Comparable<Version>, ToXContentFragment {
|
||||
return V_7_0_0_alpha1;
|
||||
case V_6_5_0_ID:
|
||||
return V_6_5_0;
|
||||
case V_6_4_1_ID:
|
||||
return V_6_4_1;
|
||||
case V_6_4_0_ID:
|
||||
return V_6_4_0;
|
||||
case V_6_3_3_ID:
|
||||
return V_6_3_3;
|
||||
case V_6_3_2_ID:
|
||||
return V_6_3_2;
|
||||
case V_6_3_1_ID:
|
||||
@ -246,6 +248,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
|
||||
return V_6_0_0_alpha2;
|
||||
case V_6_0_0_alpha1_ID:
|
||||
return V_6_0_0_alpha1;
|
||||
case V_5_6_12_ID:
|
||||
return V_5_6_12;
|
||||
case V_5_6_11_ID:
|
||||
return V_5_6_11;
|
||||
case V_5_6_10_ID:
|
||||
|
@ -521,7 +521,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
||||
void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListener<BulkResponse> listener) {
|
||||
long ingestStartTimeInNanos = System.nanoTime();
|
||||
BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
|
||||
ingestService.getPipelineExecutionService().executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> {
|
||||
ingestService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> {
|
||||
logger.debug(() -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]",
|
||||
indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception);
|
||||
bulkRequestModifier.markCurrentItemAsFailed(exception);
|
||||
|
@ -27,26 +27,23 @@ import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.ingest.PipelineStore;
|
||||
import org.elasticsearch.node.NodeService;
|
||||
import org.elasticsearch.ingest.IngestService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
public class DeletePipelineTransportAction extends TransportMasterNodeAction<DeletePipelineRequest, AcknowledgedResponse> {
|
||||
|
||||
private final PipelineStore pipelineStore;
|
||||
private final ClusterService clusterService;
|
||||
private final IngestService ingestService;
|
||||
|
||||
@Inject
|
||||
public DeletePipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
public DeletePipelineTransportAction(Settings settings, ThreadPool threadPool, IngestService ingestService,
|
||||
TransportService transportService, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) {
|
||||
super(settings, DeletePipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeletePipelineRequest::new);
|
||||
this.clusterService = clusterService;
|
||||
this.pipelineStore = nodeService.getIngestService().getPipelineStore();
|
||||
IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, DeletePipelineAction.NAME, transportService, ingestService.getClusterService(),
|
||||
threadPool, actionFilters, indexNameExpressionResolver, DeletePipelineRequest::new);
|
||||
this.ingestService = ingestService;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -60,8 +57,9 @@ public class DeletePipelineTransportAction extends TransportMasterNodeAction<Del
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void masterOperation(DeletePipelineRequest request, ClusterState state, ActionListener<AcknowledgedResponse> listener) throws Exception {
|
||||
pipelineStore.delete(clusterService, request, listener);
|
||||
protected void masterOperation(DeletePipelineRequest request, ClusterState state,
|
||||
ActionListener<AcknowledgedResponse> listener) throws Exception {
|
||||
ingestService.delete(request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -29,21 +29,17 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.ingest.PipelineStore;
|
||||
import org.elasticsearch.node.NodeService;
|
||||
import org.elasticsearch.ingest.IngestService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
public class GetPipelineTransportAction extends TransportMasterNodeReadAction<GetPipelineRequest, GetPipelineResponse> {
|
||||
|
||||
private final PipelineStore pipelineStore;
|
||||
|
||||
|
||||
@Inject
|
||||
public GetPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
TransportService transportService, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) {
|
||||
IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, GetPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, GetPipelineRequest::new, indexNameExpressionResolver);
|
||||
this.pipelineStore = nodeService.getIngestService().getPipelineStore();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -58,7 +54,7 @@ public class GetPipelineTransportAction extends TransportMasterNodeReadAction<Ge
|
||||
|
||||
@Override
|
||||
protected void masterOperation(GetPipelineRequest request, ClusterState state, ActionListener<GetPipelineResponse> listener) throws Exception {
|
||||
listener.onResponse(new GetPipelineResponse(pipelineStore.getPipelines(state, request.getIds())));
|
||||
listener.onResponse(new GetPipelineResponse(IngestService.getPipelines(state, request.getIds())));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -32,12 +32,10 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.ingest.PipelineStore;
|
||||
import org.elasticsearch.ingest.IngestService;
|
||||
import org.elasticsearch.ingest.IngestInfo;
|
||||
import org.elasticsearch.node.NodeService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
@ -46,19 +44,19 @@ import java.util.Map;
|
||||
|
||||
public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPipelineRequest, AcknowledgedResponse> {
|
||||
|
||||
private final PipelineStore pipelineStore;
|
||||
private final ClusterService clusterService;
|
||||
private final IngestService ingestService;
|
||||
private final NodeClient client;
|
||||
|
||||
@Inject
|
||||
public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
TransportService transportService, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService,
|
||||
NodeClient client) {
|
||||
super(settings, PutPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new);
|
||||
this.clusterService = clusterService;
|
||||
public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, TransportService transportService,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
IngestService ingestService, NodeClient client) {
|
||||
super(
|
||||
settings, PutPipelineAction.NAME, transportService, ingestService.getClusterService(),
|
||||
threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new
|
||||
);
|
||||
this.client = client;
|
||||
this.pipelineStore = nodeService.getIngestService().getPipelineStore();
|
||||
this.ingestService = ingestService;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -84,7 +82,7 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPip
|
||||
for (NodeInfo nodeInfo : nodeInfos.getNodes()) {
|
||||
ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest());
|
||||
}
|
||||
pipelineStore.put(clusterService, ingestInfos, request, listener);
|
||||
ingestService.putPipeline(ingestInfos, request, listener);
|
||||
} catch (Exception e) {
|
||||
onFailure(e);
|
||||
}
|
||||
|
@ -32,8 +32,8 @@ import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.ingest.ConfigurationUtils;
|
||||
import org.elasticsearch.ingest.IngestDocument;
|
||||
import org.elasticsearch.ingest.IngestService;
|
||||
import org.elasticsearch.ingest.Pipeline;
|
||||
import org.elasticsearch.ingest.PipelineStore;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
@ -164,14 +164,13 @@ public class SimulatePipelineRequest extends ActionRequest implements ToXContent
|
||||
}
|
||||
}
|
||||
|
||||
private static final Pipeline.Factory PIPELINE_FACTORY = new Pipeline.Factory();
|
||||
static final String SIMULATED_PIPELINE_ID = "_simulate_pipeline";
|
||||
|
||||
static Parsed parseWithPipelineId(String pipelineId, Map<String, Object> config, boolean verbose, PipelineStore pipelineStore) {
|
||||
static Parsed parseWithPipelineId(String pipelineId, Map<String, Object> config, boolean verbose, IngestService ingestService) {
|
||||
if (pipelineId == null) {
|
||||
throw new IllegalArgumentException("param [pipeline] is null");
|
||||
}
|
||||
Pipeline pipeline = pipelineStore.get(pipelineId);
|
||||
Pipeline pipeline = ingestService.getPipeline(pipelineId);
|
||||
if (pipeline == null) {
|
||||
throw new IllegalArgumentException("pipeline [" + pipelineId + "] does not exist");
|
||||
}
|
||||
@ -179,9 +178,9 @@ public class SimulatePipelineRequest extends ActionRequest implements ToXContent
|
||||
return new Parsed(pipeline, ingestDocumentList, verbose);
|
||||
}
|
||||
|
||||
static Parsed parse(Map<String, Object> config, boolean verbose, PipelineStore pipelineStore) throws Exception {
|
||||
static Parsed parse(Map<String, Object> config, boolean verbose, IngestService pipelineStore) throws Exception {
|
||||
Map<String, Object> pipelineConfig = ConfigurationUtils.readMap(null, null, config, Fields.PIPELINE);
|
||||
Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactories());
|
||||
Pipeline pipeline = Pipeline.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactories());
|
||||
List<IngestDocument> ingestDocumentList = parseDocs(config);
|
||||
return new Parsed(pipeline, ingestDocumentList, verbose);
|
||||
}
|
||||
|
@ -26,8 +26,7 @@ import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.ingest.PipelineStore;
|
||||
import org.elasticsearch.node.NodeService;
|
||||
import org.elasticsearch.ingest.IngestService;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
@ -36,15 +35,15 @@ import java.util.Map;
|
||||
|
||||
public class SimulatePipelineTransportAction extends HandledTransportAction<SimulatePipelineRequest, SimulatePipelineResponse> {
|
||||
|
||||
private final PipelineStore pipelineStore;
|
||||
private final IngestService ingestService;
|
||||
private final SimulateExecutionService executionService;
|
||||
|
||||
@Inject
|
||||
public SimulatePipelineTransportAction(Settings settings, ThreadPool threadPool, TransportService transportService,
|
||||
ActionFilters actionFilters, NodeService nodeService) {
|
||||
ActionFilters actionFilters, IngestService ingestService) {
|
||||
super(settings, SimulatePipelineAction.NAME, transportService, actionFilters,
|
||||
(Writeable.Reader<SimulatePipelineRequest>) SimulatePipelineRequest::new);
|
||||
this.pipelineStore = nodeService.getIngestService().getPipelineStore();
|
||||
this.ingestService = ingestService;
|
||||
this.executionService = new SimulateExecutionService(threadPool);
|
||||
}
|
||||
|
||||
@ -55,9 +54,9 @@ public class SimulatePipelineTransportAction extends HandledTransportAction<Simu
|
||||
final SimulatePipelineRequest.Parsed simulateRequest;
|
||||
try {
|
||||
if (request.getId() != null) {
|
||||
simulateRequest = SimulatePipelineRequest.parseWithPipelineId(request.getId(), source, request.isVerbose(), pipelineStore);
|
||||
simulateRequest = SimulatePipelineRequest.parseWithPipelineId(request.getId(), source, request.isVerbose(), ingestService);
|
||||
} else {
|
||||
simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), pipelineStore);
|
||||
simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), ingestService);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
listener.onFailure(e);
|
||||
|
@ -28,6 +28,7 @@ import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.transport.BoundTransportAddress;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.index.IndexModule;
|
||||
import org.elasticsearch.monitor.jvm.JvmInfo;
|
||||
import org.elasticsearch.monitor.process.ProcessProbe;
|
||||
import org.elasticsearch.node.NodeValidationException;
|
||||
@ -393,17 +394,22 @@ final class BootstrapChecks {
|
||||
|
||||
static class MaxMapCountCheck implements BootstrapCheck {
|
||||
|
||||
private static final long LIMIT = 1 << 18;
|
||||
static final long LIMIT = 1 << 18;
|
||||
|
||||
@Override
|
||||
public BootstrapCheckResult check(BootstrapContext context) {
|
||||
if (getMaxMapCount() != -1 && getMaxMapCount() < LIMIT) {
|
||||
final String message = String.format(
|
||||
Locale.ROOT,
|
||||
"max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]",
|
||||
getMaxMapCount(),
|
||||
LIMIT);
|
||||
return BootstrapCheckResult.failure(message);
|
||||
public BootstrapCheckResult check(final BootstrapContext context) {
|
||||
// we only enforce the check if mmapfs is an allowed store type
|
||||
if (IndexModule.NODE_STORE_ALLOW_MMAPFS.get(context.settings)) {
|
||||
if (getMaxMapCount() != -1 && getMaxMapCount() < LIMIT) {
|
||||
final String message = String.format(
|
||||
Locale.ROOT,
|
||||
"max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]",
|
||||
getMaxMapCount(),
|
||||
LIMIT);
|
||||
return BootstrapCheckResult.failure(message);
|
||||
} else {
|
||||
return BootstrapCheckResult.success();
|
||||
}
|
||||
} else {
|
||||
return BootstrapCheckResult.success();
|
||||
}
|
||||
|
@ -25,6 +25,8 @@ import org.apache.lucene.util.BitUtil;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
|
||||
import static org.apache.lucene.geo.GeoUtils.MAX_LAT_INCL;
|
||||
|
||||
/**
|
||||
* Utilities for converting to/from the GeoHash standard
|
||||
*
|
||||
@ -48,6 +50,8 @@ public class GeoHashUtils {
|
||||
private static final double LAT_SCALE = (0x1L<<BITS)/180.0D;
|
||||
private static final double LON_SCALE = (0x1L<<BITS)/360.0D;
|
||||
private static final short MORTON_OFFSET = (BITS<<1) - (PRECISION*5);
|
||||
/** Bit encoded representation of the latitude of north pole */
|
||||
private static final long MAX_LAT_BITS = (0x1L << (PRECISION * 5 / 2)) - 1;
|
||||
|
||||
// No instance:
|
||||
private GeoHashUtils() {
|
||||
@ -218,12 +222,19 @@ public class GeoHashUtils {
|
||||
long ghLong = longEncode(geohash, len);
|
||||
// shift away the level
|
||||
ghLong >>>= 4;
|
||||
// deinterleave and add 1 to lat and lon to get topRight
|
||||
long lat = BitUtil.deinterleave(ghLong >>> 1) + 1;
|
||||
long lon = BitUtil.deinterleave(ghLong) + 1;
|
||||
GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)lon, (int)lat) << 4 | len);
|
||||
|
||||
return new Rectangle(bottomLeft.lat(), topRight.lat(), bottomLeft.lon(), topRight.lon());
|
||||
// deinterleave
|
||||
long lon = BitUtil.deinterleave(ghLong >>> 1);
|
||||
long lat = BitUtil.deinterleave(ghLong);
|
||||
if (lat < MAX_LAT_BITS) {
|
||||
// add 1 to lat and lon to get topRight
|
||||
GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)(lat + 1), (int)(lon + 1)) << 4 | len);
|
||||
return new Rectangle(bottomLeft.lat(), topRight.lat(), bottomLeft.lon(), topRight.lon());
|
||||
} else {
|
||||
// We cannot go north of north pole, so just using 90 degrees instead of calculating it using
|
||||
// add 1 to lon to get lon of topRight, we are going to use 90 for lat
|
||||
GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)lat, (int)(lon + 1)) << 4 | len);
|
||||
return new Rectangle(bottomLeft.lat(), MAX_LAT_INCL, bottomLeft.lon(), topRight.lon());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
Some files were not shown because too many files have changed in this diff