Merge branch 'master' into index-lifecycle
commit e2c1beb1be
@@ -20,16 +20,12 @@
import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionCollection
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.gradle.plugins.ide.eclipse.model.SourceFolder

import java.nio.file.Files
import java.nio.file.Path

plugins {
  id 'com.gradle.build-scan' version '1.13.2'
}
@@ -576,62 +572,6 @@ wrapper {
  }
}

static void assertLinesInFile(final Path path, final List<String> expectedLines) {
  final List<String> actualLines = Files.readAllLines(path)
  int line = 0
  for (final String expectedLine : expectedLines) {
    final String actualLine = actualLines.get(line)
    if (expectedLine != actualLine) {
      throw new GradleException("expected line [${line + 1}] in [${path}] to be [${expectedLine}] but was [${actualLine}]")
    }
    line++
  }
}

/*
 * Check that all generated JARs have our NOTICE.txt and an appropriate
 * LICENSE.txt in them. We configure this in gradle but we'd like to
 * be extra paranoid.
 */
subprojects { project ->
  project.tasks.withType(Jar).whenTaskAdded { jarTask ->
    final Task extract = project.task("extract${jarTask.name.capitalize()}", type: LoggedExec) {
      dependsOn jarTask
      ext.destination = project.buildDir.toPath().resolve("jar-extracted/${jarTask.name}")
      commandLine "${->new File(rootProject.compilerJavaHome, 'bin/jar')}",
        'xf', "${-> jarTask.outputs.files.singleFile}", 'META-INF/LICENSE.txt', 'META-INF/NOTICE.txt'
      workingDir destination
      onlyIf {jarTask.enabled}
      doFirst {
        project.delete(destination)
        Files.createDirectories(destination)
      }
    }

    final Task checkNotice = project.task("verify${jarTask.name.capitalize()}Notice") {
      dependsOn extract
      onlyIf {jarTask.enabled}
      doLast {
        final List<String> noticeLines = Files.readAllLines(project.noticeFile.toPath())
        final Path noticePath = extract.destination.resolve('META-INF/NOTICE.txt')
        assertLinesInFile(noticePath, noticeLines)
      }
    }
    project.check.dependsOn checkNotice

    final Task checkLicense = project.task("verify${jarTask.name.capitalize()}License") {
      dependsOn extract
      onlyIf {jarTask.enabled}
      doLast {
        final List<String> licenseLines = Files.readAllLines(project.licenseFile.toPath())
        final Path licensePath = extract.destination.resolve('META-INF/LICENSE.txt')
        assertLinesInFile(licensePath, licenseLines)
      }
    }
    project.check.dependsOn checkLicense
  }
}

/* Remove assemble/dependenciesInfo on all qa projects because we don't need to publish
 * artifacts for them. */
gradle.projectsEvaluated {
@@ -24,15 +24,6 @@ plugins {
  id 'groovy'
}

gradlePlugin {
  plugins {
    simplePlugin {
      id = 'elasticsearch.clusterformation'
      implementationClass = 'org.elasticsearch.gradle.clusterformation.ClusterformationPlugin'
    }
  }
}

group = 'org.elasticsearch.gradle'

String minimumGradleVersion = file('src/main/resources/minimumGradleVersion').text.trim()
@@ -56,6 +56,7 @@ import org.gradle.util.GradleVersion
import java.nio.charset.StandardCharsets
import java.time.ZoneOffset
import java.time.ZonedDateTime

/**
 * Encapsulates build configuration for elasticsearch projects.
 */
@@ -739,6 +740,7 @@ class BuildPlugin implements Plugin<Project> {
        }
        from(project.noticeFile.parent) {
          include project.noticeFile.name
          rename { 'NOTICE.txt' }
        }
      }
    }
@@ -0,0 +1 @@
implementation-class=org.elasticsearch.gradle.clusterformation.ClusterformationPlugin
@@ -0,0 +1,76 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.gradle;

import org.apache.commons.io.IOUtils;
import org.elasticsearch.gradle.test.GradleIntegrationTestCase;
import org.gradle.testkit.runner.BuildResult;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;

public class BuildPluginIT extends GradleIntegrationTestCase {

    public void testPluginCanBeApplied() {
        BuildResult result = getGradleRunner("elasticsearch.build")
            .withArguments("hello", "-s")
            .build();
        assertTaskSuccessful(result, ":hello");
        assertOutputContains("build plugin can be applied");
    }

    public void testCheckTask() {
        BuildResult result = getGradleRunner("elasticsearch.build")
            .withArguments("check", "assemble", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath())
            .build();
        assertTaskSuccessful(result, ":check");
    }

    public void testLicenseAndNotice() throws IOException {
        BuildResult result = getGradleRunner("elasticsearch.build")
            .withArguments("clean", "assemble", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath())
            .build();

        assertTaskSuccessful(result, ":assemble");

        assertBuildFileExists(result, "elasticsearch.build", "distributions/elasticsearch.build.jar");

        try (ZipFile zipFile = new ZipFile(new File(
            getBuildDir("elasticsearch.build"), "distributions/elasticsearch.build.jar"
        ))) {
            ZipEntry licenseEntry = zipFile.getEntry("META-INF/LICENSE.txt");
            ZipEntry noticeEntry = zipFile.getEntry("META-INF/NOTICE.txt");
            assertNotNull("Jar does not have META-INF/LICENSE.txt", licenseEntry);
            assertNotNull("Jar does not have META-INF/NOTICE.txt", noticeEntry);
            try (
                InputStream license = zipFile.getInputStream(licenseEntry);
                InputStream notice = zipFile.getInputStream(noticeEntry)
            ) {
                assertEquals("this is a test license file", IOUtils.toString(license, StandardCharsets.UTF_8.name()));
                assertEquals("this is a test notice file", IOUtils.toString(notice, StandardCharsets.UTF_8.name()));
            }
        }
    }

}
@@ -2,74 +2,57 @@ package org.elasticsearch.gradle.precommit;

import org.elasticsearch.gradle.test.GradleIntegrationTestCase;
import org.gradle.testkit.runner.BuildResult;
import org.gradle.testkit.runner.GradleRunner;
import org.gradle.testkit.runner.TaskOutcome;

import java.util.Arrays;
import java.util.HashSet;

public class NamingConventionsTaskIT extends GradleIntegrationTestCase {

    public void testPluginCanBeApplied() {
        BuildResult result = GradleRunner.create()
            .withProjectDir(getProjectDir("namingConventionsSelfTest"))
            .withArguments("hello", "-s", "-PcheckForTestsInMain=false")
            .withPluginClasspath()
            .build();

        assertEquals(TaskOutcome.SUCCESS, result.task(":hello").getOutcome());
        String output = result.getOutput();
        assertTrue(output, output.contains("build plugin can be applied"));
    }

    public void testNameCheckFailsAsItShould() {
        BuildResult result = GradleRunner.create()
            .withProjectDir(getProjectDir("namingConventionsSelfTest"))
        BuildResult result = getGradleRunner("namingConventionsSelfTest")
            .withArguments("namingConventions", "-s", "-PcheckForTestsInMain=false")
            .withPluginClasspath()
            .buildAndFail();

        assertNotNull("task did not run", result.task(":namingConventions"));
        assertEquals(TaskOutcome.FAILED, result.task(":namingConventions").getOutcome());
        String output = result.getOutput();
        for (String line : Arrays.asList(
            "Found inner classes that are tests, which are excluded from the test runner:",
            "* org.elasticsearch.test.NamingConventionsCheckInMainIT$InternalInvalidTests",
            "Classes ending with [Tests] must subclass [UnitTestCase]:",
            "* org.elasticsearch.test.NamingConventionsCheckInMainTests",
            "* org.elasticsearch.test.NamingConventionsCheckInMainIT",
            "Not all subclasses of UnitTestCase match the naming convention. Concrete classes must end with [Tests]:",
            "* org.elasticsearch.test.WrongName")) {
            assertTrue(
                "expected: '" + line + "' but it was not found in the output:\n" + output,
                output.contains(line)
            );
        }
        assertTaskFailed(result, ":namingConventions");
        assertOutputContains(
            result.getOutput(),
            // TODO: java9 Set.of
            new HashSet<>(
                Arrays.asList(
                    "Not all subclasses of UnitTestCase match the naming convention. Concrete classes must end with [Tests]:",
                    "* org.elasticsearch.test.WrongName",
                    "Found inner classes that are tests, which are excluded from the test runner:",
                    "* org.elasticsearch.test.NamingConventionsCheckInMainIT$InternalInvalidTests",
                    "Classes ending with [Tests] must subclass [UnitTestCase]:",
                    "* org.elasticsearch.test.NamingConventionsCheckInMainTests",
                    "* org.elasticsearch.test.NamingConventionsCheckInMainIT"
                )
            )
        );
    }

    public void testNameCheckFailsAsItShouldWithMain() {
        BuildResult result = GradleRunner.create()
            .withProjectDir(getProjectDir("namingConventionsSelfTest"))
        BuildResult result = getGradleRunner("namingConventionsSelfTest")
            .withArguments("namingConventions", "-s", "-PcheckForTestsInMain=true")
            .withPluginClasspath()
            .buildAndFail();

        assertNotNull("task did not run", result.task(":namingConventions"));
        assertEquals(TaskOutcome.FAILED, result.task(":namingConventions").getOutcome());

        String output = result.getOutput();
        for (String line : Arrays.asList(
            "Classes ending with [Tests] or [IT] or extending [UnitTestCase] must be in src/test/java:",
            "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyInterfaceTests",
            "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyAbstractTests",
            "* org.elasticsearch.test.NamingConventionsCheckBadClasses$InnerTests",
            "* org.elasticsearch.test.NamingConventionsCheckBadClasses$NotImplementingTests",
            "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongNameTheSecond",
            "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongName")) {
            assertTrue(
                "expected: '" + line + "' but it was not found in the output:\n" + output,
                output.contains(line)
            );
        }
        assertTaskFailed(result, ":namingConventions");
        assertOutputContains(
            result.getOutput(),
            // TODO: java9 Set.of
            new HashSet<>(
                Arrays.asList(
                    "Classes ending with [Tests] or [IT] or extending [UnitTestCase] must be in src/test/java:",
                    "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyInterfaceTests",
                    "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyInterfaceTests",
                    "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyAbstractTests",
                    "* org.elasticsearch.test.NamingConventionsCheckBadClasses$InnerTests",
                    "* org.elasticsearch.test.NamingConventionsCheckBadClasses$NotImplementingTests",
                    "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongNameTheSecond",
                    "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongName"
                )
            )
        );
    }

}
@@ -10,6 +10,7 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -47,6 +48,12 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase {
        }
    }

    protected void assertOutputContains(String output, Set<String> lines) {
        for (String line : lines) {
            assertOutputContains(output, line);
        }
    }

    protected void assertOutputContains(String output, String line) {
        assertTrue(
            "Expected the following line in output:\n\n" + line + "\n\nOutput is:\n" + output,
@@ -82,7 +89,7 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase {
                "\n\nOutput is:\n" + result.getOutput());
        }
        assertEquals(
            "Expected task to be successful but it was: " + task.getOutcome() +
            "Expected task `" + taskName + "` to be successful but it was: " + task.getOutcome() +
                taskOutcome + "\n\nOutput is:\n" + result.getOutput(),
            taskOutcome,
            task.getOutcome()
@@ -0,0 +1 @@
this is a test license file
@@ -0,0 +1 @@
this is a test notice file
@@ -0,0 +1,36 @@
plugins {
  id 'java'
  id 'elasticsearch.build'
}

ext.licenseFile = file("LICENSE")
ext.noticeFile = file("NOTICE")

dependencies {
  compile "junit:junit:${versions.junit}"
  // missing classes in thirdparty audit
  compile 'org.hamcrest:hamcrest-core:1.3'
}

repositories {
  mavenCentral()
  repositories {
    maven {
      url System.getProperty("local.repo.path")
    }
  }
}

// todo remove offending rules
forbiddenApisMain.enabled = false
forbiddenApisTest.enabled = false
// requires dependency on testing fw
jarHell.enabled = false
// we don't have tests for now
test.enabled = false

task hello {
  doFirst {
    println "build plugin can be applied"
  }
}
@@ -0,0 +1 @@
42a25dc3219429f0e5d060061f71acb49bf010a0
@@ -0,0 +1 @@
2973d150c0dc1fefe998f834810d68f278ea58ec
@@ -0,0 +1,26 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch;

/**
 * This is just a test class
 */
public class SampleClass {

}
@@ -13,14 +13,8 @@ thirdPartyAudit.enabled = false
ext.licenseFile = file("$buildDir/dummy/license")
ext.noticeFile = file("$buildDir/dummy/notice")

task hello {
  doFirst {
    println "build plugin can be applied"
  }
}

dependencies {
  compile "junit:junit:${versions.junit}"
  compile "junit:junit:4.12"
}

namingConventions {
@@ -28,10 +28,12 @@ import org.elasticsearch.client.ml.CloseJobRequest;
import org.elasticsearch.client.ml.DeleteJobRequest;
import org.elasticsearch.client.ml.GetBucketsRequest;
import org.elasticsearch.client.ml.GetJobRequest;
import org.elasticsearch.client.ml.GetJobStatsRequest;
import org.elasticsearch.client.ml.GetRecordsRequest;
import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.client.ml.FlushJobRequest;

import java.io.IOException;
@@ -126,6 +128,36 @@ final class MLRequestConverters {
        return request;
    }

    static Request flushJob(FlushJobRequest flushJobRequest) throws IOException {
        String endpoint = new EndpointBuilder()
            .addPathPartAsIs("_xpack")
            .addPathPartAsIs("ml")
            .addPathPartAsIs("anomaly_detectors")
            .addPathPart(flushJobRequest.getJobId())
            .addPathPartAsIs("_flush")
            .build();
        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
        request.setEntity(createEntity(flushJobRequest, REQUEST_BODY_CONTENT_TYPE));
        return request;
    }

    static Request getJobStats(GetJobStatsRequest getJobStatsRequest) {
        String endpoint = new EndpointBuilder()
            .addPathPartAsIs("_xpack")
            .addPathPartAsIs("ml")
            .addPathPartAsIs("anomaly_detectors")
            .addPathPart(Strings.collectionToCommaDelimitedString(getJobStatsRequest.getJobIds()))
            .addPathPartAsIs("_stats")
            .build();
        Request request = new Request(HttpGet.METHOD_NAME, endpoint);

        RequestConverters.Params params = new RequestConverters.Params(request);
        if (getJobStatsRequest.isAllowNoJobs() != null) {
            params.putParam("allow_no_jobs", Boolean.toString(getJobStatsRequest.isAllowNoJobs()));
        }
        return request;
    }

    static Request getRecords(GetRecordsRequest getRecordsRequest) throws IOException {
        String endpoint = new EndpointBuilder()
            .addPathPartAsIs("_xpack")
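Both converters assemble the REST path from fixed parts plus user-supplied ids, so a flush for job my-job goes to POST /_xpack/ml/anomaly_detectors/my-job/_flush, and stats for two jobs to GET /_xpack/ml/anomaly_detectors/job-1,job-2/_stats. A minimal standalone sketch of that path-building idea (the client's EndpointBuilder also URL-encodes each part, which this toy version deliberately skips):

import java.util.StringJoiner;

class EndpointSketch {
    // Toy stand-in for EndpointBuilder: joins path parts with '/' behind a leading '/'.
    static String path(String... parts) {
        StringJoiner joiner = new StringJoiner("/", "/", "");
        for (String part : parts) {
            joiner.add(part);
        }
        return joiner.toString();
    }

    public static void main(String[] args) {
        // Prints: /_xpack/ml/anomaly_detectors/my-job/_flush
        System.out.println(path("_xpack", "ml", "anomaly_detectors", "my-job", "_flush"));
        // Prints: /_xpack/ml/anomaly_detectors/job-1,job-2/_stats
        System.out.println(path("_xpack", "ml", "anomaly_detectors", "job-1,job-2", "_stats"));
    }
}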
@@ -19,6 +19,11 @@
package org.elasticsearch.client;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.ml.FlushJobRequest;
import org.elasticsearch.client.ml.FlushJobResponse;
import org.elasticsearch.client.ml.GetJobStatsRequest;
import org.elasticsearch.client.ml.GetJobStatsResponse;
import org.elasticsearch.client.ml.job.stats.JobStats;
import org.elasticsearch.client.ml.CloseJobRequest;
import org.elasticsearch.client.ml.CloseJobResponse;
import org.elasticsearch.client.ml.DeleteJobRequest;
@@ -288,6 +293,101 @@ public final class MachineLearningClient {
            Collections.emptySet());
    }

    /**
     * Flushes internally buffered data for the given Machine Learning Job, ensuring all data sent to the job has been processed.
     * This may cause new results to be calculated depending on the contents of the buffer.
     *
     * Both flush and close operations are similar,
     * however the flush is more efficient if you are expecting to send more data for analysis.
     *
     * When flushing, the job remains open and is available to continue analyzing data.
     * A close operation additionally prunes and persists the model state to disk and the
     * job must be opened again before analyzing further data.
     *
     * <p>
     * For additional info
     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html">Flush ML job documentation</a>
     *
     * @param request The {@link FlushJobRequest} object enclosing the `jobId` and additional request options
     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     */
    public FlushJobResponse flushJob(FlushJobRequest request, RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(request,
            MLRequestConverters::flushJob,
            options,
            FlushJobResponse::fromXContent,
            Collections.emptySet());
    }

    /**
     * Flushes internally buffered data for the given Machine Learning Job asynchronously, ensuring all data sent to the job has
     * been processed. This may cause new results to be calculated depending on the contents of the buffer.
     *
     * Both flush and close operations are similar,
     * however the flush is more efficient if you are expecting to send more data for analysis.
     *
     * When flushing, the job remains open and is available to continue analyzing data.
     * A close operation additionally prunes and persists the model state to disk and the
     * job must be opened again before analyzing further data.
     *
     * <p>
     * For additional info
     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html">Flush ML job documentation</a>
     *
     * @param request The {@link FlushJobRequest} object enclosing the `jobId` and additional request options
     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener Listener to be notified upon request completion
     */
    public void flushJobAsync(FlushJobRequest request, RequestOptions options, ActionListener<FlushJobResponse> listener) {
        restHighLevelClient.performRequestAsyncAndParseEntity(request,
            MLRequestConverters::flushJob,
            options,
            FlushJobResponse::fromXContent,
            listener,
            Collections.emptySet());
    }

    /**
     * Gets usage statistics for one or more Machine Learning jobs.
     *
     * <p>
     * For additional info
     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html">Get Job stats docs</a>
     * </p>
     * @param request {@link GetJobStatsRequest} Request containing a list of jobId(s) and additional options
     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @return {@link GetJobStatsResponse} response object containing
     * the {@link JobStats} objects and the number of jobs found
     * @throws IOException when there is a serialization issue sending the request or receiving the response
     */
    public GetJobStatsResponse getJobStats(GetJobStatsRequest request, RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(request,
            MLRequestConverters::getJobStats,
            options,
            GetJobStatsResponse::fromXContent,
            Collections.emptySet());
    }

    /**
     * Gets usage statistics for one or more Machine Learning jobs, asynchronously.
     *
     * <p>
     * For additional info
     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html">Get Job stats docs</a>
     * </p>
     * @param request {@link GetJobStatsRequest} Request containing a list of jobId(s) and additional options
     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener Listener to be notified with {@link GetJobStatsResponse} upon request completion
     */
    public void getJobStatsAsync(GetJobStatsRequest request, RequestOptions options, ActionListener<GetJobStatsResponse> listener) {
        restHighLevelClient.performRequestAsyncAndParseEntity(request,
            MLRequestConverters::getJobStats,
            options,
            GetJobStatsResponse::fromXContent,
            listener,
            Collections.emptySet());
    }

    /**
     * Gets the records for a Machine Learning Job.
     * <p>
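Taken together, a hedged usage sketch of the two new client entry points; it assumes an already-connected RestHighLevelClient named `client` and an existing job with id "my-job":

// Sketch only: `client` and the job "my-job" are assumed to exist.
FlushJobRequest flushRequest = new FlushJobRequest("my-job");
flushRequest.setCalcInterim(true); // also calculate interim results for recent buckets

FlushJobResponse flushResponse =
    client.machineLearning().flushJob(flushRequest, RequestOptions.DEFAULT);
System.out.println("flushed: " + flushResponse.isFlushed());

GetJobStatsResponse statsResponse = client.machineLearning()
    .getJobStats(new GetJobStatsRequest("my-job"), RequestOptions.DEFAULT);
statsResponse.jobStats().forEach(stats -> System.out.println(stats.getJobId()));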
@@ -109,7 +109,9 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.index.reindex.AbstractBulkByScrollRequest;
import org.elasticsearch.index.reindex.ReindexRequest;
import org.elasticsearch.index.reindex.UpdateByQueryRequest;
import org.elasticsearch.protocol.xpack.XPackInfoRequest;
import org.elasticsearch.protocol.xpack.XPackUsageRequest;
import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest;
@@ -843,6 +845,33 @@ final class RequestConverters {
        return request;
    }

    static Request updateByQuery(UpdateByQueryRequest updateByQueryRequest) throws IOException {
        String endpoint =
            endpoint(updateByQueryRequest.indices(), updateByQueryRequest.getDocTypes(), "_update_by_query");
        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
        Params params = new Params(request)
            .withRouting(updateByQueryRequest.getRouting())
            .withPipeline(updateByQueryRequest.getPipeline())
            .withRefresh(updateByQueryRequest.isRefresh())
            .withTimeout(updateByQueryRequest.getTimeout())
            .withWaitForActiveShards(updateByQueryRequest.getWaitForActiveShards())
            .withIndicesOptions(updateByQueryRequest.indicesOptions());
        if (updateByQueryRequest.isAbortOnVersionConflict() == false) {
            params.putParam("conflicts", "proceed");
        }
        if (updateByQueryRequest.getBatchSize() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE) {
            params.putParam("scroll_size", Integer.toString(updateByQueryRequest.getBatchSize()));
        }
        if (updateByQueryRequest.getScrollTime() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT) {
            params.putParam("scroll", updateByQueryRequest.getScrollTime());
        }
        if (updateByQueryRequest.getSize() > 0) {
            params.putParam("size", Integer.toString(updateByQueryRequest.getSize()));
        }
        request.setEntity(createEntity(updateByQueryRequest, REQUEST_BODY_CONTENT_TYPE));
        return request;
    }

    static Request rollover(RolloverRequest rolloverRequest) throws IOException {
        String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover")
            .addPathPart(rolloverRequest.getNewIndexName()).build();
@@ -66,6 +66,7 @@ import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.index.rankeval.RankEvalResponse;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.index.reindex.ReindexRequest;
import org.elasticsearch.index.reindex.UpdateByQueryRequest;
import org.elasticsearch.plugins.spi.NamedXContentProvider;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestStatus;
@@ -436,6 +437,35 @@ public class RestHighLevelClient implements Closeable {
        );
    }

    /**
     * Executes an update by query request.
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html">
     * Update By Query API on elastic.co</a>
     * @param updateByQueryRequest the request
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @return the response
     * @throws IOException in case there is a problem sending the request or parsing back the response
     */
    public final BulkByScrollResponse updateByQuery(UpdateByQueryRequest updateByQueryRequest, RequestOptions options) throws IOException {
        return performRequestAndParseEntity(
            updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, emptySet()
        );
    }

    /**
     * Asynchronously executes an update by query request.
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html">
     * Update By Query API on elastic.co</a>
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener the listener to be notified upon request completion
     */
    public final void updateByQueryAsync(UpdateByQueryRequest reindexRequest, RequestOptions options,
                                         ActionListener<BulkByScrollResponse> listener) {
        performRequestAsyncAndParseEntity(
            reindexRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet()
        );
    }

    /**
     * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
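A hedged usage sketch of the new update-by-query entry point, assuming a connected RestHighLevelClient `client` and an existing index "source"; the setters shown correspond to the query parameters built in RequestConverters.updateByQuery above, though request-construction conveniences vary by version:

// Sketch only: `client` and the index "source" are assumed to exist.
UpdateByQueryRequest request = new UpdateByQueryRequest();
request.getSearchRequest().indices("source"); // which indices to process
request.setAbortOnVersionConflict(false);     // serialized as ?conflicts=proceed
request.setSize(1000);                        // only process 1000 matches (?size=1000)

BulkByScrollResponse response = client.updateByQuery(request, RequestOptions.DEFAULT);
System.out.println("updated: " + response.getUpdated());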
@@ -0,0 +1,195 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Objects;

/**
 * Request object to flush a given Machine Learning job.
 */
public class FlushJobRequest extends ActionRequest implements ToXContentObject {

    public static final ParseField CALC_INTERIM = new ParseField("calc_interim");
    public static final ParseField START = new ParseField("start");
    public static final ParseField END = new ParseField("end");
    public static final ParseField ADVANCE_TIME = new ParseField("advance_time");
    public static final ParseField SKIP_TIME = new ParseField("skip_time");

    public static final ConstructingObjectParser<FlushJobRequest, Void> PARSER =
        new ConstructingObjectParser<>("flush_job_request", (a) -> new FlushJobRequest((String) a[0]));

    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
        PARSER.declareBoolean(FlushJobRequest::setCalcInterim, CALC_INTERIM);
        PARSER.declareString(FlushJobRequest::setStart, START);
        PARSER.declareString(FlushJobRequest::setEnd, END);
        PARSER.declareString(FlushJobRequest::setAdvanceTime, ADVANCE_TIME);
        PARSER.declareString(FlushJobRequest::setSkipTime, SKIP_TIME);
    }

    private final String jobId;
    private Boolean calcInterim;
    private String start;
    private String end;
    private String advanceTime;
    private String skipTime;

    /**
     * Create new Flush job request
     *
     * @param jobId The job ID of the job to flush
     */
    public FlushJobRequest(String jobId) {
        this.jobId = jobId;
    }

    public String getJobId() {
        return jobId;
    }

    public boolean getCalcInterim() {
        return calcInterim;
    }

    /**
     * When {@code true} calculates the interim results for the most recent bucket or all buckets within the latency period.
     *
     * @param calcInterim defaults to {@code false}.
     */
    public void setCalcInterim(boolean calcInterim) {
        this.calcInterim = calcInterim;
    }

    public String getStart() {
        return start;
    }

    /**
     * When used in conjunction with {@link FlushJobRequest#calcInterim},
     * specifies the start of the range of buckets on which to calculate interim results.
     *
     * @param start the beginning of the range of buckets; may be an epoch seconds, epoch millis or an ISO string
     */
    public void setStart(String start) {
        this.start = start;
    }

    public String getEnd() {
        return end;
    }

    /**
     * When used in conjunction with {@link FlushJobRequest#calcInterim}, specifies the end of the range
     * of buckets on which to calculate interim results
     *
     * @param end the end of the range of buckets; may be an epoch seconds, epoch millis or an ISO string
     */
    public void setEnd(String end) {
        this.end = end;
    }

    public String getAdvanceTime() {
        return advanceTime;
    }

    /**
     * Specifies to advance to a particular time value.
     * Results are generated and the model is updated for data from the specified time interval.
     *
     * @param advanceTime String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string
     */
    public void setAdvanceTime(String advanceTime) {
        this.advanceTime = advanceTime;
    }

    public String getSkipTime() {
        return skipTime;
    }

    /**
     * Specifies to skip to a particular time value.
     * Results are not generated and the model is not updated for data from the specified time interval.
     *
     * @param skipTime String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string
     */
    public void setSkipTime(String skipTime) {
        this.skipTime = skipTime;
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobId, calcInterim, start, end, advanceTime, skipTime);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }

        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }

        FlushJobRequest other = (FlushJobRequest) obj;
        return Objects.equals(jobId, other.jobId) &&
            calcInterim == other.calcInterim &&
            Objects.equals(start, other.start) &&
            Objects.equals(end, other.end) &&
            Objects.equals(advanceTime, other.advanceTime) &&
            Objects.equals(skipTime, other.skipTime);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(Job.ID.getPreferredName(), jobId);
        if (calcInterim != null) {
            builder.field(CALC_INTERIM.getPreferredName(), calcInterim);
        }
        if (start != null) {
            builder.field(START.getPreferredName(), start);
        }
        if (end != null) {
            builder.field(END.getPreferredName(), end);
        }
        if (advanceTime != null) {
            builder.field(ADVANCE_TIME.getPreferredName(), advanceTime);
        }
        if (skipTime != null) {
            builder.field(SKIP_TIME.getPreferredName(), skipTime);
        }
        builder.endObject();
        return builder;
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }
}
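Given the toXContent implementation above, only fields that were explicitly set make it into the request body. A small illustrative sketch (the values are made up):

// Sketch: unset optional fields are omitted from the serialized body.
FlushJobRequest request = new FlushJobRequest("my-job");
request.setCalcInterim(true);
request.setStart("1403481600"); // epoch seconds, epoch millis, or an ISO string
request.setEnd("1403485200");
// POST body produced via toXContent, roughly:
// {"job_id":"my-job","calc_interim":true,"start":"1403481600","end":"1403485200"}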
@@ -0,0 +1,112 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Date;
import java.util.Objects;

/**
 * Response object containing flush acknowledgement and additional data
 */
public class FlushJobResponse extends ActionResponse implements ToXContentObject {

    public static final ParseField FLUSHED = new ParseField("flushed");
    public static final ParseField LAST_FINALIZED_BUCKET_END = new ParseField("last_finalized_bucket_end");

    public static final ConstructingObjectParser<FlushJobResponse, Void> PARSER =
        new ConstructingObjectParser<>("flush_job_response",
            true,
            (a) -> {
                boolean flushed = (boolean) a[0];
                Date date = a[1] == null ? null : new Date((long) a[1]);
                return new FlushJobResponse(flushed, date);
            });

    static {
        PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), FLUSHED);
        PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), LAST_FINALIZED_BUCKET_END);
    }

    public static FlushJobResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);
    }

    private final boolean flushed;
    private final Date lastFinalizedBucketEnd;

    public FlushJobResponse(boolean flushed, @Nullable Date lastFinalizedBucketEnd) {
        this.flushed = flushed;
        this.lastFinalizedBucketEnd = lastFinalizedBucketEnd;
    }

    /**
     * Was the job successfully flushed or not
     */
    public boolean isFlushed() {
        return flushed;
    }

    /**
     * Provides the timestamp (in milliseconds-since-the-epoch) of the end of the last bucket that was processed.
     */
    @Nullable
    public Date getLastFinalizedBucketEnd() {
        return lastFinalizedBucketEnd;
    }

    @Override
    public int hashCode() {
        return Objects.hash(flushed, lastFinalizedBucketEnd);
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }

        if (other == null || getClass() != other.getClass()) {
            return false;
        }

        FlushJobResponse that = (FlushJobResponse) other;
        return that.flushed == flushed && Objects.equals(lastFinalizedBucketEnd, that.lastFinalizedBucketEnd);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(FLUSHED.getPreferredName(), flushed);
        if (lastFinalizedBucketEnd != null) {
            builder.timeField(LAST_FINALIZED_BUCKET_END.getPreferredName(),
                LAST_FINALIZED_BUCKET_END.getPreferredName() + "_string", lastFinalizedBucketEnd.getTime());
        }
        builder.endObject();
        return builder;
    }
}
@@ -0,0 +1,146 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

/**
 * Request object to get {@link org.elasticsearch.client.ml.job.stats.JobStats} by their respective jobIds
 *
 * `_all` explicitly gets all the jobs' statistics in the cluster
 * An empty request (no `jobId`s) implicitly gets all the jobs' statistics in the cluster
 */
public class GetJobStatsRequest extends ActionRequest implements ToXContentObject {

    public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs");

    @SuppressWarnings("unchecked")
    public static final ConstructingObjectParser<GetJobStatsRequest, Void> PARSER = new ConstructingObjectParser<>(
        "get_jobs_stats_request", a -> new GetJobStatsRequest((List<String>) a[0]));

    static {
        PARSER.declareField(ConstructingObjectParser.constructorArg(),
            p -> Arrays.asList(Strings.commaDelimitedListToStringArray(p.text())),
            Job.ID, ObjectParser.ValueType.STRING_ARRAY);
        PARSER.declareBoolean(GetJobStatsRequest::setAllowNoJobs, ALLOW_NO_JOBS);
    }

    private static final String ALL_JOBS = "_all";

    private final List<String> jobIds;
    private Boolean allowNoJobs;

    /**
     * Explicitly gets all jobs statistics
     *
     * @return a {@link GetJobStatsRequest} for all existing jobs
     */
    public static GetJobStatsRequest getAllJobStatsRequest() {
        return new GetJobStatsRequest(ALL_JOBS);
    }

    GetJobStatsRequest(List<String> jobIds) {
        if (jobIds.stream().anyMatch(Objects::isNull)) {
            throw new NullPointerException("jobIds must not contain null values");
        }
        this.jobIds = new ArrayList<>(jobIds);
    }

    /**
     * Get the specified Job's statistics via their unique jobIds
     *
     * @param jobIds must be non-null and each jobId must be non-null
     */
    public GetJobStatsRequest(String... jobIds) {
        this(Arrays.asList(jobIds));
    }

    /**
     * All the jobIds for which to get statistics
     */
    public List<String> getJobIds() {
        return jobIds;
    }

    public Boolean isAllowNoJobs() {
        return this.allowNoJobs;
    }

    /**
     * Whether to ignore if a wildcard expression matches no jobs.
     *
     * This includes the `_all` string or when no jobs have been specified
     *
     * @param allowNoJobs When {@code true} ignore if wildcard or `_all` matches no jobs. Defaults to {@code true}
     */
    public void setAllowNoJobs(boolean allowNoJobs) {
        this.allowNoJobs = allowNoJobs;
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobIds, allowNoJobs);
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }

        if (other == null || getClass() != other.getClass()) {
            return false;
        }

        GetJobStatsRequest that = (GetJobStatsRequest) other;
        return Objects.equals(jobIds, that.jobIds) &&
            Objects.equals(allowNoJobs, that.allowNoJobs);
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(Job.ID.getPreferredName(), Strings.collectionToCommaDelimitedString(jobIds));
        if (allowNoJobs != null) {
            builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs);
        }
        builder.endObject();
        return builder;
    }
}
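As the class javadoc notes, `_all` and an empty id list both address every job. A short sketch of the equivalent spellings:

// Sketch: three equivalent ways to request statistics for all jobs.
GetJobStatsRequest viaHelper   = GetJobStatsRequest.getAllJobStatsRequest(); // uses "_all"
GetJobStatsRequest viaWildcard = new GetJobStatsRequest("_all");
GetJobStatsRequest viaEmpty    = new GetJobStatsRequest();                   // no ids at all

GetJobStatsRequest selected = new GetJobStatsRequest("job-1", "job-2");
selected.setAllowNoJobs(false); // fail, rather than match nothing, if the ids resolve to no jobs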
@@ -0,0 +1,88 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.ml.job.stats.JobStats;

import java.io.IOException;
import java.util.List;
import java.util.Objects;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

/**
 * Contains a {@link List} of the found {@link JobStats} objects and the total count found
 */
public class GetJobStatsResponse extends AbstractResultResponse<JobStats> {

    public static final ParseField RESULTS_FIELD = new ParseField("jobs");

    @SuppressWarnings("unchecked")
    public static final ConstructingObjectParser<GetJobStatsResponse, Void> PARSER =
        new ConstructingObjectParser<>("jobs_stats_response", true,
            a -> new GetJobStatsResponse((List<JobStats>) a[0], (long) a[1]));

    static {
        PARSER.declareObjectArray(constructorArg(), JobStats.PARSER, RESULTS_FIELD);
        PARSER.declareLong(constructorArg(), COUNT);
    }

    GetJobStatsResponse(List<JobStats> jobStats, long count) {
        super(RESULTS_FIELD, jobStats, count);
    }

    /**
     * The collection of {@link JobStats} objects found in the query
     */
    public List<JobStats> jobStats() {
        return results;
    }

    public static GetJobStatsResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);
    }

    @Override
    public int hashCode() {
        return Objects.hash(results, count);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }

        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }

        GetJobStatsResponse other = (GetJobStatsResponse) obj;
        return Objects.equals(results, other.results) && count == other.count;
    }

    @Override
    public final String toString() {
        return Strings.toString(this);
    }
}
@@ -0,0 +1,150 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;

/**
 * A POJO class containing an Elasticsearch node's attributes
 */
public class NodeAttributes implements ToXContentObject {

    public static final ParseField ID = new ParseField("id");
    public static final ParseField NAME = new ParseField("name");
    public static final ParseField EPHEMERAL_ID = new ParseField("ephemeral_id");
    public static final ParseField TRANSPORT_ADDRESS = new ParseField("transport_address");
    public static final ParseField ATTRIBUTES = new ParseField("attributes");

    @SuppressWarnings("unchecked")
    public static final ConstructingObjectParser<NodeAttributes, Void> PARSER =
        new ConstructingObjectParser<>("node", true,
            (a) -> {
                int i = 0;
                String id = (String) a[i++];
                String name = (String) a[i++];
                String ephemeralId = (String) a[i++];
                String transportAddress = (String) a[i++];
                Map<String, String> attributes = (Map<String, String>) a[i];
                return new NodeAttributes(id, name, ephemeralId, transportAddress, attributes);
            });

    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), ID);
        PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME);
        PARSER.declareString(ConstructingObjectParser.constructorArg(), EPHEMERAL_ID);
        PARSER.declareString(ConstructingObjectParser.constructorArg(), TRANSPORT_ADDRESS);
        PARSER.declareField(ConstructingObjectParser.constructorArg(),
            (p, c) -> p.mapStrings(),
            ATTRIBUTES,
            ObjectParser.ValueType.OBJECT);
    }

    private final String id;
    private final String name;
    private final String ephemeralId;
    private final String transportAddress;
    private final Map<String, String> attributes;

    public NodeAttributes(String id, String name, String ephemeralId, String transportAddress, Map<String, String> attributes) {
        this.id = id;
        this.name = name;
        this.ephemeralId = ephemeralId;
        this.transportAddress = transportAddress;
        this.attributes = Collections.unmodifiableMap(attributes);
    }

    /**
     * The unique identifier of the node.
     */
    public String getId() {
        return id;
    }

    /**
     * The node name.
     */
    public String getName() {
        return name;
    }

    /**
     * The ephemeral id of the node.
     */
    public String getEphemeralId() {
        return ephemeralId;
    }

    /**
     * The host and port where transport HTTP connections are accepted.
     */
    public String getTransportAddress() {
        return transportAddress;
    }

    /**
     * Additional attributes related to this node, e.g. {"ml.max_open_jobs": "10"}.
     */
    public Map<String, String> getAttributes() {
        return attributes;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(ID.getPreferredName(), id);
        builder.field(NAME.getPreferredName(), name);
        builder.field(EPHEMERAL_ID.getPreferredName(), ephemeralId);
        builder.field(TRANSPORT_ADDRESS.getPreferredName(), transportAddress);
        builder.field(ATTRIBUTES.getPreferredName(), attributes);
        builder.endObject();
        return builder;
    }

    @Override
    public int hashCode() {
        return Objects.hash(id, name, ephemeralId, transportAddress, attributes);
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }

        if (other == null || getClass() != other.getClass()) {
            return false;
        }

        NodeAttributes that = (NodeAttributes) other;
        return Objects.equals(id, that.id) &&
            Objects.equals(name, that.name) &&
            Objects.equals(ephemeralId, that.ephemeralId) &&
            Objects.equals(transportAddress, that.transportAddress) &&
            Objects.equals(attributes, that.attributes);
    }
}
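Since the parser is declared lenient (the `true` argument), unknown fields in the node object are ignored. A hedged sketch of parsing one node description from a JSON string with the codebase's XContent helpers (the field values are made up):

// Sketch only: illustrative JSON; the lenient PARSER ignores any extra fields.
String json = "{\"id\":\"node-id-1\",\"name\":\"node-0\",\"ephemeral_id\":\"eph-1\","
    + "\"transport_address\":\"127.0.0.1:9300\",\"attributes\":{\"ml.enabled\":\"true\"}}";
try (XContentParser parser = XContentType.JSON.xContent().createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    NodeAttributes node = NodeAttributes.PARSER.apply(parser, null);
    System.out.println(node.getName() + " @ " + node.getTransportAddress());
}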
@ -0,0 +1,39 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client.ml.job.config;
|
||||
|
||||
import java.util.Locale;
|
||||
|
||||
/**
 * Jobs, whether running or complete, are in one of these states.
 * When a job is created it is initialised in the CLOSED state,
 * i.e. it is not running.
 */
public enum JobState {

    CLOSING, CLOSED, OPENED, FAILED, OPENING;

    public static JobState fromString(String name) {
        return valueOf(name.trim().toUpperCase(Locale.ROOT));
    }

    public String value() {
        return name().toLowerCase(Locale.ROOT);
    }
}
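
A quick round-trip sketch between the lowercase wire format and the enum constants (assuming the enum above is on the classpath):

import org.elasticsearch.client.ml.job.config.JobState;

public class JobStateExample {
    public static void main(String[] args) {
        // fromString trims and upper-cases, so parsing is case-insensitive.
        JobState state = JobState.fromString(" opened ");
        System.out.println(state);          // OPENED
        System.out.println(state.value());  // "opened", the wire format
    }
}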

@@ -0,0 +1,174 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml.job.stats;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

/**
 * A class to hold statistics about forecasts.
 */
public class ForecastStats implements ToXContentObject {

    public static final ParseField TOTAL = new ParseField("total");
    public static final ParseField FORECASTED_JOBS = new ParseField("forecasted_jobs");
    public static final ParseField MEMORY_BYTES = new ParseField("memory_bytes");
    public static final ParseField PROCESSING_TIME_MS = new ParseField("processing_time_ms");
    public static final ParseField RECORDS = new ParseField("records");
    public static final ParseField STATUS = new ParseField("status");

    @SuppressWarnings("unchecked")
    public static final ConstructingObjectParser<ForecastStats, Void> PARSER =
        new ConstructingObjectParser<>("forecast_stats",
            true,
            (a) -> {
                int i = 0;
                long total = (long) a[i++];
                SimpleStats memoryStats = (SimpleStats) a[i++];
                SimpleStats recordStats = (SimpleStats) a[i++];
                SimpleStats runtimeStats = (SimpleStats) a[i++];
                Map<String, Long> statusCounts = (Map<String, Long>) a[i];
                return new ForecastStats(total, memoryStats, recordStats, runtimeStats, statusCounts);
            });

    static {
        PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL);
        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SimpleStats.PARSER, MEMORY_BYTES);
        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SimpleStats.PARSER, RECORDS);
        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SimpleStats.PARSER, PROCESSING_TIME_MS);
        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
            p -> {
                Map<String, Long> counts = new HashMap<>();
                p.map().forEach((key, value) -> counts.put(key, ((Number) value).longValue()));
                return counts;
            }, STATUS, ObjectParser.ValueType.OBJECT);
    }

    private final long total;
    private final long forecastedJobs;
    private SimpleStats memoryStats;
    private SimpleStats recordStats;
    private SimpleStats runtimeStats;
    private Map<String, Long> statusCounts;

    public ForecastStats(long total,
                         SimpleStats memoryStats,
                         SimpleStats recordStats,
                         SimpleStats runtimeStats,
                         Map<String, Long> statusCounts) {
        this.total = total;
        this.forecastedJobs = total > 0 ? 1 : 0;
        if (total > 0) {
            this.memoryStats = Objects.requireNonNull(memoryStats);
            this.recordStats = Objects.requireNonNull(recordStats);
            this.runtimeStats = Objects.requireNonNull(runtimeStats);
            this.statusCounts = Collections.unmodifiableMap(statusCounts);
        }
    }

    /**
     * The number of forecasts currently available for this model.
     */
    public long getTotal() {
        return total;
    }

    /**
     * The number of jobs that have at least one forecast.
     */
    public long getForecastedJobs() {
        return forecastedJobs;
    }

    /**
     * Statistics about the memory usage: minimum, maximum, average and total.
     */
    public SimpleStats getMemoryStats() {
        return memoryStats;
    }

    /**
     * Statistics about the number of forecast records: minimum, maximum, average and total.
     */
    public SimpleStats getRecordStats() {
        return recordStats;
    }

    /**
     * Statistics about the forecast runtime in milliseconds: minimum, maximum, average and total.
     */
    public SimpleStats getRuntimeStats() {
        return runtimeStats;
    }

    /**
     * Counts per forecast status, for example: {"finished" : 2}.
     */
    public Map<String, Long> getStatusCounts() {
        return statusCounts;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(TOTAL.getPreferredName(), total);
        builder.field(FORECASTED_JOBS.getPreferredName(), forecastedJobs);

        if (total > 0) {
            builder.field(MEMORY_BYTES.getPreferredName(), memoryStats);
            builder.field(RECORDS.getPreferredName(), recordStats);
            builder.field(PROCESSING_TIME_MS.getPreferredName(), runtimeStats);
            builder.field(STATUS.getPreferredName(), statusCounts);
        }
        return builder.endObject();
    }

    @Override
    public int hashCode() {
        return Objects.hash(total, forecastedJobs, memoryStats, recordStats, runtimeStats, statusCounts);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }

        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }

        ForecastStats other = (ForecastStats) obj;
        return Objects.equals(total, other.total) &&
            Objects.equals(forecastedJobs, other.forecastedJobs) &&
            Objects.equals(memoryStats, other.memoryStats) &&
            Objects.equals(recordStats, other.recordStats) &&
            Objects.equals(runtimeStats, other.runtimeStats) &&
            Objects.equals(statusCounts, other.statusCounts);
    }
}
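
Note the constructor contract above: when total is zero the nested statistics stay null and are omitted from the XContent output. A small sketch of that behaviour (values illustrative):

import org.elasticsearch.client.ml.job.stats.ForecastStats;

public class ForecastStatsExample {
    public static void main(String[] args) {
        // With total == 0 the nested SimpleStats arguments may be null.
        ForecastStats empty = new ForecastStats(0, null, null, null, null);
        System.out.println(empty.getForecastedJobs()); // 0
        System.out.println(empty.getMemoryStats());    // null, skipped by toXContent
    }
}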

@@ -0,0 +1,225 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml.job.stats;

import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.client.ml.job.config.JobState;
import org.elasticsearch.client.ml.job.process.DataCounts;
import org.elasticsearch.client.ml.job.process.ModelSizeStats;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.client.ml.NodeAttributes;

import java.io.IOException;
import java.util.Objects;

/**
 * Class containing the statistics for a Machine Learning job.
 */
public class JobStats implements ToXContentObject {

    private static final ParseField DATA_COUNTS = new ParseField("data_counts");
    private static final ParseField MODEL_SIZE_STATS = new ParseField("model_size_stats");
    private static final ParseField FORECASTS_STATS = new ParseField("forecasts_stats");
    private static final ParseField STATE = new ParseField("state");
    private static final ParseField NODE = new ParseField("node");
    private static final ParseField OPEN_TIME = new ParseField("open_time");
    private static final ParseField ASSIGNMENT_EXPLANATION = new ParseField("assignment_explanation");

    public static final ConstructingObjectParser<JobStats, Void> PARSER =
        new ConstructingObjectParser<>("job_stats",
            true,
            (a) -> {
                int i = 0;
                String jobId = (String) a[i++];
                DataCounts dataCounts = (DataCounts) a[i++];
                JobState jobState = (JobState) a[i++];
                ModelSizeStats.Builder modelSizeStatsBuilder = (ModelSizeStats.Builder) a[i++];
                ModelSizeStats modelSizeStats = modelSizeStatsBuilder == null ? null : modelSizeStatsBuilder.build();
                ForecastStats forecastStats = (ForecastStats) a[i++];
                NodeAttributes node = (NodeAttributes) a[i++];
                String assignmentExplanation = (String) a[i++];
                TimeValue openTime = (TimeValue) a[i];
                return new JobStats(jobId,
                    dataCounts,
                    jobState,
                    modelSizeStats,
                    forecastStats,
                    node,
                    assignmentExplanation,
                    openTime);
            });

    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
        PARSER.declareObject(ConstructingObjectParser.constructorArg(), DataCounts.PARSER, DATA_COUNTS);
        PARSER.declareField(ConstructingObjectParser.constructorArg(),
            (p) -> JobState.fromString(p.text()),
            STATE,
            ObjectParser.ValueType.VALUE);
        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ModelSizeStats.PARSER, MODEL_SIZE_STATS);
        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ForecastStats.PARSER, FORECASTS_STATS);
        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), NodeAttributes.PARSER, NODE);
        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), ASSIGNMENT_EXPLANATION);
        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> TimeValue.parseTimeValue(p.textOrNull(), OPEN_TIME.getPreferredName()),
            OPEN_TIME,
            ObjectParser.ValueType.STRING_OR_NULL);
    }

    private final String jobId;
    private final DataCounts dataCounts;
    private final JobState state;
    private final ModelSizeStats modelSizeStats;
    private final ForecastStats forecastStats;
    private final NodeAttributes node;
    private final String assignmentExplanation;
    private final TimeValue openTime;

    JobStats(String jobId, DataCounts dataCounts, JobState state, @Nullable ModelSizeStats modelSizeStats,
             @Nullable ForecastStats forecastStats, @Nullable NodeAttributes node,
             @Nullable String assignmentExplanation, @Nullable TimeValue openTime) {
        this.jobId = Objects.requireNonNull(jobId);
        this.dataCounts = Objects.requireNonNull(dataCounts);
        this.state = Objects.requireNonNull(state);
        this.modelSizeStats = modelSizeStats;
        this.forecastStats = forecastStats;
        this.node = node;
        this.assignmentExplanation = assignmentExplanation;
        this.openTime = openTime;
    }

    /**
     * The jobId referencing the job for these statistics
     */
    public String getJobId() {
        return jobId;
    }

    /**
     * An object that describes the number of records processed and any related error counts
     * See {@link DataCounts}
     */
    public DataCounts getDataCounts() {
        return dataCounts;
    }

    /**
     * An object that provides information about the size and contents of the model.
     * See {@link ModelSizeStats}
     */
    public ModelSizeStats getModelSizeStats() {
        return modelSizeStats;
    }

    /**
     * An object that provides statistical information about forecasts of this job.
     * See {@link ForecastStats}
     */
    public ForecastStats getForecastStats() {
        return forecastStats;
    }

    /**
     * The status of the job
     * See {@link JobState}
     */
    public JobState getState() {
        return state;
    }

    /**
     * For open jobs only, contains information about the node where the job runs
     * See {@link NodeAttributes}
     */
    public NodeAttributes getNode() {
        return node;
    }

    /**
     * For open jobs only, contains messages relating to the selection of a node to run the job.
     */
    public String getAssignmentExplanation() {
        return assignmentExplanation;
    }

    /**
     * For open jobs only, the elapsed time for which the job has been open
     */
    public TimeValue getOpenTime() {
        return openTime;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(Job.ID.getPreferredName(), jobId);
        builder.field(DATA_COUNTS.getPreferredName(), dataCounts);
        builder.field(STATE.getPreferredName(), state.toString());
        if (modelSizeStats != null) {
            builder.field(MODEL_SIZE_STATS.getPreferredName(), modelSizeStats);
        }
        if (forecastStats != null) {
            builder.field(FORECASTS_STATS.getPreferredName(), forecastStats);
        }
        if (node != null) {
            builder.field(NODE.getPreferredName(), node);
        }
        if (assignmentExplanation != null) {
            builder.field(ASSIGNMENT_EXPLANATION.getPreferredName(), assignmentExplanation);
        }
        if (openTime != null) {
            builder.field(OPEN_TIME.getPreferredName(), openTime.getStringRep());
        }
        return builder.endObject();
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobId, dataCounts, modelSizeStats, forecastStats, state, node, assignmentExplanation, openTime);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }

        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }

        JobStats other = (JobStats) obj;
        return Objects.equals(jobId, other.jobId) &&
            Objects.equals(this.dataCounts, other.dataCounts) &&
            Objects.equals(this.modelSizeStats, other.modelSizeStats) &&
            Objects.equals(this.forecastStats, other.forecastStats) &&
            Objects.equals(this.state, other.state) &&
            Objects.equals(this.node, other.node) &&
            Objects.equals(this.assignmentExplanation, other.assignmentExplanation) &&
            Objects.equals(this.openTime, other.openTime);
    }
}
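
A sketch of how these getters are typically consumed through the high-level client (assumes an already-configured RestHighLevelClient; the job id pattern is illustrative):

import java.io.IOException;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ml.GetJobStatsRequest;
import org.elasticsearch.client.ml.GetJobStatsResponse;
import org.elasticsearch.client.ml.job.config.JobState;
import org.elasticsearch.client.ml.job.stats.JobStats;

public class JobStatsExample {
    static void printOpenJobs(RestHighLevelClient client) throws IOException {
        GetJobStatsResponse response = client.machineLearning()
                .getJobStats(new GetJobStatsRequest("my-job-*"), RequestOptions.DEFAULT);
        for (JobStats stats : response.jobStats()) {
            // Node and open-time details are only populated while a job is open.
            if (stats.getState() == JobState.OPENED) {
                System.out.println(stats.getJobId() + " open for " + stats.getOpenTime());
            }
        }
    }
}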

@@ -0,0 +1,117 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml.job.stats;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Objects;

/**
 * Helper class for min, max, avg and total statistics for a quantity
 */
public class SimpleStats implements ToXContentObject {

    public static final ParseField MIN = new ParseField("min");
    public static final ParseField MAX = new ParseField("max");
    public static final ParseField AVG = new ParseField("avg");
    public static final ParseField TOTAL = new ParseField("total");

    public static final ConstructingObjectParser<SimpleStats, Void> PARSER = new ConstructingObjectParser<>("simple_stats", true,
        (a) -> {
            int i = 0;
            double total = (double) a[i++];
            double min = (double) a[i++];
            double max = (double) a[i++];
            double avg = (double) a[i++];
            return new SimpleStats(total, min, max, avg);
        });

    static {
        PARSER.declareDouble(ConstructingObjectParser.constructorArg(), TOTAL);
        PARSER.declareDouble(ConstructingObjectParser.constructorArg(), MIN);
        PARSER.declareDouble(ConstructingObjectParser.constructorArg(), MAX);
        PARSER.declareDouble(ConstructingObjectParser.constructorArg(), AVG);
    }

    private final double total;
    private final double min;
    private final double max;
    private final double avg;

    SimpleStats(double total, double min, double max, double avg) {
        this.total = total;
        this.min = min;
        this.max = max;
        this.avg = avg;
    }

    public double getMin() {
        return min;
    }

    public double getMax() {
        return max;
    }

    public double getAvg() {
        return avg;
    }

    public double getTotal() {
        return total;
    }

    @Override
    public int hashCode() {
        return Objects.hash(total, min, max, avg);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }

        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }

        SimpleStats other = (SimpleStats) obj;
        return Objects.equals(total, other.total) &&
            Objects.equals(min, other.min) &&
            Objects.equals(avg, other.avg) &&
            Objects.equals(max, other.max);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(MIN.getPreferredName(), min);
        builder.field(MAX.getPreferredName(), max);
        builder.field(AVG.getPreferredName(), avg);
        builder.field(TOTAL.getPreferredName(), total);
        builder.endObject();
        return builder;
    }
}
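
A minimal parse sketch for the class above (the JSON shape follows the ParseFields declared here; the XContent helper classes are those used elsewhere in this codebase):

import org.elasticsearch.client.ml.job.stats.SimpleStats;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

public class SimpleStatsExample {
    public static void main(String[] args) throws Exception {
        String json = "{\"total\":42.0,\"min\":1.0,\"max\":20.0,\"avg\":10.5}";
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY,
                DeprecationHandler.THROW_UNSUPPORTED_OPERATION_DEPRECATION_WARNINGS,
                json)) {
            // ConstructingObjectParser implements BiFunction, hence apply().
            SimpleStats stats = SimpleStats.PARSER.apply(parser, null);
            System.out.println(stats.getAvg()); // 10.5
        }
    }
}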

@@ -51,6 +51,7 @@ import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.query.IdsQueryBuilder;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.index.reindex.ReindexRequest;
import org.elasticsearch.index.reindex.UpdateByQueryRequest;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

@@ -691,6 +692,72 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
        }
    }

    public void testUpdateByQuery() throws IOException {
        final String sourceIndex = "source1";
        {
            // Prepare
            Settings settings = Settings.builder()
                .put("number_of_shards", 1)
                .put("number_of_replicas", 0)
                .build();
            createIndex(sourceIndex, settings);
            assertEquals(
                RestStatus.OK,
                highLevelClient().bulk(
                    new BulkRequest()
                        .add(new IndexRequest(sourceIndex, "type", "1")
                            .source(Collections.singletonMap("foo", 1), XContentType.JSON))
                        .add(new IndexRequest(sourceIndex, "type", "2")
                            .source(Collections.singletonMap("foo", 2), XContentType.JSON))
                        .setRefreshPolicy(RefreshPolicy.IMMEDIATE),
                    RequestOptions.DEFAULT
                ).status()
            );
        }
        {
            // test1: update one document, matched by id
            UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest();
            updateByQueryRequest.indices(sourceIndex);
            updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1").types("type"));
            updateByQueryRequest.setRefresh(true);
            BulkByScrollResponse bulkResponse =
                execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync);
            assertEquals(1, bulkResponse.getTotal());
            assertEquals(1, bulkResponse.getUpdated());
            assertEquals(0, bulkResponse.getNoops());
            assertEquals(0, bulkResponse.getVersionConflicts());
            assertEquals(1, bulkResponse.getBatches());
            assertTrue(bulkResponse.getTook().getMillis() > 0);
            assertEquals(0, bulkResponse.getBulkFailures().size());
            assertEquals(0, bulkResponse.getSearchFailures().size());
        }
        {
            // test2: update using script
            UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest();
            updateByQueryRequest.indices(sourceIndex);
            updateByQueryRequest.setScript(new Script("if (ctx._source.foo == 2) ctx._source.foo++;"));
            updateByQueryRequest.setRefresh(true);
            BulkByScrollResponse bulkResponse =
                execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync);
            assertEquals(2, bulkResponse.getTotal());
            assertEquals(2, bulkResponse.getUpdated());
            assertEquals(0, bulkResponse.getDeleted());
            assertEquals(0, bulkResponse.getNoops());
            assertEquals(0, bulkResponse.getVersionConflicts());
            assertEquals(1, bulkResponse.getBatches());
            assertTrue(bulkResponse.getTook().getMillis() > 0);
            assertEquals(0, bulkResponse.getBulkFailures().size());
            assertEquals(0, bulkResponse.getSearchFailures().size());
            assertEquals(
                3,
                (int) (highLevelClient().get(new GetRequest(sourceIndex, "type", "2"), RequestOptions.DEFAULT)
                    .getSourceAsMap().get("foo"))
            );
        }
    }

    public void testBulkProcessorIntegration() throws IOException {
        int nbItems = randomIntBetween(10, 100);
        boolean[] errors = new boolean[nbItems];

@@ -36,6 +36,8 @@ import org.elasticsearch.client.ml.job.util.PageParams;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.client.ml.FlushJobRequest;
import org.elasticsearch.client.ml.GetJobStatsRequest;
import org.elasticsearch.test.ESTestCase;

import java.io.ByteArrayOutputStream;

@@ -139,6 +141,44 @@ public class MLRequestConvertersTests extends ESTestCase {
        }
    }

    public void testFlushJob() throws Exception {
        String jobId = randomAlphaOfLength(10);
        FlushJobRequest flushJobRequest = new FlushJobRequest(jobId);

        Request request = MLRequestConverters.flushJob(flushJobRequest);
        assertEquals(HttpPost.METHOD_NAME, request.getMethod());
        assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_flush", request.getEndpoint());
        assertEquals("{\"job_id\":\"" + jobId + "\"}", requestEntityToString(request));

        flushJobRequest.setSkipTime("1000");
        flushJobRequest.setStart("105");
        flushJobRequest.setEnd("200");
        flushJobRequest.setAdvanceTime("100");
        flushJobRequest.setCalcInterim(true);
        request = MLRequestConverters.flushJob(flushJobRequest);
        assertEquals(
            "{\"job_id\":\"" + jobId + "\",\"calc_interim\":true,\"start\":\"105\"," +
                "\"end\":\"200\",\"advance_time\":\"100\",\"skip_time\":\"1000\"}",
            requestEntityToString(request));
    }

    public void testGetJobStats() {
        GetJobStatsRequest getJobStatsRequest = new GetJobStatsRequest();

        Request request = MLRequestConverters.getJobStats(getJobStatsRequest);

        assertEquals(HttpGet.METHOD_NAME, request.getMethod());
        assertEquals("/_xpack/ml/anomaly_detectors/_stats", request.getEndpoint());
        assertFalse(request.getParameters().containsKey("allow_no_jobs"));

        getJobStatsRequest = new GetJobStatsRequest("job1", "jobs*");
        getJobStatsRequest.setAllowNoJobs(true);
        request = MLRequestConverters.getJobStats(getJobStatsRequest);

        assertEquals("/_xpack/ml/anomaly_detectors/job1,jobs*/_stats", request.getEndpoint());
        assertEquals(Boolean.toString(true), request.getParameters().get("allow_no_jobs"));
    }

    private static Job createValidJob(String jobId) {
        AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList(
            Detector.builder().setFunction("count").build()));

@@ -19,6 +19,12 @@
package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.client.ml.GetJobStatsRequest;
import org.elasticsearch.client.ml.GetJobStatsResponse;
import org.elasticsearch.client.ml.job.config.JobState;
import org.elasticsearch.client.ml.job.stats.JobStats;
import org.elasticsearch.client.ml.CloseJobRequest;
import org.elasticsearch.client.ml.CloseJobResponse;
import org.elasticsearch.client.ml.DeleteJobRequest;

@@ -34,6 +40,8 @@ import org.elasticsearch.client.ml.job.config.DataDescription;
import org.elasticsearch.client.ml.job.config.Detector;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.client.ml.FlushJobRequest;
import org.elasticsearch.client.ml.FlushJobResponse;
import org.junit.After;

import java.io.IOException;

@@ -41,6 +49,7 @@ import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.hasItems;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.hasSize;

@@ -138,6 +147,77 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
        assertTrue(response.isClosed());
    }

    public void testFlushJob() throws Exception {
        String jobId = randomValidJobId();
        Job job = buildJob(jobId);
        MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
        machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
        machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT);

        FlushJobResponse response = execute(new FlushJobRequest(jobId),
            machineLearningClient::flushJob,
            machineLearningClient::flushJobAsync);
        assertTrue(response.isFlushed());
    }

    public void testGetJobStats() throws Exception {
        String jobId1 = "ml-get-job-stats-test-id-1";
        String jobId2 = "ml-get-job-stats-test-id-2";

        Job job1 = buildJob(jobId1);
        Job job2 = buildJob(jobId2);
        MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
        machineLearningClient.putJob(new PutJobRequest(job1), RequestOptions.DEFAULT);
        machineLearningClient.putJob(new PutJobRequest(job2), RequestOptions.DEFAULT);

        machineLearningClient.openJob(new OpenJobRequest(jobId1), RequestOptions.DEFAULT);

        GetJobStatsRequest request = new GetJobStatsRequest(jobId1, jobId2);

        // Test getting specific
        GetJobStatsResponse response = execute(request, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync);

        assertEquals(2, response.count());
        assertThat(response.jobStats(), hasSize(2));
        assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), containsInAnyOrder(jobId1, jobId2));
        for (JobStats stats : response.jobStats()) {
            if (stats.getJobId().equals(jobId1)) {
                assertEquals(JobState.OPENED, stats.getState());
            } else {
                assertEquals(JobState.CLOSED, stats.getState());
            }
        }

        // Test getting all explicitly
        request = GetJobStatsRequest.getAllJobStatsRequest();
        response = execute(request, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync);

        assertTrue(response.count() >= 2L);
        assertTrue(response.jobStats().size() >= 2L);
        assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), hasItems(jobId1, jobId2));

        // Test getting all implicitly
        response = execute(new GetJobStatsRequest(), machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync);

        assertTrue(response.count() >= 2L);
        assertTrue(response.jobStats().size() >= 2L);
        assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), hasItems(jobId1, jobId2));

        // Test getting all with wildcard
        request = new GetJobStatsRequest("ml-get-job-stats-test-id-*");
        response = execute(request, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync);
        assertTrue(response.count() >= 2L);
        assertTrue(response.jobStats().size() >= 2L);
        assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), hasItems(jobId1, jobId2));

        // Test when allow_no_jobs is false
        final GetJobStatsRequest erroredRequest = new GetJobStatsRequest("jobs-that-do-not-exist*");
        erroredRequest.setAllowNoJobs(false);
        ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class,
            () -> execute(erroredRequest, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync));
        assertThat(exception.status().getStatus(), equalTo(404));
    }

    public static String randomValidJobId() {
        CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray());
        return generator.ofCodePointsLength(random(), 10, 10);

@@ -132,6 +132,7 @@ import org.elasticsearch.index.rankeval.RatedRequest;
import org.elasticsearch.index.rankeval.RestRankEvalAction;
import org.elasticsearch.index.reindex.ReindexRequest;
import org.elasticsearch.index.reindex.RemoteInfo;
import org.elasticsearch.index.reindex.UpdateByQueryRequest;
import org.elasticsearch.protocol.xpack.XPackInfoRequest;
import org.elasticsearch.protocol.xpack.indexlifecycle.ExplainLifecycleRequest;
import org.elasticsearch.protocol.xpack.indexlifecycle.SetIndexLifecyclePolicyRequest;

@@ -144,6 +145,7 @@ import org.elasticsearch.protocol.xpack.graph.Hop;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.script.mustache.MultiSearchTemplateRequest;
import org.elasticsearch.script.mustache.SearchTemplateRequest;

@@ -478,6 +480,60 @@ public class RequestConvertersTests extends ESTestCase {
        assertToXContentBody(reindexRequest, request.getEntity());
    }

    public void testUpdateByQuery() throws IOException {
        UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest();
        updateByQueryRequest.indices(randomIndicesNames(1, 5));
        Map<String, String> expectedParams = new HashMap<>();
        if (randomBoolean()) {
            updateByQueryRequest.setDocTypes(generateRandomStringArray(5, 5, false, false));
        }
        if (randomBoolean()) {
            int batchSize = randomInt(100);
            updateByQueryRequest.setBatchSize(batchSize);
            expectedParams.put("scroll_size", Integer.toString(batchSize));
        }
        if (randomBoolean()) {
            updateByQueryRequest.setPipeline("my_pipeline");
            expectedParams.put("pipeline", "my_pipeline");
        }
        if (randomBoolean()) {
            updateByQueryRequest.setRouting("=cat");
            expectedParams.put("routing", "=cat");
        }
        if (randomBoolean()) {
            int size = randomIntBetween(100, 1000);
            updateByQueryRequest.setSize(size);
            expectedParams.put("size", Integer.toString(size));
        }
        if (randomBoolean()) {
            updateByQueryRequest.setAbortOnVersionConflict(false);
            expectedParams.put("conflicts", "proceed");
        }
        if (randomBoolean()) {
            String ts = randomTimeValue();
            updateByQueryRequest.setScroll(TimeValue.parseTimeValue(ts, "scroll"));
            expectedParams.put("scroll", ts);
        }
        if (randomBoolean()) {
            updateByQueryRequest.setQuery(new TermQueryBuilder("foo", "fooval"));
        }
        if (randomBoolean()) {
            updateByQueryRequest.setScript(new Script("ctx._source.last = \"lastname\""));
        }
        setRandomIndicesOptions(updateByQueryRequest::setIndicesOptions, updateByQueryRequest::indicesOptions, expectedParams);
        setRandomTimeout(updateByQueryRequest::setTimeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams);
        Request request = RequestConverters.updateByQuery(updateByQueryRequest);
        StringJoiner joiner = new StringJoiner("/", "/", "");
        joiner.add(String.join(",", updateByQueryRequest.indices()));
        if (updateByQueryRequest.getDocTypes().length > 0) {
            joiner.add(String.join(",", updateByQueryRequest.getDocTypes()));
        }
        joiner.add("_update_by_query");
        assertEquals(joiner.toString(), request.getEndpoint());
        assertEquals(HttpPost.METHOD_NAME, request.getMethod());
        assertEquals(expectedParams, request.getParameters());
        assertToXContentBody(updateByQueryRequest, request.getEntity());
    }

    public void testPutMapping() throws IOException {
        PutMappingRequest putMappingRequest = new PutMappingRequest();

@@ -664,8 +664,7 @@ public class RestHighLevelClientTests extends ESTestCase {
             "render_search_template",
             "scripts_painless_execute",
             "tasks.get",
-            "termvectors",
-            "update_by_query"
+            "termvectors"
         };
         //These API are not required for high-level client feature completeness
         String[] notRequiredApi = new String[] {

@@ -39,6 +39,7 @@ import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.support.replication.ReplicationResponse;

@@ -67,6 +68,7 @@ import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.index.reindex.ReindexRequest;
import org.elasticsearch.index.reindex.RemoteInfo;
import org.elasticsearch.index.reindex.ScrollableHitSource;
import org.elasticsearch.index.reindex.UpdateByQueryRequest;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

@@ -899,6 +901,125 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
        }
    }

    public void testUpdateByQuery() throws Exception {
        RestHighLevelClient client = highLevelClient();
        {
            String mapping =
                "\"doc\": {\n" +
                "    \"properties\": {\n" +
                "        \"user\": {\n" +
                "            \"type\": \"text\"\n" +
                "        },\n" +
                "        \"field1\": {\n" +
                "            \"type\": \"integer\"\n" +
                "        },\n" +
                "        \"field2\": {\n" +
                "            \"type\": \"integer\"\n" +
                "        }\n" +
                "    }\n" +
                "}";
            createIndex("source1", Settings.EMPTY, mapping);
            createIndex("source2", Settings.EMPTY, mapping);
            createPipeline("my_pipeline");
        }
        {
            // tag::update-by-query-request
            UpdateByQueryRequest request = new UpdateByQueryRequest("source1", "source2"); // <1>
            // end::update-by-query-request
            // tag::update-by-query-request-conflicts
            request.setConflicts("proceed"); // <1>
            // end::update-by-query-request-conflicts
            // tag::update-by-query-request-typeOrQuery
            request.setDocTypes("doc"); // <1>
            request.setQuery(new TermQueryBuilder("user", "kimchy")); // <2>
            // end::update-by-query-request-typeOrQuery
            // tag::update-by-query-request-size
            request.setSize(10); // <1>
            // end::update-by-query-request-size
            // tag::update-by-query-request-scrollSize
            request.setBatchSize(100); // <1>
            // end::update-by-query-request-scrollSize
            // tag::update-by-query-request-pipeline
            request.setPipeline("my_pipeline"); // <1>
            // end::update-by-query-request-pipeline
            // tag::update-by-query-request-script
            request.setScript(
                new Script(
                    ScriptType.INLINE, "painless",
                    "if (ctx._source.user == 'kimchy') {ctx._source.likes++;}",
                    Collections.emptyMap())); // <1>
            // end::update-by-query-request-script
            // tag::update-by-query-request-timeout
            request.setTimeout(TimeValue.timeValueMinutes(2)); // <1>
            // end::update-by-query-request-timeout
            // tag::update-by-query-request-refresh
            request.setRefresh(true); // <1>
            // end::update-by-query-request-refresh
            // tag::update-by-query-request-slices
            request.setSlices(2); // <1>
            // end::update-by-query-request-slices
            // tag::update-by-query-request-scroll
            request.setScroll(TimeValue.timeValueMinutes(10)); // <1>
            // end::update-by-query-request-scroll
            // tag::update-by-query-request-routing
            request.setRouting("=cat"); // <1>
            // end::update-by-query-request-routing
            // tag::update-by-query-request-indicesOptions
            request.setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); // <1>
            // end::update-by-query-request-indicesOptions

            // tag::update-by-query-execute
            BulkByScrollResponse bulkResponse = client.updateByQuery(request, RequestOptions.DEFAULT);
            // end::update-by-query-execute
            assertSame(0, bulkResponse.getSearchFailures().size());
            assertSame(0, bulkResponse.getBulkFailures().size());
            // tag::update-by-query-response
            TimeValue timeTaken = bulkResponse.getTook(); // <1>
            boolean timedOut = bulkResponse.isTimedOut(); // <2>
            long totalDocs = bulkResponse.getTotal(); // <3>
            long updatedDocs = bulkResponse.getUpdated(); // <4>
            long deletedDocs = bulkResponse.getDeleted(); // <5>
            long batches = bulkResponse.getBatches(); // <6>
            long noops = bulkResponse.getNoops(); // <7>
            long versionConflicts = bulkResponse.getVersionConflicts(); // <8>
            long bulkRetries = bulkResponse.getBulkRetries(); // <9>
            long searchRetries = bulkResponse.getSearchRetries(); // <10>
            TimeValue throttledMillis = bulkResponse.getStatus().getThrottled(); // <11>
            TimeValue throttledUntilMillis = bulkResponse.getStatus().getThrottledUntil(); // <12>
            List<ScrollableHitSource.SearchFailure> searchFailures = bulkResponse.getSearchFailures(); // <13>
            List<BulkItemResponse.Failure> bulkFailures = bulkResponse.getBulkFailures(); // <14>
            // end::update-by-query-response
        }
        {
            UpdateByQueryRequest request = new UpdateByQueryRequest();
            request.indices("source1");

            // tag::update-by-query-execute-listener
            ActionListener<BulkByScrollResponse> listener = new ActionListener<BulkByScrollResponse>() {
                @Override
                public void onResponse(BulkByScrollResponse bulkResponse) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
            // end::update-by-query-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::update-by-query-execute-async
            client.updateByQueryAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::update-by-query-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    public void testGet() throws Exception {
        RestHighLevelClient client = highLevelClient();
        {

@@ -35,6 +35,8 @@ import org.elasticsearch.client.ml.GetBucketsRequest;
import org.elasticsearch.client.ml.GetBucketsResponse;
import org.elasticsearch.client.ml.GetJobRequest;
import org.elasticsearch.client.ml.GetJobResponse;
import org.elasticsearch.client.ml.GetJobStatsRequest;
import org.elasticsearch.client.ml.GetJobStatsResponse;
import org.elasticsearch.client.ml.GetRecordsRequest;
import org.elasticsearch.client.ml.GetRecordsResponse;
import org.elasticsearch.client.ml.OpenJobRequest;

@@ -50,6 +52,9 @@ import org.elasticsearch.client.ml.job.results.Bucket;
import org.elasticsearch.client.ml.job.util.PageParams;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.client.ml.FlushJobRequest;
import org.elasticsearch.client.ml.FlushJobResponse;
import org.elasticsearch.client.ml.job.stats.JobStats;
import org.junit.After;

import java.io.IOException;

@@ -458,6 +463,127 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
        }
    }

    public void testFlushJob() throws Exception {
        RestHighLevelClient client = highLevelClient();

        Job job = MachineLearningIT.buildJob("flushing-my-first-machine-learning-job");
        client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
        client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT);

        Job secondJob = MachineLearningIT.buildJob("flushing-my-second-machine-learning-job");
        client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT);
        client.machineLearning().openJob(new OpenJobRequest(secondJob.getId()), RequestOptions.DEFAULT);

        {
            //tag::x-pack-ml-flush-job-request
            FlushJobRequest flushJobRequest = new FlushJobRequest("flushing-my-first-machine-learning-job"); //<1>
            //end::x-pack-ml-flush-job-request

            //tag::x-pack-ml-flush-job-request-options
            flushJobRequest.setCalcInterim(true); //<1>
            flushJobRequest.setAdvanceTime("2018-08-31T16:35:07+00:00"); //<2>
            flushJobRequest.setStart("2018-08-31T16:35:17+00:00"); //<3>
            flushJobRequest.setEnd("2018-08-31T16:35:27+00:00"); //<4>
            flushJobRequest.setSkipTime("2018-08-31T16:35:00+00:00"); //<5>
            //end::x-pack-ml-flush-job-request-options

            //tag::x-pack-ml-flush-job-execute
            FlushJobResponse flushJobResponse = client.machineLearning().flushJob(flushJobRequest, RequestOptions.DEFAULT);
            //end::x-pack-ml-flush-job-execute

            //tag::x-pack-ml-flush-job-response
            boolean isFlushed = flushJobResponse.isFlushed(); //<1>
            Date lastFinalizedBucketEnd = flushJobResponse.getLastFinalizedBucketEnd(); //<2>
            //end::x-pack-ml-flush-job-response
        }
        {
            //tag::x-pack-ml-flush-job-listener
            ActionListener<FlushJobResponse> listener = new ActionListener<FlushJobResponse>() {
                @Override
                public void onResponse(FlushJobResponse flushJobResponse) {
                    //<1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
            //end::x-pack-ml-flush-job-listener
            FlushJobRequest flushJobRequest = new FlushJobRequest("flushing-my-second-machine-learning-job");

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::x-pack-ml-flush-job-execute-async
            client.machineLearning().flushJobAsync(flushJobRequest, RequestOptions.DEFAULT, listener); //<1>
            // end::x-pack-ml-flush-job-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    public void testGetJobStats() throws Exception {
        RestHighLevelClient client = highLevelClient();

        Job job = MachineLearningIT.buildJob("get-machine-learning-job-stats1");
        client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);

        Job secondJob = MachineLearningIT.buildJob("get-machine-learning-job-stats2");
        client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT);

        {
            //tag::x-pack-ml-get-job-stats-request
            GetJobStatsRequest request = new GetJobStatsRequest("get-machine-learning-job-stats1", "get-machine-learning-job-*"); //<1>
            request.setAllowNoJobs(true); //<2>
            //end::x-pack-ml-get-job-stats-request

            //tag::x-pack-ml-get-job-stats-execute
            GetJobStatsResponse response = client.machineLearning().getJobStats(request, RequestOptions.DEFAULT);
            //end::x-pack-ml-get-job-stats-execute

            //tag::x-pack-ml-get-job-stats-response
            long numberOfJobStats = response.count(); //<1>
            List<JobStats> jobStats = response.jobStats(); //<2>
            //end::x-pack-ml-get-job-stats-response

            assertEquals(2, response.count());
            assertThat(response.jobStats(), hasSize(2));
            assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()),
                containsInAnyOrder(job.getId(), secondJob.getId()));
        }
        {
            GetJobStatsRequest request = new GetJobStatsRequest("get-machine-learning-job-stats1", "get-machine-learning-job-*");

            // tag::x-pack-ml-get-job-stats-listener
            ActionListener<GetJobStatsResponse> listener = new ActionListener<GetJobStatsResponse>() {
                @Override
                public void onResponse(GetJobStatsResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
            // end::x-pack-ml-get-job-stats-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::x-pack-ml-get-job-stats-execute-async
            client.machineLearning().getJobStatsAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::x-pack-ml-get-job-stats-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    public void testGetRecords() throws IOException, InterruptedException {
        RestHighLevelClient client = highLevelClient();

@@ -0,0 +1,59 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;

public class FlushJobRequestTests extends AbstractXContentTestCase<FlushJobRequest> {

    @Override
    protected FlushJobRequest createTestInstance() {
        FlushJobRequest request = new FlushJobRequest(randomAlphaOfLengthBetween(1, 20));

        if (randomBoolean()) {
            request.setCalcInterim(randomBoolean());
        }
        if (randomBoolean()) {
            request.setAdvanceTime(String.valueOf(randomLong()));
        }
        if (randomBoolean()) {
            request.setStart(String.valueOf(randomLong()));
        }
        if (randomBoolean()) {
            request.setEnd(String.valueOf(randomLong()));
        }
        if (randomBoolean()) {
            request.setSkipTime(String.valueOf(randomLong()));
        }
        return request;
    }

    @Override
    protected FlushJobRequest doParseInstance(XContentParser parser) throws IOException {
        return FlushJobRequest.PARSER.apply(parser, null);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return false;
    }
}

@@ -0,0 +1,44 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;
import java.util.Date;

public class FlushJobResponseTests extends AbstractXContentTestCase<FlushJobResponse> {

    @Override
    protected FlushJobResponse createTestInstance() {
        return new FlushJobResponse(randomBoolean(),
            randomBoolean() ? null : new Date(randomNonNegativeLong()));
    }

    @Override
    protected FlushJobResponse doParseInstance(XContentParser parser) throws IOException {
        return FlushJobResponse.PARSER.apply(parser, null);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }
}

@@ -26,6 +26,7 @@ import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

public class GetJobResponseTests extends AbstractXContentTestCase<GetJobResponse> {

@@ -46,8 +47,13 @@ public class GetJobResponseTests extends AbstractXContentTestCase<GetJobResponse
        return GetJobResponse.fromXContent(parser);
    }

+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return field -> !field.isEmpty();
+    }
+
    @Override
    protected boolean supportsUnknownFields() {
-        return false;
+        return true;
    }
}

@@ -0,0 +1,69 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class GetJobStatsRequestTests extends AbstractXContentTestCase<GetJobStatsRequest> {

    public void testAllJobsRequest() {
        GetJobStatsRequest request = GetJobStatsRequest.getAllJobStatsRequest();

        assertEquals(1, request.getJobIds().size());
        assertEquals("_all", request.getJobIds().get(0));
    }

    public void testNewWithJobId() {
        Exception exception = expectThrows(NullPointerException.class, () -> new GetJobStatsRequest("job", null));
        assertEquals("jobIds must not contain null values", exception.getMessage());
    }

    @Override
    protected GetJobStatsRequest createTestInstance() {
        int jobCount = randomIntBetween(0, 10);
        List<String> jobIds = new ArrayList<>(jobCount);

        for (int i = 0; i < jobCount; i++) {
            jobIds.add(randomAlphaOfLength(10));
        }

        GetJobStatsRequest request = new GetJobStatsRequest(jobIds);

        if (randomBoolean()) {
            request.setAllowNoJobs(randomBoolean());
        }

        return request;
    }

    @Override
    protected GetJobStatsRequest doParseInstance(XContentParser parser) throws IOException {
        return GetJobStatsRequest.PARSER.parse(parser, null);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return false;
    }
}
@@ -0,0 +1,53 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.ml.job.stats.JobStats;
import org.elasticsearch.client.ml.job.stats.JobStatsTests;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class GetJobStatsResponseTests extends AbstractXContentTestCase<GetJobStatsResponse> {

    @Override
    protected GetJobStatsResponse createTestInstance() {

        int count = randomIntBetween(1, 5);
        List<JobStats> results = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            results.add(JobStatsTests.createRandomInstance());
        }

        return new GetJobStatsResponse(results, count);
    }

    @Override
    protected GetJobStatsResponse doParseInstance(XContentParser parser) throws IOException {
        return GetJobStatsResponse.fromXContent(parser);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return false;
    }
}
@@ -0,0 +1,64 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;

public class NodeAttributesTests extends AbstractXContentTestCase<NodeAttributes> {

    public static NodeAttributes createRandom() {
        int numberOfAttributes = randomIntBetween(1, 10);
        Map<String, String> attributes = new HashMap<>(numberOfAttributes);
        for (int i = 0; i < numberOfAttributes; i++) {
            String val = randomAlphaOfLength(10);
            attributes.put("key-" + i, val);
        }
        return new NodeAttributes(randomAlphaOfLength(10),
            randomAlphaOfLength(10),
            randomAlphaOfLength(10),
            randomAlphaOfLength(10),
            attributes);
    }

    @Override
    protected NodeAttributes createTestInstance() {
        return createRandom();
    }

    @Override
    protected NodeAttributes doParseInstance(XContentParser parser) throws IOException {
        return NodeAttributes.PARSER.parse(parser, null);
    }

    @Override
    protected Predicate<String> getRandomFieldsExcludeFilter() {
        return field -> !field.isEmpty();
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }
}
@@ -0,0 +1,70 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml.job.stats;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;

public class ForecastStatsTests extends AbstractXContentTestCase<ForecastStats> {

    @Override
    public ForecastStats createTestInstance() {
        if (randomBoolean()) {
            return createRandom(1, 22);
        }
        return new ForecastStats(0, null, null, null, null);
    }

    @Override
    protected ForecastStats doParseInstance(XContentParser parser) throws IOException {
        return ForecastStats.PARSER.parse(parser, null);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }

    @Override
    protected Predicate<String> getRandomFieldsExcludeFilter() {
        return field -> !field.isEmpty();
    }

    public static ForecastStats createRandom(long minTotal, long maxTotal) {
        return new ForecastStats(
            randomLongBetween(minTotal, maxTotal),
            SimpleStatsTests.createRandom(),
            SimpleStatsTests.createRandom(),
            SimpleStatsTests.createRandom(),
            createCountStats());
    }

    private static Map<String, Long> createCountStats() {
        Map<String, Long> countStats = new HashMap<>();
        for (int i = 0; i < randomInt(10); ++i) {
            countStats.put(randomAlphaOfLengthBetween(1, 20), randomLongBetween(1L, 100L));
        }
        return countStats;
    }
}
@@ -0,0 +1,72 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml.job.stats;

import org.elasticsearch.client.ml.NodeAttributes;
import org.elasticsearch.client.ml.NodeAttributesTests;
import org.elasticsearch.client.ml.job.process.DataCounts;
import org.elasticsearch.client.ml.job.process.DataCountsTests;
import org.elasticsearch.client.ml.job.process.ModelSizeStats;
import org.elasticsearch.client.ml.job.process.ModelSizeStatsTests;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.ml.job.config.JobState;
import org.elasticsearch.client.ml.job.config.JobTests;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;
import java.util.function.Predicate;


public class JobStatsTests extends AbstractXContentTestCase<JobStats> {

    public static JobStats createRandomInstance() {
        String jobId = JobTests.randomValidJobId();
        JobState state = randomFrom(JobState.CLOSING, JobState.CLOSED, JobState.OPENED, JobState.FAILED, JobState.OPENING);
        DataCounts dataCounts = DataCountsTests.createTestInstance(jobId);

        ModelSizeStats modelSizeStats = randomBoolean() ? ModelSizeStatsTests.createRandomized() : null;
        ForecastStats forecastStats = randomBoolean() ? ForecastStatsTests.createRandom(1, 22) : null;
        NodeAttributes nodeAttributes = randomBoolean() ? NodeAttributesTests.createRandom() : null;
        String assigmentExplanation = randomBoolean() ? randomAlphaOfLength(10) : null;
        TimeValue openTime = randomBoolean() ? TimeValue.timeValueMillis(randomIntBetween(1, 10000)) : null;

        return new JobStats(jobId, dataCounts, state, modelSizeStats, forecastStats, nodeAttributes, assigmentExplanation, openTime);
    }

    @Override
    protected JobStats createTestInstance() {
        return createRandomInstance();
    }

    @Override
    protected JobStats doParseInstance(XContentParser parser) throws IOException {
        return JobStats.PARSER.parse(parser, null);
    }

    @Override
    protected Predicate<String> getRandomFieldsExcludeFilter() {
        return field -> !field.isEmpty();
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }
}
@@ -0,0 +1,47 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml.job.stats;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;


public class SimpleStatsTests extends AbstractXContentTestCase<SimpleStats> {

    @Override
    protected SimpleStats createTestInstance() {
        return createRandom();
    }

    @Override
    protected SimpleStats doParseInstance(XContentParser parser) throws IOException {
        return SimpleStats.PARSER.parse(parser, null);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }

    public static SimpleStats createRandom() {
        return new SimpleStats(randomDouble(), randomDouble(), randomDouble(), randomDouble());
    }
}
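All of the test classes above lean on the same AbstractXContentTestCase round-trip contract. As a rough sketch of what the harness drives through the three overrides (the builder plumbing here is a simplification, not the literal base-class code):

    // Simplified round-trip, shown for SimpleStats; the real harness also
    // injects random unknown fields when supportsUnknownFields() returns true,
    // skipping any field matched by getRandomFieldsExcludeFilter().
    SimpleStats original = createTestInstance();                  // random instance
    XContentBuilder builder = XContentFactory.jsonBuilder();
    original.toXContent(builder, ToXContent.EMPTY_PARAMS);        // serialize
    try (XContentParser parser = createParser(builder)) {
        SimpleStats parsed = doParseInstance(parser);             // parse it back
        assertEquals(original, parsed);                           // must round-trip
    }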
@@ -85,7 +85,7 @@ import static java.util.Collections.singletonList;
 * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later
 * by calling {@link #setNodes(Collection)}.
 * <p>
 * The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When
 * The method {@link #performRequest(Request)} allows to send a request to the cluster. When
 * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and
 * retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously
 * failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead nodes that
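For anyone still on the removed signatures, a minimal sketch of the Request-based call the javadoc now points to; the endpoint and parameter are illustrative, not taken from this commit:

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    public class PerformRequestExample {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
                // Old: client.performRequest("GET", endpoint, params, entity, headers)
                // New: method, endpoint, parameters and body all live on one Request object.
                Request request = new Request("GET", "/_cluster/health");
                request.addParameter("pretty", "true");
                Response response = client.performRequest(request);
                System.out.println(response.getStatusLine());
            }
        }
    }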
@@ -145,17 +145,6 @@ public class RestClient implements Closeable {
        return new RestClientBuilder(hostsToNodes(hosts));
    }

    /**
     * Replaces the hosts with which the client communicates.
     *
     * @deprecated prefer {@link #setNodes(Collection)} because it allows you
     * to set metadata for use with {@link NodeSelector}s
     */
    @Deprecated
    public void setHosts(HttpHost... hosts) {
        setNodes(hostsToNodes(hosts));
    }

    /**
     * Replaces the nodes with which the client communicates.
     */
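A short sketch of the setNodes(Collection) call that replaces setHosts; the host names and ports are placeholders and restClient is assumed to be an existing client instance:

    // Replaces: restClient.setHosts(new HttpHost("es1", 9200), new HttpHost("es2", 9200));
    restClient.setNodes(Arrays.asList(
        new Node(new HttpHost("es1", 9200)),
        new Node(new HttpHost("es2", 9200))));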
@@ -251,234 +240,6 @@ public class RestClient implements Closeable {
        }
    }

    /**
     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters
     * and request body.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param headers the optional request headers
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     * @deprecated prefer {@link #performRequest(Request)}
     */
    @Deprecated
    public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
        Request request = new Request(method, endpoint);
        addHeaders(request, headers);
        return performRequest(request);
    }

    /**
     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param params the query_string parameters
     * @param headers the optional request headers
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     * @deprecated prefer {@link #performRequest(Request)}
     */
    @Deprecated
    public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
        Request request = new Request(method, endpoint);
        addParameters(request, params);
        addHeaders(request, headers);
        return performRequest(request);
    }

    /**
     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumerFactory, Header...)}
     * which doesn't require specifying an {@link HttpAsyncResponseConsumerFactory} instance,
     * {@link HttpAsyncResponseConsumerFactory} will be used to create the needed instances of {@link HttpAsyncResponseConsumer}.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param params the query_string parameters
     * @param entity the body of the request, null if not applicable
     * @param headers the optional request headers
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     * @deprecated prefer {@link #performRequest(Request)}
     */
    @Deprecated
    public Response performRequest(String method, String endpoint, Map<String, String> params,
                                   HttpEntity entity, Header... headers) throws IOException {
        Request request = new Request(method, endpoint);
        addParameters(request, params);
        request.setEntity(entity);
        addHeaders(request, headers);
        return performRequest(request);
    }

    /**
     * Sends a request to the Elasticsearch cluster that the client points to. Blocks until the request is completed and returns
     * its response or fails by throwing an exception. Selects a host out of the provided ones in a round-robin fashion. Failing hosts
     * are marked dead and retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times
     * they previously failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead
     * nodes that deserve a retry) are retried until one responds or none of them does, in which case an {@link IOException} will be thrown.
     *
     * This method works by performing an asynchronous call and waiting
     * for the result. If the asynchronous call throws an exception we wrap
     * it and rethrow it so that the stack trace attached to the exception
     * contains the call site. While we attempt to preserve the original
     * exception this isn't always possible and likely haven't covered all of
     * the cases. You can get the original exception from
     * {@link Exception#getCause()}.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param params the query_string parameters
     * @param entity the body of the request, null if not applicable
     * @param httpAsyncResponseConsumerFactory the {@link HttpAsyncResponseConsumerFactory} used to create one
     * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the response body gets streamed from a non-blocking HTTP
     * connection on the client side.
     * @param headers the optional request headers
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     * @deprecated prefer {@link #performRequest(Request)}
     */
    @Deprecated
    public Response performRequest(String method, String endpoint, Map<String, String> params,
                                   HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
                                   Header... headers) throws IOException {
        Request request = new Request(method, endpoint);
        addParameters(request, params);
        request.setEntity(entity);
        setOptions(request, httpAsyncResponseConsumerFactory, headers);
        return performRequest(request);
    }

    /**
     * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
     * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without parameters and request body.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
     * @param headers the optional request headers
     * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)}
     */
    @Deprecated
    public void performRequestAsync(String method, String endpoint, ResponseListener responseListener, Header... headers) {
        Request request;
        try {
            request = new Request(method, endpoint);
            addHeaders(request, headers);
        } catch (Exception e) {
            responseListener.onFailure(e);
            return;
        }
        performRequestAsync(request, responseListener);
    }

    /**
     * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
     * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without request body.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param params the query_string parameters
     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
     * @param headers the optional request headers
     * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)}
     */
    @Deprecated
    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                    ResponseListener responseListener, Header... headers) {
        Request request;
        try {
            request = new Request(method, endpoint);
            addParameters(request, params);
            addHeaders(request, headers);
        } catch (Exception e) {
            responseListener.onFailure(e);
            return;
        }
        performRequestAsync(request, responseListener);
    }

    /**
     * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * the provided {@link ResponseListener} will be notified upon completion or failure.
     * Shortcut to {@link #performRequestAsync(String, String, Map, HttpEntity, HttpAsyncResponseConsumerFactory, ResponseListener,
     * Header...)} which doesn't require specifying an {@link HttpAsyncResponseConsumerFactory} instance,
     * {@link HttpAsyncResponseConsumerFactory} will be used to create the needed instances of {@link HttpAsyncResponseConsumer}.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param params the query_string parameters
     * @param entity the body of the request, null if not applicable
     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
     * @param headers the optional request headers
     * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)}
     */
    @Deprecated
    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                    HttpEntity entity, ResponseListener responseListener, Header... headers) {
        Request request;
        try {
            request = new Request(method, endpoint);
            addParameters(request, params);
            request.setEntity(entity);
            addHeaders(request, headers);
        } catch (Exception e) {
            responseListener.onFailure(e);
            return;
        }
        performRequestAsync(request, responseListener);
    }

    /**
     * Sends a request to the Elasticsearch cluster that the client points to. The request is executed asynchronously
     * and the provided {@link ResponseListener} gets notified upon request completion or failure.
     * Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
     * amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures,
     * the later they will be retried). In case of failures all of the alive nodes (or dead nodes that deserve a retry) are retried
     * until one responds or none of them does, in which case an {@link IOException} will be thrown.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param params the query_string parameters
     * @param entity the body of the request, null if not applicable
     * @param httpAsyncResponseConsumerFactory the {@link HttpAsyncResponseConsumerFactory} used to create one
     * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the response body gets streamed from a non-blocking HTTP
     * connection on the client side.
     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
     * @param headers the optional request headers
     * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)}
     */
    @Deprecated
    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                    HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
                                    ResponseListener responseListener, Header... headers) {
        Request request;
        try {
            request = new Request(method, endpoint);
            addParameters(request, params);
            request.setEntity(entity);
            setOptions(request, httpAsyncResponseConsumerFactory, headers);
        } catch (Exception e) {
            responseListener.onFailure(e);
            return;
        }
        performRequestAsync(request, responseListener);
    }

    void performRequestAsyncNoCatch(Request request, ResponseListener listener) throws IOException {
        Map<String, String> requestParams = new HashMap<>(request.getParameters());
        //ignore is a special parameter supported by the clients, shouldn't be sent to es
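The corresponding async migration, sketched against the surviving performRequestAsync(Request, ResponseListener) entry point; the endpoint and listener bodies are illustrative:

    Request request = new Request("GET", "/_cat/indices");
    restClient.performRequestAsync(request, new ResponseListener() {
        @Override
        public void onSuccess(Response response) {
            // handle the response on the client's callback thread
        }

        @Override
        public void onFailure(Exception exception) {
            // handle connection errors and non-2xx ResponseExceptions
        }
    });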
@@ -1035,42 +796,4 @@ public class RestClient implements Closeable {
                itr.remove();
            }
        }

    /**
     * Add all headers from the provided varargs argument to a {@link Request}. This only exists
     * to support methods that exist for backwards compatibility.
     */
    @Deprecated
    private static void addHeaders(Request request, Header... headers) {
        setOptions(request, RequestOptions.DEFAULT.getHttpAsyncResponseConsumerFactory(), headers);
    }

    /**
     * Add all headers from the provided varargs argument to a {@link Request}. This only exists
     * to support methods that exist for backwards compatibility.
     */
    @Deprecated
    private static void setOptions(Request request, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
                                   Header... headers) {
        Objects.requireNonNull(headers, "headers cannot be null");
        RequestOptions.Builder options = request.getOptions().toBuilder();
        for (Header header : headers) {
            Objects.requireNonNull(header, "header cannot be null");
            options.addHeader(header.getName(), header.getValue());
        }
        options.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory);
        request.setOptions(options);
    }

    /**
     * Add all parameters from a map to a {@link Request}. This only exists
     * to support methods that exist for backwards compatibility.
     */
    @Deprecated
    private static void addParameters(Request request, Map<String, String> parameters) {
        Objects.requireNonNull(parameters, "parameters cannot be null");
        for (Map.Entry<String, String> entry : parameters.entrySet()) {
            request.addParameter(entry.getKey(), entry.getValue());
        }
    }
}
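What used to flow through these backwards-compatibility helpers is now spelled directly with RequestOptions, exactly as the helper bodies above do internally; a sketch with an illustrative header value:

    Request request = new Request("GET", "/");
    RequestOptions.Builder options = request.getOptions().toBuilder();
    options.addHeader("X-Opaque-Id", "my-search");   // illustrative header
    request.setOptions(options);
    Response response = restClient.performRequest(request);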
@@ -45,7 +45,6 @@ import java.io.OutputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -215,9 +214,15 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
        }
        final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header");
        final int statusCode = randomStatusCode(getRandom());
        Request request = new Request(method, "/" + statusCode);
        RequestOptions.Builder options = request.getOptions().toBuilder();
        for (Header header : requestHeaders) {
            options.addHeader(header.getName(), header.getValue());
        }
        request.setOptions(options);
        Response esResponse;
        try {
            esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), requestHeaders);
            esResponse = restClient.performRequest(request);
        } catch (ResponseException e) {
            esResponse = e.getResponse();
        }
@@ -59,7 +59,6 @@ import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -69,7 +68,6 @@ import static java.util.Collections.singletonList;
import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes;
import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
import static org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes;
import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod;
import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode;
import static org.elasticsearch.client.SyncResponseListenerTests.assertExceptionStackContainsCallingMethod;
import static org.hamcrest.CoreMatchers.equalTo;
@@ -192,7 +190,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
    public void testOkStatusCodes() throws IOException {
        for (String method : getHttpMethods()) {
            for (int okStatusCode : getOkStatusCodes()) {
                Response response = performRequest(method, "/" + okStatusCode);
                Response response = restClient.performRequest(new Request(method, "/" + okStatusCode));
                assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
            }
        }
@@ -223,13 +221,11 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        //error status codes should cause an exception to be thrown
        for (int errorStatusCode : getAllErrorStatusCodes()) {
            try {
                Map<String, String> params;
                if (ignoreParam.isEmpty()) {
                    params = Collections.emptyMap();
                } else {
                    params = Collections.singletonMap("ignore", ignoreParam);
                Request request = new Request(method, "/" + errorStatusCode);
                if (false == ignoreParam.isEmpty()) {
                    request.addParameter("ignore", ignoreParam);
                }
                Response response = performRequest(method, "/" + errorStatusCode, params);
                Response response = restClient.performRequest(request);
                if (expectedIgnores.contains(errorStatusCode)) {
                    //no exception gets thrown although we got an error status code, as it was configured to be ignored
                    assertEquals(errorStatusCode, response.getStatusLine().getStatusCode());
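As the comment in this hunk notes, ignore is a client-side parameter: status codes listed there come back as normal responses instead of surfacing as ResponseException. A hedged sketch with an illustrative endpoint:

    Request request = new Request("GET", "/index-that-may-not-exist");
    request.addParameter("ignore", "404");   // don't throw on 404
    Response response = restClient.performRequest(request);
    // the status line may now carry 404 without an exception being thrown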
@@ -256,14 +252,14 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        for (String method : getHttpMethods()) {
            //IOExceptions should be let bubble up
            try {
                performRequest(method, "/coe");
                restClient.performRequest(new Request(method, "/coe"));
                fail("request should have failed");
            } catch(IOException e) {
                assertThat(e, instanceOf(ConnectTimeoutException.class));
            }
            failureListener.assertCalled(singletonList(node));
            try {
                performRequest(method, "/soe");
                restClient.performRequest(new Request(method, "/soe"));
                fail("request should have failed");
            } catch(IOException e) {
                assertThat(e, instanceOf(SocketTimeoutException.class));
@@ -313,48 +309,6 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        }
    }

    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests}.
     */
    @Deprecated
    public void tesPerformRequestOldStyleNullHeaders() throws IOException {
        String method = randomHttpMethod(getRandom());
        int statusCode = randomStatusCode(getRandom());
        try {
            performRequest(method, "/" + statusCode, (Header[])null);
            fail("request should have failed");
        } catch(NullPointerException e) {
            assertEquals("request headers must not be null", e.getMessage());
        }
        try {
            performRequest(method, "/" + statusCode, (Header)null);
            fail("request should have failed");
        } catch(NullPointerException e) {
            assertEquals("request header must not be null", e.getMessage());
        }
    }

    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}.
     */
    @Deprecated
    public void testPerformRequestOldStyleWithNullParams() throws IOException {
        String method = randomHttpMethod(getRandom());
        int statusCode = randomStatusCode(getRandom());
        try {
            restClient.performRequest(method, "/" + statusCode, (Map<String, String>)null);
            fail("request should have failed");
        } catch(NullPointerException e) {
            assertEquals("parameters cannot be null", e.getMessage());
        }
        try {
            restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null);
            fail("request should have failed");
        } catch(NullPointerException e) {
            assertEquals("parameters cannot be null", e.getMessage());
        }
    }

    /**
     * End to end test for request and response headers. Exercises the mock http client ability to send back
     * whatever headers it has received.
@@ -464,35 +418,4 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        }
        return expectedRequest;
    }

    /**
     * @deprecated prefer {@link RestClient#performRequest(Request)}.
     */
    @Deprecated
    private Response performRequest(String method, String endpoint, Header... headers) throws IOException {
        return performRequest(method, endpoint, Collections.<String, String>emptyMap(), headers);
    }

    /**
     * @deprecated prefer {@link RestClient#performRequest(Request)}.
     */
    @Deprecated
    private Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
        int methodSelector;
        if (params.isEmpty()) {
            methodSelector = randomIntBetween(0, 2);
        } else {
            methodSelector = randomIntBetween(1, 2);
        }
        switch(methodSelector) {
            case 0:
                return restClient.performRequest(method, endpoint, headers);
            case 1:
                return restClient.performRequest(method, endpoint, params, headers);
            case 2:
                return restClient.performRequest(method, endpoint, params, (HttpEntity)null, headers);
            default:
                throw new UnsupportedOperationException();
        }
    }
}
@@ -42,7 +42,6 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import static java.util.Collections.singletonList;
import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
@@ -90,88 +89,6 @@ public class RestClientTests extends RestClientTestCase {
        }
    }

    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithUnsupportedMethod()}.
     */
    @Deprecated
    public void testPerformAsyncOldStyleWithUnsupportedMethod() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        try (RestClient restClient = createRestClient()) {
            restClient.performRequestAsync("unsupported", randomAsciiLettersOfLength(5), new ResponseListener() {
                @Override
                public void onSuccess(Response response) {
                    throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client");
                }

                @Override
                public void onFailure(Exception exception) {
                    try {
                        assertThat(exception, instanceOf(UnsupportedOperationException.class));
                        assertEquals("http method not supported: unsupported", exception.getMessage());
                    } finally {
                        latch.countDown();
                    }
                }
            });
            assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS));
        }
    }

    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}.
     */
    @Deprecated
    public void testPerformOldStyleAsyncWithNullParams() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        try (RestClient restClient = createRestClient()) {
            restClient.performRequestAsync(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5), null, new ResponseListener() {
                @Override
                public void onSuccess(Response response) {
                    throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client");
                }

                @Override
                public void onFailure(Exception exception) {
                    try {
                        assertThat(exception, instanceOf(NullPointerException.class));
                        assertEquals("parameters cannot be null", exception.getMessage());
                    } finally {
                        latch.countDown();
                    }
                }
            });
            assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS));
        }
    }

    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests}.
     */
    @Deprecated
    public void testPerformOldStyleAsyncWithNullHeaders() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        try (RestClient restClient = createRestClient()) {
            ResponseListener listener = new ResponseListener() {
                @Override
                public void onSuccess(Response response) {
                    throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client");
                }

                @Override
                public void onFailure(Exception exception) {
                    try {
                        assertThat(exception, instanceOf(NullPointerException.class));
                        assertEquals("header cannot be null", exception.getMessage());
                    } finally {
                        latch.countDown();
                    }
                }
            };
            restClient.performRequestAsync("GET", randomAsciiLettersOfLength(5), listener, (Header) null);
            assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS));
        }
    }

    public void testPerformAsyncWithWrongEndpoint() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        try (RestClient restClient = createRestClient()) {
@@ -195,33 +112,6 @@ public class RestClientTests extends RestClientTestCase {
        }
    }

    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithWrongEndpoint()}.
     */
    @Deprecated
    public void testPerformAsyncOldStyleWithWrongEndpoint() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        try (RestClient restClient = createRestClient()) {
            restClient.performRequestAsync("GET", "::http:///", new ResponseListener() {
                @Override
                public void onSuccess(Response response) {
                    throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client");
                }

                @Override
                public void onFailure(Exception exception) {
                    try {
                        assertThat(exception, instanceOf(IllegalArgumentException.class));
                        assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage());
                    } finally {
                        latch.countDown();
                    }
                }
            });
            assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS));
        }
    }

    public void testBuildUriLeavesPathUntouched() {
        final Map<String, String> emptyMap = Collections.emptyMap();
        {
@@ -259,34 +149,6 @@ public class RestClientTests extends RestClientTestCase {
        }
    }

    @Deprecated
    public void testSetHostsWrongArguments() throws IOException {
        try (RestClient restClient = createRestClient()) {
            restClient.setHosts((HttpHost[]) null);
            fail("setHosts should have failed");
        } catch (IllegalArgumentException e) {
            assertEquals("hosts must not be null nor empty", e.getMessage());
        }
        try (RestClient restClient = createRestClient()) {
            restClient.setHosts();
            fail("setHosts should have failed");
        } catch (IllegalArgumentException e) {
            assertEquals("hosts must not be null nor empty", e.getMessage());
        }
        try (RestClient restClient = createRestClient()) {
            restClient.setHosts((HttpHost) null);
            fail("setHosts should have failed");
        } catch (IllegalArgumentException e) {
            assertEquals("host cannot be null", e.getMessage());
        }
        try (RestClient restClient = createRestClient()) {
            restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201));
            fail("setHosts should have failed");
        } catch (IllegalArgumentException e) {
            assertEquals("host cannot be null", e.getMessage());
        }
    }

    public void testSetNodesWrongArguments() throws IOException {
        try (RestClient restClient = createRestClient()) {
            restClient.setNodes(null);
@@ -348,23 +210,6 @@ public class RestClientTests extends RestClientTestCase {
        }
    }

    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testConstructor()}.
     */
    @Deprecated
    public void testNullPath() throws IOException {
        try (RestClient restClient = createRestClient()) {
            for (String method : getHttpMethods()) {
                try {
                    restClient.performRequest(method, null);
                    fail("path set to null should fail!");
                } catch (NullPointerException e) {
                    assertEquals("endpoint cannot be null", e.getMessage());
                }
            }
        }
    }

    public void testSelectHosts() throws IOException {
        Node n1 = new Node(new HttpHost("1"), null, null, "1", null, null);
        Node n2 = new Node(new HttpHost("2"), null, null, "2", null, null);
@@ -23,6 +23,7 @@ import org.elasticsearch.gradle.NoticeTask
import org.elasticsearch.gradle.test.RunTask
import org.apache.tools.ant.filters.FixCrLfFilter

import java.nio.file.Files
import java.nio.file.Path

Collection distributions = project('archives').subprojects + project('packages').subprojects
@@ -504,4 +505,16 @@ subprojects {
        }
        return result
    }

    ext.assertLinesInFile = { Path path, List<String> expectedLines ->
        final List<String> actualLines = Files.readAllLines(path)
        int line = 0
        for (final String expectedLine : expectedLines) {
            final String actualLine = actualLines.get(line)
            if (expectedLine != actualLine) {
                throw new GradleException("expected line [${line + 1}] in [${path}] to be [${expectedLine}] but was [${actualLine}]")
            }
            line++
        }
    }
}
@@ -19,6 +19,22 @@

apply plugin: 'elasticsearch.docs-test'

/* List of files that have snippets that will not work until platinum tests can occur ... */
buildRestTests.expectedUnconvertedCandidates = [
        'reference/ml/transforms.asciidoc',
        'reference/ml/apis/delete-calendar-event.asciidoc',
        'reference/ml/apis/get-bucket.asciidoc',
        'reference/ml/apis/get-category.asciidoc',
        'reference/ml/apis/get-influencer.asciidoc',
        'reference/ml/apis/get-job-stats.asciidoc',
        'reference/ml/apis/get-overall-buckets.asciidoc',
        'reference/ml/apis/get-record.asciidoc',
        'reference/ml/apis/get-snapshot.asciidoc',
        'reference/ml/apis/post-data.asciidoc',
        'reference/ml/apis/revert-snapshot.asciidoc',
        'reference/ml/apis/update-snapshot.asciidoc',
]

integTestCluster {
    /* Enable regexes in painless so our tests don't complain about example
     * snippets that use them. */
@@ -74,6 +90,17 @@ buildRestTests.docs = fileTree(projectDir) {
    exclude 'build'
    // Just syntax examples
    exclude 'README.asciidoc'
    // Broken code snippet tests
    exclude 'reference/rollup/rollup-getting-started.asciidoc'
    exclude 'reference/rollup/apis/rollup-job-config.asciidoc'
    exclude 'reference/rollup/apis/rollup-index-caps.asciidoc'
    exclude 'reference/rollup/apis/put-job.asciidoc'
    exclude 'reference/rollup/apis/stop-job.asciidoc'
    exclude 'reference/rollup/apis/start-job.asciidoc'
    exclude 'reference/rollup/apis/rollup-search.asciidoc'
    exclude 'reference/rollup/apis/delete-job.asciidoc'
    exclude 'reference/rollup/apis/get-job.asciidoc'
    exclude 'reference/rollup/apis/rollup-caps.asciidoc'
}

listSnippets.docs = buildRestTests.docs
@@ -594,3 +621,480 @@ buildRestTests.setups['library'] = '''
    {"name": "The Moon is a Harsh Mistress", "author": "Robert A. Heinlein", "release_date": "1966-04-01", "page_count": 288}

'''
buildRestTests.setups['sensor_rollup_job'] = '''
  - do:
      indices.create:
        index: sensor-1
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 0
          mappings:
            _doc:
              properties:
                timestamp:
                  type: date
                temperature:
                  type: long
                voltage:
                  type: float
                node:
                  type: keyword
  - do:
      xpack.rollup.put_job:
        id: "sensor"
        body: >
          {
            "index_pattern": "sensor-*",
            "rollup_index": "sensor_rollup",
            "cron": "*/30 * * * * ?",
            "page_size" :1000,
            "groups" : {
              "date_histogram": {
                "field": "timestamp",
                "interval": "1h",
                "delay": "7d"
              },
              "terms": {
                "fields": ["node"]
              }
            },
            "metrics": [
              {
                "field": "temperature",
                "metrics": ["min", "max", "sum"]
              },
              {
                "field": "voltage",
                "metrics": ["avg"]
              }
            ]
          }
'''
buildRestTests.setups['sensor_started_rollup_job'] = '''
  - do:
      indices.create:
        index: sensor-1
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 0
          mappings:
            _doc:
              properties:
                timestamp:
                  type: date
                temperature:
                  type: long
                voltage:
                  type: float
                node:
                  type: keyword

  - do:
      bulk:
        index: sensor-1
        type: _doc
        refresh: true
        body: |
          {"index":{}}
          {"timestamp": 1516729294000, "temperature": 200, "voltage": 5.2, "node": "a"}
          {"index":{}}
          {"timestamp": 1516642894000, "temperature": 201, "voltage": 5.8, "node": "b"}
          {"index":{}}
          {"timestamp": 1516556494000, "temperature": 202, "voltage": 5.1, "node": "a"}
          {"index":{}}
          {"timestamp": 1516470094000, "temperature": 198, "voltage": 5.6, "node": "b"}
          {"index":{}}
          {"timestamp": 1516383694000, "temperature": 200, "voltage": 4.2, "node": "c"}
          {"index":{}}
          {"timestamp": 1516297294000, "temperature": 202, "voltage": 4.0, "node": "c"}

  - do:
      xpack.rollup.put_job:
        id: "sensor"
        body: >
          {
            "index_pattern": "sensor-*",
            "rollup_index": "sensor_rollup",
            "cron": "* * * * * ?",
            "page_size" :1000,
            "groups" : {
              "date_histogram": {
                "field": "timestamp",
                "interval": "1h",
                "delay": "7d"
              },
              "terms": {
                "fields": ["node"]
              }
            },
            "metrics": [
              {
                "field": "temperature",
                "metrics": ["min", "max", "sum"]
              },
              {
                "field": "voltage",
                "metrics": ["avg"]
              }
            ]
          }
  - do:
      xpack.rollup.start_job:
        id: "sensor"
'''

buildRestTests.setups['sensor_index'] = '''
  - do:
      indices.create:
        index: sensor-1
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 0
          mappings:
            _doc:
              properties:
                timestamp:
                  type: date
                temperature:
                  type: long
                voltage:
                  type: float
                node:
                  type: keyword
                load:
                  type: double
                net_in:
                  type: long
                net_out:
                  type: long
                hostname:
                  type: keyword
                datacenter:
                  type: keyword
'''

buildRestTests.setups['sensor_prefab_data'] = '''
  - do:
      indices.create:
        index: sensor-1
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 0
          mappings:
            _doc:
              properties:
                timestamp:
                  type: date
                temperature:
                  type: long
                voltage:
                  type: float
                node:
                  type: keyword
  - do:
      indices.create:
        index: sensor_rollup
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 0
          mappings:
            _doc:
              properties:
                node.terms.value:
                  type: keyword
                temperature.sum.value:
                  type: double
                temperature.max.value:
                  type: double
                temperature.min.value:
                  type: double
                timestamp.date_histogram.time_zone:
                  type: keyword
                timestamp.date_histogram.interval:
                  type: keyword
                timestamp.date_histogram.timestamp:
                  type: date
                timestamp.date_histogram._count:
                  type: long
                voltage.avg.value:
                  type: double
                voltage.avg._count:
                  type: long
                _rollup.id:
                  type: keyword
                _rollup.version:
                  type: long
              _meta:
                _rollup:
                  sensor:
                    cron: "* * * * * ?"
                    rollup_index: "sensor_rollup"
                    index_pattern: "sensor-*"
                    timeout: "20s"
                    page_size: 1000
                    groups:
                      date_histogram:
                        delay: "7d"
                        field: "timestamp"
                        interval: "1h"
                        time_zone: "UTC"
                      terms:
                        fields:
                          - "node"
                    id: sensor
                    metrics:
                      - field: "temperature"
                        metrics:
                          - min
                          - max
                          - sum
                      - field: "voltage"
                        metrics:
                          - avg

  - do:
      bulk:
        index: sensor_rollup
        type: _doc
        refresh: true
        body: |
          {"index":{}}
          {"node.terms.value":"b","temperature.sum.value":201.0,"temperature.max.value":201.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":201.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.800000190734863,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516640400000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}
          {"index":{}}
          {"node.terms.value":"c","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516381200000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}
          {"index":{}}
          {"node.terms.value":"a","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.099999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516554000000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}
          {"index":{}}
          {"node.terms.value":"a","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516726800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}
          {"index":{}}
          {"node.terms.value":"b","temperature.sum.value":198.0,"temperature.max.value":198.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":198.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.599999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516467600000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}
          {"index":{}}
          {"node.terms.value":"c","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.0,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516294800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}

'''
buildRestTests.setups['sample_job'] = '''
  - do:
      xpack.ml.put_job:
        job_id: "sample_job"
        body: >
          {
            "description" : "Very basic job",
            "analysis_config" : {
              "bucket_span":"10m",
              "detectors" :[
                {
                  "function": "count"
                }
              ]},
            "data_description" : {
              "time_field":"timestamp",
              "time_format": "epoch_ms"
            }
          }
'''
buildRestTests.setups['farequote_index'] = '''
  - do:
      indices.create:
        index: farequote
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 0
          mappings:
            metric:
              properties:
                time:
                  type: date
                responsetime:
                  type: float
                airline:
                  type: keyword
                doc_count:
                  type: integer
'''
buildRestTests.setups['farequote_data'] = buildRestTests.setups['farequote_index'] + '''
  - do:
      bulk:
        index: farequote
        type: metric
        refresh: true
        body: |
          {"index": {"_id":"1"}}
          {"airline":"JZA","responsetime":990.4628,"time":"2016-02-07T00:00:00+0000", "doc_count": 5}
          {"index": {"_id":"2"}}
          {"airline":"JBU","responsetime":877.5927,"time":"2016-02-07T00:00:00+0000", "doc_count": 23}
          {"index": {"_id":"3"}}
          {"airline":"KLM","responsetime":1355.4812,"time":"2016-02-07T00:00:00+0000", "doc_count": 42}
'''
buildRestTests.setups['farequote_job'] = buildRestTests.setups['farequote_data'] + '''
  - do:
      xpack.ml.put_job:
        job_id: "farequote"
        body: >
          {
            "analysis_config": {
              "bucket_span": "60m",
              "detectors": [{
                "function": "mean",
                "field_name": "responsetime",
                "by_field_name": "airline"
              }],
              "summary_count_field_name": "doc_count"
            },
            "data_description": {
              "time_field": "time"
            }
          }
'''
buildRestTests.setups['farequote_datafeed'] = buildRestTests.setups['farequote_job'] + '''
  - do:
      xpack.ml.put_datafeed:
        datafeed_id: "datafeed-farequote"
        body: >
          {
            "job_id":"farequote",
            "indexes":"farequote"
          }
'''
buildRestTests.setups['server_metrics_index'] = '''
  - do:
      indices.create:
        index: server-metrics
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 0
          mappings:
            metric:
              properties:
                timestamp:
                  type: date
                total:
                  type: long
'''
buildRestTests.setups['server_metrics_data'] = buildRestTests.setups['server_metrics_index'] + '''
  - do:
      bulk:
        index: server-metrics
        type: metric
        refresh: true
        body: |
          {"index": {"_id":"1177"}}
          {"timestamp":"2017-03-23T13:00:00","total":40476}
          {"index": {"_id":"1178"}}
          {"timestamp":"2017-03-23T13:00:00","total":15287}
          {"index": {"_id":"1179"}}
          {"timestamp":"2017-03-23T13:00:00","total":-776}
          {"index": {"_id":"1180"}}
          {"timestamp":"2017-03-23T13:00:00","total":11366}
          {"index": {"_id":"1181"}}
          {"timestamp":"2017-03-23T13:00:00","total":3606}
          {"index": {"_id":"1182"}}
          {"timestamp":"2017-03-23T13:00:00","total":19006}
          {"index": {"_id":"1183"}}
          {"timestamp":"2017-03-23T13:00:00","total":38613}
          {"index": {"_id":"1184"}}
          {"timestamp":"2017-03-23T13:00:00","total":19516}
          {"index": {"_id":"1185"}}
{"timestamp":"2017-03-23T13:00:00","total":-258}
|
||||
{"index": {"_id":"1186"}}
|
||||
{"timestamp":"2017-03-23T13:00:00","total":9551}
|
||||
{"index": {"_id":"1187"}}
|
||||
{"timestamp":"2017-03-23T13:00:00","total":11217}
|
||||
{"index": {"_id":"1188"}}
|
||||
{"timestamp":"2017-03-23T13:00:00","total":22557}
|
||||
{"index": {"_id":"1189"}}
|
||||
{"timestamp":"2017-03-23T13:00:00","total":40508}
|
||||
{"index": {"_id":"1190"}}
|
||||
{"timestamp":"2017-03-23T13:00:00","total":11887}
|
||||
{"index": {"_id":"1191"}}
|
||||
{"timestamp":"2017-03-23T13:00:00","total":31659}
|
||||
'''
|
||||
buildRestTests.setups['server_metrics_job'] = buildRestTests.setups['server_metrics_data'] + '''
|
||||
- do:
|
||||
xpack.ml.put_job:
|
||||
job_id: "total-requests"
|
||||
body: >
|
||||
{
|
||||
"description" : "Total sum of requests",
|
||||
"analysis_config" : {
|
||||
"bucket_span":"10m",
|
||||
"detectors" :[
|
||||
{
|
||||
"detector_description": "Sum of total",
|
||||
"function": "sum",
|
||||
"field_name": "total"
|
||||
}
|
||||
]},
|
||||
"data_description" : {
|
||||
"time_field":"timestamp",
|
||||
"time_format": "epoch_ms"
|
||||
}
|
||||
}
|
||||
'''
|
||||
buildRestTests.setups['server_metrics_datafeed'] = buildRestTests.setups['server_metrics_job'] + '''
|
||||
- do:
|
||||
xpack.ml.put_datafeed:
|
||||
datafeed_id: "datafeed-total-requests"
|
||||
body: >
|
||||
{
|
||||
"job_id":"total-requests",
|
||||
"indexes":"server-metrics"
|
||||
}
|
||||
'''
|
||||
buildRestTests.setups['server_metrics_openjob'] = buildRestTests.setups['server_metrics_datafeed'] + '''
|
||||
- do:
|
||||
xpack.ml.open_job:
|
||||
job_id: "total-requests"
|
||||
'''
|
||||
buildRestTests.setups['server_metrics_startdf'] = buildRestTests.setups['server_metrics_openjob'] + '''
|
||||
- do:
|
||||
xpack.ml.start_datafeed:
|
||||
datafeed_id: "datafeed-total-requests"
|
||||
'''
|
||||
buildRestTests.setups['calendar_outages'] = '''
|
||||
- do:
|
||||
xpack.ml.put_calendar:
|
||||
calendar_id: "planned-outages"
|
||||
'''
|
||||
buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages'] + '''
|
||||
- do:
|
||||
xpack.ml.post_calendar_events:
|
||||
calendar_id: "planned-outages"
|
||||
body: >
|
||||
{ "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "planned-outages" }
|
||||
|
||||
|
||||
'''
|
||||
buildRestTests.setups['calendar_outages_openjob'] = buildRestTests.setups['server_metrics_openjob'] + '''
|
||||
- do:
|
||||
xpack.ml.put_calendar:
|
||||
calendar_id: "planned-outages"
|
||||
'''
|
||||
buildRestTests.setups['calendar_outages_addjob'] = buildRestTests.setups['server_metrics_openjob'] + '''
|
||||
- do:
|
||||
xpack.ml.put_calendar:
|
||||
calendar_id: "planned-outages"
|
||||
body: >
|
||||
{
|
||||
"job_ids": ["total-requests"]
|
||||
}
|
||||
'''
|
||||
buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages_addjob'] + '''
|
||||
- do:
|
||||
xpack.ml.post_calendar_events:
|
||||
calendar_id: "planned-outages"
|
||||
body: >
|
||||
{ "events" : [
|
||||
{ "description": "event 1", "start_time": "1513641600000", "end_time": "1513728000000"},
|
||||
{ "description": "event 2", "start_time": "1513814400000", "end_time": "1513900800000"},
|
||||
{ "description": "event 3", "start_time": "1514160000000", "end_time": "1514246400000"}
|
||||
]}
|
||||
'''
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,181 @@
[[java-rest-high-document-update-by-query]]
=== Update By Query API

[[java-rest-high-document-update-by-query-request]]
==== Update By Query Request

An `UpdateByQueryRequest` can be used to update documents in an index.

It requires an existing index (or a set of indices) on which the update is to be performed.

The simplest form of an `UpdateByQueryRequest` looks like this:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request]
--------------------------------------------------
<1> Creates the `UpdateByQueryRequest` on a set of indices.

By default version conflicts abort the `UpdateByQueryRequest` process, but you can count them instead of aborting
by setting the conflict handling to `proceed` in the request body:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-conflicts]
--------------------------------------------------
<1> Set `proceed` on version conflict

You can limit the documents by adding a type to the source or by adding a query.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-typeOrQuery]
--------------------------------------------------
<1> Only update documents of type `doc`
<2> Only update documents which have the field `user` set to `kimchy`

It's also possible to limit the number of processed documents by setting `size`.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-size]
--------------------------------------------------
<1> Only process 10 documents

By default `UpdateByQueryRequest` uses batches of 1000. You can change the batch size with `setBatchSize`.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-scrollSize]
--------------------------------------------------
<1> Use batches of 100 documents

Update by query can also use the ingest feature by specifying a `pipeline`.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-pipeline]
--------------------------------------------------
<1> Set the pipeline to `my_pipeline`

`UpdateByQueryRequest` also supports a `script` that modifies the document. The following example illustrates this:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-script]
--------------------------------------------------
<1> `setScript` to increment the `likes` field on all documents with user `kimchy`.

`UpdateByQueryRequest` can also parallelize automatically using `sliced-scroll` to
slice on `_uid`. Use `setSlices` to specify the number of slices to use.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-slices]
--------------------------------------------------
<1> Set the number of slices to use

`UpdateByQueryRequest` uses the `scroll` parameter to control how long it keeps the "search context" alive.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-scroll]
--------------------------------------------------
<1> Set the scroll time

If you provide routing then the routing is copied to the scroll query, limiting the process to the shards that match
that routing value.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-routing]
--------------------------------------------------
<1> Set the routing value
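
Putting these options together, a fully configured request might look like the following sketch. This is
illustrative only, not the canonical snippet from `CRUDDocumentationIT.java`: it assumes a constructor that
takes the target indices (as in the first snippet) and setter names mirroring the callouts above
(`setConflicts`, `setQuery`, `setSize`, `setPipeline`, `setScroll`, `setRouting` are assumptions; `setBatchSize`,
`setScript`, and `setSlices` are named in the text). The index names and pipeline id are hypothetical.

["source","java"]
--------------------------------------------------
import java.util.Collections;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.index.reindex.UpdateByQueryRequest;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

// Sketch only; method names are assumptions mirroring the callouts above.
UpdateByQueryRequest request = new UpdateByQueryRequest("source1", "source2"); // hypothetical indices
request.setConflicts("proceed");                          // count version conflicts instead of aborting
request.setQuery(new TermQueryBuilder("user", "kimchy")); // only documents whose `user` field is `kimchy`
request.setSize(10);                                      // process at most 10 documents
request.setBatchSize(100);                                // scroll batches of 100 documents
request.setPipeline("my_pipeline");                       // run updated documents through an ingest pipeline
request.setScript(new Script(
    ScriptType.INLINE, "painless",
    "if (ctx._source.user == 'kimchy') {ctx._source.likes++;}",
    Collections.emptyMap()));                             // increment the `likes` field
request.setSlices(2);                                     // parallelize with two slices
request.setScroll(TimeValue.timeValueMinutes(10));        // keep the search context alive for 10 minutes
request.setRouting("=cat");                               // limit to shards matching this routing value
--------------------------------------------------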

==== Optional arguments
In addition to the options above, the following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-timeout]
--------------------------------------------------
<1> Timeout to wait for the update by query request to be performed, as a `TimeValue`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-refresh]
--------------------------------------------------
<1> Refresh the index after calling update by query

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-indicesOptions]
--------------------------------------------------
<1> Set indices options


[[java-rest-high-document-update-by-query-sync]]
==== Synchronous Execution

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute]
--------------------------------------------------
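
For orientation, a minimal synchronous call might look like the sketch below. It assumes the request built
earlier, an initialized `RestHighLevelClient` named `client`, and that the execution method is called
`updateByQuery` (the method name is an assumption here; the snippet above is authoritative).

["source","java"]
--------------------------------------------------
// Minimal sketch; `client` and the `updateByQuery` method name are assumptions.
BulkByScrollResponse bulkResponse =
    client.updateByQuery(request, RequestOptions.DEFAULT); // blocks until the request completes
--------------------------------------------------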

[[java-rest-high-document-update-by-query-async]]
==== Asynchronous Execution

The asynchronous execution of an update by query request requires both the `UpdateByQueryRequest`
instance and an `ActionListener` instance to be passed to the asynchronous
method:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute-async]
--------------------------------------------------
<1> The `UpdateByQueryRequest` to execute and the `ActionListener` to use when
the execution completes

The asynchronous method does not block and returns immediately. Once the request
has completed, the `ActionListener` is called back using the `onResponse` method
if the execution completed successfully or using the `onFailure` method if
it failed.

A typical listener for `BulkByScrollResponse` looks like:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute-listener]
--------------------------------------------------
<1> Called when the execution completes successfully. The response is
provided as an argument and contains the details of the executed
operation. Note that one or more operations might have
failed while the others have been successfully executed.
<2> Called when the whole `UpdateByQueryRequest` fails. In this case the raised
exception is provided as an argument and no operation has been executed.
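
Concretely, such a listener can be written as the following sketch, which mirrors the two callouts above;
the body of each callback is left as a placeholder.

["source","java"]
--------------------------------------------------
// A minimal listener sketch for the asynchronous flavor.
ActionListener<BulkByScrollResponse> listener = new ActionListener<BulkByScrollResponse>() {
    @Override
    public void onResponse(BulkByScrollResponse bulkResponse) {
        // <1> inspect the response once the request has completed
    }

    @Override
    public void onFailure(Exception e) {
        // <2> react to a failure of the whole request
    }
};
--------------------------------------------------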

[[java-rest-high-document-update-by-query-execute-listener-response]]
==== Update By Query Response

The returned `BulkByScrollResponse` contains information about the executed operations and
allows iterating over each result as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-response]
--------------------------------------------------
<1> Get the total time taken
<2> Check if the request timed out
<3> Get the total number of docs processed
<4> Number of docs that were updated
<5> Number of docs that were deleted
<6> Number of batches that were executed
<7> Number of skipped docs
<8> Number of version conflicts
<9> Number of times the request had to retry bulk index operations
<10> Number of times the request had to retry search operations
<11> The total time this request has throttled itself, not including the current throttle time if it is currently sleeping
<12> Remaining delay of any current throttle sleep, or 0 if not sleeping
<13> Failures during the search phase
<14> Failures during the bulk index operation
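
As a rough illustration, the callouts above map onto `BulkByScrollResponse` accessors along these lines.
This is a sketch, not the canonical snippet: the accessor names are assumptions mirroring the callouts,
and should be verified against `CRUDDocumentationIT.java`.

["source","java"]
--------------------------------------------------
// Sketch: reading a BulkByScrollResponse; accessor names are assumptions.
TimeValue took = bulkResponse.getTook();                        // <1> total time taken
boolean timedOut = bulkResponse.isTimedOut();                   // <2> did the request time out?
long totalDocs = bulkResponse.getTotal();                       // <3> docs processed
long updatedDocs = bulkResponse.getUpdated();                   // <4> docs updated
long deletedDocs = bulkResponse.getDeleted();                   // <5> docs deleted
long batches = bulkResponse.getBatches();                       // <6> batches executed
long noops = bulkResponse.getNoops();                           // <7> skipped docs
long versionConflicts = bulkResponse.getVersionConflicts();     // <8> version conflicts
long bulkRetries = bulkResponse.getBulkRetries();               // <9> bulk retries
long searchRetries = bulkResponse.getSearchRetries();           // <10> search retries
TimeValue throttled = bulkResponse.getStatus().getThrottled();  // <11> time spent throttled
TimeValue throttledUntil =
    bulkResponse.getStatus().getThrottledUntil();               // <12> remaining throttle sleep
--------------------------------------------------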

@ -0,0 +1,83 @@
[[java-rest-high-x-pack-ml-flush-job]]
=== Flush Job API

The Flush Job API provides the ability to flush a {ml} job's
buffered data in the cluster.
It accepts a `FlushJobRequest` object and responds
with a `FlushJobResponse` object.

[[java-rest-high-x-pack-ml-flush-job-request]]
==== Flush Job Request

A `FlushJobRequest` object gets created with an existing non-null `jobId`.
All other fields are optional for the request.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-request]
--------------------------------------------------
<1> Constructing a new request referencing an existing `jobId`

==== Optional Arguments

The following arguments are optional.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-request-options]
--------------------------------------------------
<1> Set the request to calculate the interim results
<2> Set the advance time to flush to the particular time value
<3> Set the start time for the range of buckets on which
to calculate the interim results (requires `calc_interim` to be `true`)
<4> Set the end time for the range of buckets on which
to calculate interim results (requires `calc_interim` to be `true`)
<5> Set the skip time to skip a particular time value
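
For orientation, building a fully populated request might look like the sketch below. The setter names
(`setCalcInterim` and friends) and the timestamp values are assumptions mirroring the callouts above;
the canonical code lives in `MlClientDocumentationIT.java`.

["source","java"]
--------------------------------------------------
// Sketch only; setter names and time values are assumptions.
FlushJobRequest flushJobRequest = new FlushJobRequest("total-requests"); // non-null jobId is required
flushJobRequest.setCalcInterim(true);                        // <1> calculate interim results
flushJobRequest.setAdvanceTime("2018-08-31T16:35:07+00:00"); // <2> advance the flush time
flushJobRequest.setStart("2018-08-31T16:00:00+00:00");       // <3> start of the interim bucket range
flushJobRequest.setEnd("2018-08-31T16:35:00+00:00");         // <4> end of the interim bucket range
flushJobRequest.setSkipTime("2018-08-31T15:00:00+00:00");    // <5> skip to this time value
--------------------------------------------------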

[[java-rest-high-x-pack-ml-flush-job-execution]]
==== Execution

The request can be executed through the `MachineLearningClient` contained
in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-execute]
--------------------------------------------------

[[java-rest-high-x-pack-ml-flush-job-execution-async]]
==== Asynchronous Execution

The request can also be executed asynchronously:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-execute-async]
--------------------------------------------------
<1> The `FlushJobRequest` to execute and the `ActionListener` to use when
the execution completes

The method does not block and returns immediately. The passed `ActionListener` is used
to notify the caller of completion. A typical `ActionListener` for `FlushJobResponse` may
look like this:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-listener]
--------------------------------------------------
<1> `onResponse` is called back when the action is completed successfully
<2> `onFailure` is called back when some unexpected error occurs

[[java-rest-high-x-pack-ml-flush-job-response]]
==== Flush Job Response

A `FlushJobResponse` contains an acknowledgement and an optional end date for the
last finalized bucket:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-response]
--------------------------------------------------
<1> `isFlushed()` indicates whether the job was successfully flushed.
<2> `getLastFinalizedBucketEnd()` provides the timestamp
(in milliseconds-since-the-epoch) of the end of the last bucket that was processed.

@ -0,0 +1,67 @@
[[java-rest-high-x-pack-ml-get-job-stats]]
=== Get Job Stats API

The Get Job Stats API provides the ability to get statistics for any number of
{ml} jobs in the cluster.
It accepts a `GetJobStatsRequest` object and responds
with a `GetJobStatsResponse` object.

[[java-rest-high-x-pack-ml-get-job-stats-request]]
==== Get Job Stats Request

A `GetJobStatsRequest` object can have any number of `jobId`
entries. However, they all must be non-null. An empty list is the same as
requesting statistics for all jobs.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-request]
--------------------------------------------------
<1> Constructing a new request referencing existing `jobIds`; wildcards are allowed
<2> Whether to ignore if a wildcard expression matches no jobs.
(This includes the `_all` string or when no jobs have been specified)
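
As a sketch, a request for two of the jobs used elsewhere in these docs might look like this; the varargs
constructor and the `setAllowNoJobs` setter are assumptions mirroring the callouts above.

["source","java"]
--------------------------------------------------
// Sketch only; names mirror the callouts above and are assumptions.
GetJobStatsRequest request =
    new GetJobStatsRequest("total-requests", "farequote"); // <1> explicit ids; wildcards are allowed
request.setAllowNoJobs(true);                              // <2> do not fail if a wildcard matches no jobs
--------------------------------------------------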

[[java-rest-high-x-pack-ml-get-job-stats-execution]]
==== Execution

The request can be executed through the `MachineLearningClient` contained
in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-execute]
--------------------------------------------------

[[java-rest-high-x-pack-ml-get-job-stats-execution-async]]
==== Asynchronous Execution

The request can also be executed asynchronously:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-execute-async]
--------------------------------------------------
<1> The `GetJobStatsRequest` to execute and the `ActionListener` to use when
the execution completes

The method does not block and returns immediately. The passed `ActionListener` is used
to notify the caller of completion. A typical `ActionListener` for `GetJobStatsResponse` may
look like this:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-listener]
--------------------------------------------------
<1> `onResponse` is called back when the action is completed successfully
<2> `onFailure` is called back when some unexpected error occurs

[[java-rest-high-x-pack-ml-get-job-stats-response]]
==== Get Job Stats Response
The returned `GetJobStatsResponse` contains the requested job statistics:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-response]
--------------------------------------------------
<1> `getCount()` indicates the number of job statistics found
<2> `getJobStats()` is the collection of {ml} `JobStats` objects found
@ -16,6 +16,7 @@ Multi-document APIs::
* <<java-rest-high-document-bulk>>
* <<java-rest-high-document-multi-get>>
* <<java-rest-high-document-reindex>>
* <<java-rest-high-document-update-by-query>>

include::document/index.asciidoc[]
include::document/get.asciidoc[]

@ -25,6 +26,7 @@ include::document/update.asciidoc[]
include::document/bulk.asciidoc[]
include::document/multi-get.asciidoc[]
include::document/reindex.asciidoc[]
include::document/update-by-query.asciidoc[]

== Search APIs

@ -211,6 +213,8 @@ The Java High Level REST Client supports the following Machine Learning APIs:
* <<java-rest-high-x-pack-ml-delete-job>>
* <<java-rest-high-x-pack-ml-open-job>>
* <<java-rest-high-x-pack-ml-close-job>>
* <<java-rest-high-x-pack-ml-flush-job>>
* <<java-rest-high-x-pack-ml-get-job-stats>>
* <<java-rest-high-x-pack-ml-get-buckets>>
* <<java-rest-high-x-pack-ml-get-records>>

@ -219,6 +223,8 @@ include::ml/get-job.asciidoc[]
include::ml/delete-job.asciidoc[]
include::ml/open-job.asciidoc[]
include::ml/close-job.asciidoc[]
include::ml/flush-job.asciidoc[]
include::ml/get-job-stats.asciidoc[]
include::ml/get-buckets.asciidoc[]
include::ml/get-records.asciidoc[]

@ -15,13 +15,14 @@ Which looks like:
[source,txt]
--------------------------------------------------
node-0 analyze             0 0 0
...
node-0 fetch_shard_started 0 0 0
node-0 fetch_shard_store   0 0 0
node-0 flush               0 0 0
...
node-0 write               0 0 0
--------------------------------------------------
// TESTRESPONSE[s/\.\.\./(node-0 .+ 0 0 0\n)+/]
// TESTRESPONSE[s/\.\.\./(node-0 \\S+ 0 0 0\n)*/]
// TESTRESPONSE[s/\d+/\\d+/ _cat]
// The substitutions do two things:
// 1. Expect any number of extra thread pools. This allows us to only list a

@ -45,6 +46,7 @@ The second column is the thread pool name
--------------------------------------------------
name
analyze
ccr (default distro only)
fetch_shard_started
fetch_shard_store
flush

@ -81,6 +83,7 @@ active queue rejected
0 0 0
0 0 0
0 0 0
0 0 0
1 0 0
0 0 0
0 0 0

@ -1,11 +1,11 @@
[role="xpack"]
[[xpack-commands]]
= {xpack} Commands
[[commands]]
= Command line tools

[partintro]
--

{xpack} includes commands that help you configure security:
{es} provides the following tools for configuring security and performing other
tasks from the command line:

* <<certgen>>
* <<certutil>>

@ -63,12 +63,6 @@ corruption is detected, it will prevent the shard from being opened. Accepts:
Check for both physical and logical corruption. This is much more
expensive in terms of CPU and memory usage.

`fix`::

Check for both physical and logical corruption. Segments that were reported
as corrupted will be automatically removed. This option *may result in data loss*.
Use with extreme caution!

WARNING: Expert only. Checking shards may take a lot of time on large indices.
--

@ -63,7 +63,7 @@ include::sql/index.asciidoc[]

include::monitoring/index.asciidoc[]

include::{xes-repo-dir}/rollup/index.asciidoc[]
include::rollup/index.asciidoc[]

include::rest-api/index.asciidoc[]

@ -39,6 +39,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x.
* <<breaking_70_scripting_changes>>
* <<breaking_70_snapshotstats_changes>>
* <<breaking_70_restclient_changes>>
* <<breaking_70_low_level_restclient_changes>>

include::migrate_7_0/aggregations.asciidoc[]
include::migrate_7_0/analysis.asciidoc[]

@ -53,4 +54,5 @@ include::migrate_7_0/java.asciidoc[]
include::migrate_7_0/settings.asciidoc[]
include::migrate_7_0/scripting.asciidoc[]
include::migrate_7_0/snapshotstats.asciidoc[]
include::migrate_7_0/restclient.asciidoc[]
include::migrate_7_0/low_level_restclient.asciidoc[]

@ -78,3 +78,7 @@ The parent circuit breaker defines a new setting `indices.breaker.total.use_real
heap memory instead of only considering the reserved memory by child circuit breakers. When this
setting is `true`, the default parent breaker limit also changes from 70% to 95% of the JVM heap size.
The previous behavior can be restored by setting `indices.breaker.total.use_real_memory` to `false`.

==== `fix` value for `index.shard.check_on_startup` is removed

The deprecated option value `fix` for the setting `index.shard.check_on_startup` is no longer supported.

@ -0,0 +1,14 @@
[[breaking_70_low_level_restclient_changes]]
=== Low-level REST client changes

==== Deprecated flavors of performRequest have been removed

We deprecated the flavors of `performRequest` and `performRequestAsync` that
do not take `Request` objects in 6.4.0 in favor of the flavors that take
`Request` objects because those methods can be extended without breaking
backwards compatibility.
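
For example, a call that used to pass the HTTP method, endpoint, and parameters directly now goes through a
`Request` object; the sketch below uses a hypothetical local host and endpoint.

["source","java"]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// Migration sketch: the Request-object flavor that replaces the removed ones.
RestClient restClient = RestClient.builder(
    new HttpHost("localhost", 9200, "http")).build(); // hypothetical host

Request request = new Request("GET", "/_cluster/health"); // HTTP method + endpoint
request.addParameter("pretty", "true");                   // optional request parameters
Response response = restClient.performRequest(request);
--------------------------------------------------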

==== Removed setHosts

We deprecated `setHosts` in 6.4.0 in favor of `setNodes` because it supports
host metadata used by the `NodeSelector`.

@ -41,7 +41,7 @@ PUT _xpack/ml/anomaly_detectors/farequote
}
----------------------------------
// CONSOLE
// TEST[setup:farequote_data]
// TEST[skip:setup:farequote_data]

In this example, the `airline`, `responsetime`, and `time` fields are
aggregations.

@ -90,7 +90,7 @@ PUT _xpack/ml/datafeeds/datafeed-farequote
}
----------------------------------
// CONSOLE
// TEST[setup:farequote_job]
// TEST[skip:setup:farequote_job]

In this example, the aggregations have names that match the fields that they
operate on. That is to say, the `max` aggregation is named `time` and its

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-calendar-resource]]
=== Calendar Resources

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-close-job]]
=== Close Jobs API
++++

@ -80,7 +81,7 @@ The following example closes the `total-requests` job:
POST _xpack/ml/anomaly_detectors/total-requests/_close
--------------------------------------------------
// CONSOLE
// TEST[setup:server_metrics_openjob]
// TEST[skip:setup:server_metrics_openjob]

When the job is closed, you receive the following results:
[source,js]

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-datafeed-resource]]
=== {dfeed-cap} Resources

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-delete-calendar-event]]
=== Delete Events from Calendar API
++++

@ -44,7 +45,7 @@ calendar:
DELETE _xpack/ml/calendars/planned-outages/events/LS8LJGEBMTCMA-qz49st
--------------------------------------------------
// CONSOLE
// TEST[catch:missing]
// TEST[skip:catch:missing]

When the event is removed, you receive the following results:
[source,js]

@ -53,4 +54,3 @@ When the event is removed, you receive the following results:
"acknowledged": true
}
----
// NOTCONSOLE

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-delete-calendar-job]]
=== Delete Jobs from Calendar API
++++

@ -38,7 +39,7 @@ calendar and `total-requests` job:
DELETE _xpack/ml/calendars/planned-outages/jobs/total-requests
--------------------------------------------------
// CONSOLE
// TEST[setup:calendar_outages_addjob]
// TEST[skip:setup:calendar_outages_addjob]

When the job is removed from the calendar, you receive the following
results:

@ -50,4 +51,4 @@ results:
"job_ids": []
}
----
//TESTRESPONSE
// TESTRESPONSE

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-delete-calendar]]
=== Delete Calendar API
++++

@ -40,7 +41,7 @@ The following example deletes the `planned-outages` calendar:
DELETE _xpack/ml/calendars/planned-outages
--------------------------------------------------
// CONSOLE
// TEST[setup:calendar_outages]
// TEST[skip:setup:calendar_outages]

When the calendar is deleted, you receive the following results:
[source,js]

@ -49,4 +50,4 @@ When the calendar is deleted, you receive the following results:
"acknowledged": true
}
----
//TESTRESPONSE
// TESTRESPONSE

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-delete-datafeed]]
=== Delete {dfeeds-cap} API
++++

@ -47,7 +48,7 @@ The following example deletes the `datafeed-total-requests` {dfeed}:
DELETE _xpack/ml/datafeeds/datafeed-total-requests
--------------------------------------------------
// CONSOLE
// TEST[setup:server_metrics_datafeed]
// TEST[skip:setup:server_metrics_datafeed]

When the {dfeed} is deleted, you receive the following results:
[source,js]

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-delete-filter]]
=== Delete Filter API
++++

@ -41,7 +42,7 @@ The following example deletes the `safe_domains` filter:
DELETE _xpack/ml/filters/safe_domains
--------------------------------------------------
// CONSOLE
// TEST[setup:ml_filter_safe_domains]
// TEST[skip:setup:ml_filter_safe_domains]

When the filter is deleted, you receive the following results:
[source,js]

@ -50,4 +51,4 @@ When the filter is deleted, you receive the following results:
"acknowledged": true
}
----
//TESTRESPONSE
// TESTRESPONSE

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-delete-job]]
=== Delete Jobs API
++++

@ -56,7 +57,7 @@ The following example deletes the `total-requests` job:
DELETE _xpack/ml/anomaly_detectors/total-requests
--------------------------------------------------
// CONSOLE
// TEST[setup:server_metrics_job]
// TEST[skip:setup:server_metrics_job]

When the job is deleted, you receive the following results:
[source,js]

@ -65,4 +66,4 @@ When the job is deleted, you receive the following results:
"acknowledged": true
}
----
// TESTRESPONSE

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-delete-snapshot]]
=== Delete Model Snapshots API
++++

@ -32,7 +33,6 @@ the `model_snapshot_id` in the results from the get jobs API.

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.


==== Examples

@ -53,3 +53,4 @@ When the snapshot is deleted, you receive the following results:
"acknowledged": true
}
----
// TESTRESPONSE

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-event-resource]]
=== Scheduled Event Resources

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-filter-resource]]
=== Filter Resources

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-flush-job]]
=== Flush Jobs API
++++

@ -74,7 +75,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_flush
}
--------------------------------------------------
// CONSOLE
// TEST[setup:server_metrics_openjob]
// TEST[skip:setup:server_metrics_openjob]

When the operation succeeds, you receive the following results:
[source,js]

@ -84,7 +85,7 @@ When the operation succeeds, you receive the following results:
"last_finalized_bucket_end": 1455234900000
}
----
// TESTRESPONSE[s/"last_finalized_bucket_end": 1455234900000/"last_finalized_bucket_end": $body.last_finalized_bucket_end/]
//TESTRESPONSE[s/"last_finalized_bucket_end": 1455234900000/"last_finalized_bucket_end": $body.last_finalized_bucket_end/]

The `last_finalized_bucket_end` provides the timestamp (in
milliseconds-since-the-epoch) of the end of the last bucket that was processed.

@ -101,7 +102,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_flush
}
--------------------------------------------------
// CONSOLE
// TEST[setup:server_metrics_openjob]
// TEST[skip:setup:server_metrics_openjob]

When the operation succeeds, you receive the following results:
[source,js]

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-forecast]]
=== Forecast Jobs API
++++

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-bucket]]
=== Get Buckets API
++++

@ -81,7 +82,6 @@ that stores the results. The `machine_learning_admin` and `machine_learning_user
roles provide these privileges. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges] and
{xpack-ref}/built-in-roles.html[Built-in Roles].
//<<security-privileges>> and <<built-in-roles>>.


==== Examples

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-calendar-event]]
=== Get Scheduled Events API
++++

@ -66,7 +67,7 @@ The following example gets information about the scheduled events in the
GET _xpack/ml/calendars/planned-outages/events
--------------------------------------------------
// CONSOLE
// TEST[setup:calendar_outages_addevent]
// TEST[skip:setup:calendar_outages_addevent]

The API returns the following results:

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-calendar]]
=== Get Calendars API
++++

@ -62,7 +63,7 @@ calendar:
GET _xpack/ml/calendars/planned-outages
--------------------------------------------------
// CONSOLE
// TEST[setup:calendar_outages_addjob]
// TEST[skip:setup:calendar_outages_addjob]

The API returns the following results:
[source,js]

@ -79,4 +80,4 @@ The API returns the following results:
]
}
----
//TESTRESPONSE
// TESTRESPONSE

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-category]]
=== Get Categories API
++++

@ -18,7 +19,6 @@ Retrieves job results for one or more categories.

For more information about categories, see
{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages].
//<<ml-configuring-categories>>.

==== Path Parameters

@ -56,7 +56,6 @@ that stores the results. The `machine_learning_admin` and `machine_learning_user
roles provide these privileges. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges] and
{xpack-ref}/built-in-roles.html[Built-in Roles].
//<<security-privileges>> and <<built-in-roles>>.


==== Examples

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-datafeed-stats]]
=== Get {dfeed-cap} Statistics API
++++

@ -66,7 +67,7 @@ The following example gets usage information for the
GET _xpack/ml/datafeeds/datafeed-total-requests/_stats
--------------------------------------------------
// CONSOLE
// TEST[setup:server_metrics_startdf]
// TEST[skip:setup:server_metrics_startdf]

The API returns the following results:
[source,js]

@ -97,4 +98,4 @@ The API returns the following results:
// TESTRESPONSE[s/"node-0"/$body.$_path/]
// TESTRESPONSE[s/"hoXMLZB0RWKfR9UPPUCxXX"/$body.$_path/]
// TESTRESPONSE[s/"127.0.0.1:9300"/$body.$_path/]
// TESTRESPONSE[s/"17179869184"/$body.datafeeds.0.node.attributes.ml\\.machine_memory/]

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-datafeed]]
=== Get {dfeeds-cap} API
++++

@ -60,7 +61,7 @@ The following example gets configuration information for the
GET _xpack/ml/datafeeds/datafeed-total-requests
--------------------------------------------------
// CONSOLE
// TEST[setup:server_metrics_datafeed]
// TEST[skip:setup:server_metrics_datafeed]

The API returns the following results:
[source,js]

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-filter]]
=== Get Filters API
++++

@ -62,7 +63,7 @@ filter:
GET _xpack/ml/filters/safe_domains
--------------------------------------------------
// CONSOLE
// TEST[setup:ml_filter_safe_domains]
// TEST[skip:setup:ml_filter_safe_domains]

The API returns the following results:
[source,js]

@ -81,4 +82,4 @@ The API returns the following results:
]
}
----
//TESTRESPONSE
// TESTRESPONSE

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-influencer]]
=== Get Influencers API
++++

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-job-stats]]
=== Get Job Statistics API
++++

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-job]]
=== Get Jobs API
++++

@ -59,7 +60,7 @@ The following example gets configuration information for the `total-requests` jo
GET _xpack/ml/anomaly_detectors/total-requests
--------------------------------------------------
// CONSOLE
// TEST[setup:server_metrics_job]
// TEST[skip:setup:server_metrics_job]

The API returns the following results:
[source,js]

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-overall-buckets]]
=== Get Overall Buckets API
++++

@ -93,7 +94,6 @@ that stores the results. The `machine_learning_admin` and `machine_learning_user
roles provide these privileges. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges] and
{xpack-ref}/built-in-roles.html[Built-in Roles].
//<<security-privileges>> and <<built-in-roles>>.


==== Examples

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-record]]
=== Get Records API
++++

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-get-snapshot]]
=== Get Model Snapshots API
++++

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-jobstats]]
=== Job Statistics

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-job-resource]]
=== Job Resources

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-apis]]
== Machine Learning APIs

@ -70,57 +71,57 @@ machine learning APIs and in advanced job configuration options in Kibana.
* <<ml-get-record,Get records>>

//ADD
include::ml/post-calendar-event.asciidoc[]
include::ml/put-calendar-job.asciidoc[]
include::post-calendar-event.asciidoc[]
include::put-calendar-job.asciidoc[]
//CLOSE
include::ml/close-job.asciidoc[]
include::close-job.asciidoc[]
//CREATE
include::ml/put-calendar.asciidoc[]
include::ml/put-datafeed.asciidoc[]
include::ml/put-filter.asciidoc[]
include::ml/put-job.asciidoc[]
include::put-calendar.asciidoc[]
include::put-datafeed.asciidoc[]
include::put-filter.asciidoc[]
include::put-job.asciidoc[]
//DELETE
include::ml/delete-calendar.asciidoc[]
include::ml/delete-datafeed.asciidoc[]
include::ml/delete-calendar-event.asciidoc[]
include::ml/delete-filter.asciidoc[]
include::ml/delete-job.asciidoc[]
include::ml/delete-calendar-job.asciidoc[]
include::ml/delete-snapshot.asciidoc[]
include::delete-calendar.asciidoc[]
include::delete-datafeed.asciidoc[]
include::delete-calendar-event.asciidoc[]
include::delete-filter.asciidoc[]
include::delete-job.asciidoc[]
include::delete-calendar-job.asciidoc[]
include::delete-snapshot.asciidoc[]
//FLUSH
include::ml/flush-job.asciidoc[]
include::flush-job.asciidoc[]
//FORECAST
include::ml/forecast.asciidoc[]
include::forecast.asciidoc[]
//GET
include::ml/get-calendar.asciidoc[]
include::ml/get-bucket.asciidoc[]
include::ml/get-overall-buckets.asciidoc[]
include::ml/get-category.asciidoc[]
include::ml/get-datafeed.asciidoc[]
include::ml/get-datafeed-stats.asciidoc[]
include::ml/get-influencer.asciidoc[]
include::ml/get-job.asciidoc[]
include::ml/get-job-stats.asciidoc[]
include::ml/get-snapshot.asciidoc[]
include::ml/get-calendar-event.asciidoc[]
include::ml/get-filter.asciidoc[]
include::ml/get-record.asciidoc[]
include::get-calendar.asciidoc[]
include::get-bucket.asciidoc[]
include::get-overall-buckets.asciidoc[]
include::get-category.asciidoc[]
include::get-datafeed.asciidoc[]
include::get-datafeed-stats.asciidoc[]
include::get-influencer.asciidoc[]
include::get-job.asciidoc[]
include::get-job-stats.asciidoc[]
include::get-snapshot.asciidoc[]
include::get-calendar-event.asciidoc[]
include::get-filter.asciidoc[]
include::get-record.asciidoc[]
//OPEN
include::ml/open-job.asciidoc[]
include::open-job.asciidoc[]
//POST
include::ml/post-data.asciidoc[]
include::post-data.asciidoc[]
//PREVIEW
include::ml/preview-datafeed.asciidoc[]
include::preview-datafeed.asciidoc[]
//REVERT
include::ml/revert-snapshot.asciidoc[]
include::revert-snapshot.asciidoc[]
//START/STOP
include::ml/start-datafeed.asciidoc[]
include::ml/stop-datafeed.asciidoc[]
include::start-datafeed.asciidoc[]
include::stop-datafeed.asciidoc[]
//UPDATE
include::ml/update-datafeed.asciidoc[]
include::ml/update-filter.asciidoc[]
include::ml/update-job.asciidoc[]
include::ml/update-snapshot.asciidoc[]
include::update-datafeed.asciidoc[]
include::update-filter.asciidoc[]
include::update-job.asciidoc[]
include::update-snapshot.asciidoc[]
//VALIDATE
//include::ml/validate-detector.asciidoc[]
//include::ml/validate-job.asciidoc[]
//include::validate-detector.asciidoc[]
//include::validate-job.asciidoc[]

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-open-job]]
=== Open Jobs API
++++

@ -56,7 +57,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_open
}
--------------------------------------------------
// CONSOLE
// TEST[setup:server_metrics_job]
// TEST[skip:setup:server_metrics_job]

When the job opens, you receive the following results:
[source,js]

@ -65,5 +66,4 @@ When the job opens, you receive the following results:
"opened": true
}
----
//CONSOLE
// TESTRESPONSE

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-post-calendar-event]]
=== Add Events to Calendar API
++++

@ -52,7 +53,7 @@ POST _xpack/ml/calendars/planned-outages/events
}
--------------------------------------------------
// CONSOLE
// TEST[setup:calendar_outages_addjob]
// TEST[skip:setup:calendar_outages_addjob]

The API returns the following results:

@ -81,7 +82,7 @@ The API returns the following results:
]
}
----
//TESTRESPONSE
// TESTRESPONSE

For more information about these properties, see
<<ml-event-resource,Scheduled Event Resources>>.

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-post-data]]
=== Post Data to Jobs API
++++

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-preview-datafeed]]
=== Preview {dfeeds-cap} API
++++

@ -53,7 +54,7 @@ The following example obtains a preview of the `datafeed-farequote` {dfeed}:
GET _xpack/ml/datafeeds/datafeed-farequote/_preview
--------------------------------------------------
// CONSOLE
// TEST[setup:farequote_datafeed]
// TEST[skip:setup:farequote_datafeed]

The data that is returned for this example is as follows:
[source,js]

@ -1,4 +1,5 @@
[role="xpack"]
[testenv="platinum"]
[[ml-put-calendar-job]]
=== Add Jobs to Calendar API
++++

@ -38,7 +39,7 @@ The following example associates the `planned-outages` calendar with the
PUT _xpack/ml/calendars/planned-outages/jobs/total-requests
--------------------------------------------------
// CONSOLE
// TEST[setup:calendar_outages_openjob]
// TEST[skip:setup:calendar_outages_openjob]

The API returns the following results:

@ -51,4 +52,4 @@ The API returns the following results:
]
}
----
//TESTRESPONSE
// TESTRESPONSE