Moves Java code to fit x-pack style structure

This change moves the Java code from `/java/apps/engine-node` to `/elasticsearch` so it matches the structure required by X-Pack. This will make it easier to get the gradle build working correctly and to build the full X-Pack zip.
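For orientation, the files added in this commit imply a layout roughly like the one sketched below under the new `elasticsearch/` directory. Only `.gitignore` is named explicitly in this view; the other paths are inferred from the file contents and the usual esplugin layout, so treat them as approximate rather than authoritative.

elasticsearch/
  .gitignore
  build.gradle            (esplugin build for the 'prelert' plugin)
  gradle.properties       (supercsvVersion=2.4.0)
  licenses/               (super-csv sha1 and Apache 2.0 license text)
  src/main/java/org/elasticsearch/xpack/prelert/
    PrelertPlugin.java
    action/               (DeleteJobAction, DeleteModelSnapshotAction, GetBucketAction, ...)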

Original commit: elastic/x-pack-elasticsearch@2fd6539e85
Colin Goodheart-Smithe 2016-11-18 15:21:39 +00:00
parent 85b5220c59
commit 06df439db0
461 changed files with 60365 additions and 5 deletions

elasticsearch/.gitignore

@@ -0,0 +1,44 @@
# intellij files
.idea/
*.iml
*.ipr
*.iws
build-idea/
# eclipse files
.project
.classpath
.settings
build-eclipse/
# netbeans files
nb-configuration.xml
nbactions.xml
# gradle stuff
.gradle/
build/
# gradle wrapper
/gradle/
gradlew
gradlew.bat
# maven stuff (to be removed when trunk becomes 4.x)
*-execution-hints.log
target/
dependency-reduced-pom.xml
# testing stuff
**/.local*
.vagrant/
# osx stuff
.DS_Store
# needed in case docs build is run...maybe we can configure doc build to generate files under build?
html_docs
# random old stuff that we should look at the necessity of...
/tmp/
backwards/


@@ -0,0 +1,3 @@
= Elasticsearch Prelert Plugin
Behavioral Analytics for Elasticsearch


@@ -0,0 +1,91 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.internal.os.OperatingSystem
import java.nio.file.Files
import java.nio.file.Paths
import java.nio.file.StandardCopyOption
boolean isWindows = OperatingSystem.current().isWindows()
boolean isLinux = OperatingSystem.current().isLinux()
boolean isMacOsX = OperatingSystem.current().isMacOsX()
project.ext.nasDirectory = isWindows ? "\\\\prelert-nas\\builds\\6.5.0\\" :
(isMacOsX ? "/Volumes/builds/6.5.0/" : "/export/builds/6.5.0/")
// norelease: replace with something else when we become part of x-plugins
project.ext.nasExtension = '_' + (System.getenv()['GIT_COMMIT'] ?: 'xxxxxxxxxxxxxx').substring(0, 14) +
(isWindows ? "_windows-x86_64.zip" : (isMacOsX ? "_darwin-x86_64.zip" :
(isLinux ? "_linux-x86_64.zip" : "_sunos-x86_64.zip")))
apply plugin: 'elasticsearch.esplugin'
esplugin {
name 'prelert'
description 'Prelert Plugin'
classname 'org.elasticsearch.xpack.prelert.PrelertPlugin'
}
version = "${elasticsearchVersion}"
// We need to enable this at some point
thirdPartyAudit.enabled = false
dependencies {
compile group: 'net.sf.supercsv', name: 'super-csv', version:"${supercsvVersion}"
testCompile group: 'org.ini4j', name: 'ini4j', version:'0.5.2'
}
test {
exclude '**/*NoBootstrapTests.class'
}
task noBootstrapTest(type: Test,
dependsOn: test.dependsOn) {
classpath = project.test.classpath
testClassesDir = project.test.testClassesDir
include '**/*NoBootstrapTests.class'
}
check.dependsOn noBootstrapTest
noBootstrapTest.mustRunAfter test
integTest {
cluster {
//setting 'useNativeProcess', 'true'
distribution = 'zip'
}
}
integTest.mustRunAfter noBootstrapTest
bundlePlugin {
from("${rootDir}/cppdistribution") {
into '.'
// Don't copy Windows import libraries
exclude "**/*.lib"
// Don't copy the test support library
exclude "**/libPreTest.*"
includeEmptyDirs = false
}
}
// norelease: this won't be needed when the pluginAll task below is removed
class SimpleCopy extends DefaultTask {
String sourceFile;
String destFile;
@TaskAction
def copy() {
Files.copy(Paths.get(sourceFile), Paths.get(destFile), StandardCopyOption.REPLACE_EXISTING)
}
}
// norelease: by the time we move to x-plugins we cannot use the Prelert NAS at all
task pluginAll(type: SimpleCopy) {
// This doesn't use a Copy task because that builds hashes for a huge number of files on the NAS
String zipFile = "${esplugin.name}-${elasticsearchVersion}.zip"
sourceFile = "${projectDir}/build/distributions/" + zipFile
destFile = project.ext.nasDirectory + zipFile.replace('.zip', project.ext.nasExtension)
}
pluginAll.dependsOn check


@@ -0,0 +1 @@
supercsvVersion=2.4.0


@@ -0,0 +1 @@
017f8708c929029dde48bc298deaf3c7ae2452d3


@@ -0,0 +1,203 @@
/*
* Apache License
* Version 2.0, January 2004
* http://www.apache.org/licenses/
*
* TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
*
* 1. Definitions.
*
* "License" shall mean the terms and conditions for use, reproduction,
* and distribution as defined by Sections 1 through 9 of this document.
*
* "Licensor" shall mean the copyright owner or entity authorized by
* the copyright owner that is granting the License.
*
* "Legal Entity" shall mean the union of the acting entity and all
* other entities that control, are controlled by, or are under common
* control with that entity. For the purposes of this definition,
* "control" means (i) the power, direct or indirect, to cause the
* direction or management of such entity, whether by contract or
* otherwise, or (ii) ownership of fifty percent (50%) or more of the
* outstanding shares, or (iii) beneficial ownership of such entity.
*
* "You" (or "Your") shall mean an individual or Legal Entity
* exercising permissions granted by this License.
*
* "Source" form shall mean the preferred form for making modifications,
* including but not limited to software source code, documentation
* source, and configuration files.
*
* "Object" form shall mean any form resulting from mechanical
* transformation or translation of a Source form, including but
* not limited to compiled object code, generated documentation,
* and conversions to other media types.
*
* "Work" shall mean the work of authorship, whether in Source or
* Object form, made available under the License, as indicated by a
* copyright notice that is included in or attached to the work
* (an example is provided in the Appendix below).
*
* "Derivative Works" shall mean any work, whether in Source or Object
* form, that is based on (or derived from) the Work and for which the
* editorial revisions, annotations, elaborations, or other modifications
* represent, as a whole, an original work of authorship. For the purposes
* of this License, Derivative Works shall not include works that remain
* separable from, or merely link (or bind by name) to the interfaces of,
* the Work and Derivative Works thereof.
*
* "Contribution" shall mean any work of authorship, including
* the original version of the Work and any modifications or additions
* to that Work or Derivative Works thereof, that is intentionally
* submitted to Licensor for inclusion in the Work by the copyright owner
* or by an individual or Legal Entity authorized to submit on behalf of
* the copyright owner. For the purposes of this definition, "submitted"
* means any form of electronic, verbal, or written communication sent
* to the Licensor or its representatives, including but not limited to
* communication on electronic mailing lists, source code control systems,
* and issue tracking systems that are managed by, or on behalf of, the
* Licensor for the purpose of discussing and improving the Work, but
* excluding communication that is conspicuously marked or otherwise
* designated in writing by the copyright owner as "Not a Contribution."
*
* "Contributor" shall mean Licensor and any individual or Legal Entity
* on behalf of whom a Contribution has been received by Licensor and
* subsequently incorporated within the Work.
*
* 2. Grant of Copyright License. Subject to the terms and conditions of
* this License, each Contributor hereby grants to You a perpetual,
* worldwide, non-exclusive, no-charge, royalty-free, irrevocable
* copyright license to reproduce, prepare Derivative Works of,
* publicly display, publicly perform, sublicense, and distribute the
* Work and such Derivative Works in Source or Object form.
*
* 3. Grant of Patent License. Subject to the terms and conditions of
* this License, each Contributor hereby grants to You a perpetual,
* worldwide, non-exclusive, no-charge, royalty-free, irrevocable
* (except as stated in this section) patent license to make, have made,
* use, offer to sell, sell, import, and otherwise transfer the Work,
* where such license applies only to those patent claims licensable
* by such Contributor that are necessarily infringed by their
* Contribution(s) alone or by combination of their Contribution(s)
* with the Work to which such Contribution(s) was submitted. If You
* institute patent litigation against any entity (including a
* cross-claim or counterclaim in a lawsuit) alleging that the Work
* or a Contribution incorporated within the Work constitutes direct
* or contributory patent infringement, then any patent licenses
* granted to You under this License for that Work shall terminate
* as of the date such litigation is filed.
*
* 4. Redistribution. You may reproduce and distribute copies of the
* Work or Derivative Works thereof in any medium, with or without
* modifications, and in Source or Object form, provided that You
* meet the following conditions:
*
* (a) You must give any other recipients of the Work or
* Derivative Works a copy of this License; and
*
* (b) You must cause any modified files to carry prominent notices
* stating that You changed the files; and
*
* (c) You must retain, in the Source form of any Derivative Works
* that You distribute, all copyright, patent, trademark, and
* attribution notices from the Source form of the Work,
* excluding those notices that do not pertain to any part of
* the Derivative Works; and
*
* (d) If the Work includes a "NOTICE" text file as part of its
* distribution, then any Derivative Works that You distribute must
* include a readable copy of the attribution notices contained
* within such NOTICE file, excluding those notices that do not
* pertain to any part of the Derivative Works, in at least one
* of the following places: within a NOTICE text file distributed
* as part of the Derivative Works; within the Source form or
* documentation, if provided along with the Derivative Works; or,
* within a display generated by the Derivative Works, if and
* wherever such third-party notices normally appear. The contents
* of the NOTICE file are for informational purposes only and
* do not modify the License. You may add Your own attribution
* notices within Derivative Works that You distribute, alongside
* or as an addendum to the NOTICE text from the Work, provided
* that such additional attribution notices cannot be construed
* as modifying the License.
*
* You may add Your own copyright statement to Your modifications and
* may provide additional or different license terms and conditions
* for use, reproduction, or distribution of Your modifications, or
* for any such Derivative Works as a whole, provided Your use,
* reproduction, and distribution of the Work otherwise complies with
* the conditions stated in this License.
*
* 5. Submission of Contributions. Unless You explicitly state otherwise,
* any Contribution intentionally submitted for inclusion in the Work
* by You to the Licensor shall be under the terms and conditions of
* this License, without any additional terms or conditions.
* Notwithstanding the above, nothing herein shall supersede or modify
* the terms of any separate license agreement you may have executed
* with Licensor regarding such Contributions.
*
* 6. Trademarks. This License does not grant permission to use the trade
* names, trademarks, service marks, or product names of the Licensor,
* except as required for reasonable and customary use in describing the
* origin of the Work and reproducing the content of the NOTICE file.
*
* 7. Disclaimer of Warranty. Unless required by applicable law or
* agreed to in writing, Licensor provides the Work (and each
* Contributor provides its Contributions) on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied, including, without limitation, any warranties or conditions
* of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
* PARTICULAR PURPOSE. You are solely responsible for determining the
* appropriateness of using or redistributing the Work and assume any
* risks associated with Your exercise of permissions under this License.
*
* 8. Limitation of Liability. In no event and under no legal theory,
* whether in tort (including negligence), contract, or otherwise,
* unless required by applicable law (such as deliberate and grossly
* negligent acts) or agreed to in writing, shall any Contributor be
* liable to You for damages, including any direct, indirect, special,
* incidental, or consequential damages of any character arising as a
* result of this License or out of the use or inability to use the
* Work (including but not limited to damages for loss of goodwill,
* work stoppage, computer failure or malfunction, or any and all
* other commercial damages or losses), even if such Contributor
* has been advised of the possibility of such damages.
*
* 9. Accepting Warranty or Additional Liability. While redistributing
* the Work or Derivative Works thereof, You may choose to offer,
* and charge a fee for, acceptance of support, warranty, indemnity,
* or other liability obligations and/or rights consistent with this
* License. However, in accepting such obligations, You may act only
* on Your own behalf and on Your sole responsibility, not on behalf
* of any other Contributor, and only if You agree to indemnify,
* defend, and hold each Contributor harmless for any liability
* incurred by, or claims asserted against, such Contributor by reason
* of your accepting any such warranty or additional liability.
*
* END OF TERMS AND CONDITIONS
*
* APPENDIX: How to apply the Apache License to your work.
*
* To apply the Apache License to your work, attach the following
* boilerplate notice, with the fields enclosed by brackets "[]"
* replaced with your own identifying information. (Don't include
* the brackets!) The text should be enclosed in the appropriate
* comment syntax for the file format. We also recommend that a
* file or class name and description of purpose be included on the
* same "printed page" as the copyright notice for easier
* identification within third-party archives.
*
* Copyright 2007 Kasper B. Graversen
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/


@@ -0,0 +1,264 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchRequestParsers;
import org.elasticsearch.threadpool.ExecutorBuilder;
import org.elasticsearch.threadpool.FixedExecutorBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.prelert.action.PutListAction;
import org.elasticsearch.xpack.prelert.action.DeleteJobAction;
import org.elasticsearch.xpack.prelert.action.DeleteModelSnapshotAction;
import org.elasticsearch.xpack.prelert.action.GetBucketAction;
import org.elasticsearch.xpack.prelert.action.GetCategoryDefinitionAction;
import org.elasticsearch.xpack.prelert.action.GetInfluencersAction;
import org.elasticsearch.xpack.prelert.action.GetJobAction;
import org.elasticsearch.xpack.prelert.action.GetJobsAction;
import org.elasticsearch.xpack.prelert.action.GetListAction;
import org.elasticsearch.xpack.prelert.action.GetModelSnapshotsAction;
import org.elasticsearch.xpack.prelert.action.GetRecordsAction;
import org.elasticsearch.xpack.prelert.action.PauseJobAction;
import org.elasticsearch.xpack.prelert.action.PostDataAction;
import org.elasticsearch.xpack.prelert.action.PostDataCloseAction;
import org.elasticsearch.xpack.prelert.action.PostDataFlushAction;
import org.elasticsearch.xpack.prelert.action.PutJobAction;
import org.elasticsearch.xpack.prelert.action.PutModelSnapshotDescriptionAction;
import org.elasticsearch.xpack.prelert.action.ResumeJobAction;
import org.elasticsearch.xpack.prelert.action.RevertModelSnapshotAction;
import org.elasticsearch.xpack.prelert.action.StartJobSchedulerAction;
import org.elasticsearch.xpack.prelert.action.StopJobSchedulerAction;
import org.elasticsearch.xpack.prelert.action.UpdateJobSchedulerStatusAction;
import org.elasticsearch.xpack.prelert.action.UpdateJobStatusAction;
import org.elasticsearch.xpack.prelert.action.ValidateDetectorAction;
import org.elasticsearch.xpack.prelert.action.ValidateTransformAction;
import org.elasticsearch.xpack.prelert.action.ValidateTransformsAction;
import org.elasticsearch.xpack.prelert.job.data.DataProcessor;
import org.elasticsearch.xpack.prelert.job.logs.JobLogs;
import org.elasticsearch.xpack.prelert.job.manager.AutodetectProcessManager;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchBulkDeleterFactory;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchJobProvider;
import org.elasticsearch.xpack.prelert.job.process.autodetect.output.parsing.AutoDetectResultProcessor;
import org.elasticsearch.xpack.prelert.job.process.autodetect.output.parsing.AutodetectResultsParser;
import org.elasticsearch.xpack.prelert.job.process.normalizer.noop.NoOpRenormaliser;
import org.elasticsearch.xpack.prelert.job.scheduler.ScheduledJobService;
import org.elasticsearch.xpack.prelert.job.metadata.JobAllocator;
import org.elasticsearch.xpack.prelert.job.metadata.JobLifeCycleService;
import org.elasticsearch.xpack.prelert.job.metadata.PrelertMetadata;
import org.elasticsearch.xpack.prelert.job.process.NativeController;
import org.elasticsearch.xpack.prelert.job.process.ProcessCtrl;
import org.elasticsearch.xpack.prelert.job.process.autodetect.AutodetectProcessFactory;
import org.elasticsearch.xpack.prelert.job.process.autodetect.BlackHoleAutodetectProcess;
import org.elasticsearch.xpack.prelert.job.process.autodetect.NativeAutodetectProcessFactory;
import org.elasticsearch.xpack.prelert.job.scheduler.http.HttpDataExtractorFactory;
import org.elasticsearch.xpack.prelert.job.status.StatusReporter;
import org.elasticsearch.xpack.prelert.job.usage.UsageReporter;
import org.elasticsearch.xpack.prelert.rest.data.RestPostDataAction;
import org.elasticsearch.xpack.prelert.rest.data.RestPostDataCloseAction;
import org.elasticsearch.xpack.prelert.rest.data.RestPostDataFlushAction;
import org.elasticsearch.xpack.prelert.rest.influencers.RestGetInfluencersAction;
import org.elasticsearch.xpack.prelert.rest.job.RestDeleteJobAction;
import org.elasticsearch.xpack.prelert.rest.job.RestGetJobAction;
import org.elasticsearch.xpack.prelert.rest.job.RestGetJobsAction;
import org.elasticsearch.xpack.prelert.rest.job.RestPauseJobAction;
import org.elasticsearch.xpack.prelert.rest.job.RestPutJobsAction;
import org.elasticsearch.xpack.prelert.rest.job.RestResumeJobAction;
import org.elasticsearch.xpack.prelert.rest.list.RestPutListAction;
import org.elasticsearch.xpack.prelert.rest.list.RestGetListAction;
import org.elasticsearch.xpack.prelert.rest.modelsnapshots.RestDeleteModelSnapshotAction;
import org.elasticsearch.xpack.prelert.rest.modelsnapshots.RestGetModelSnapshotsAction;
import org.elasticsearch.xpack.prelert.rest.modelsnapshots.RestPutModelSnapshotDescriptionAction;
import org.elasticsearch.xpack.prelert.rest.modelsnapshots.RestRevertModelSnapshotAction;
import org.elasticsearch.xpack.prelert.rest.results.RestGetBucketAction;
import org.elasticsearch.xpack.prelert.rest.results.RestGetCategoryAction;
import org.elasticsearch.xpack.prelert.rest.results.RestGetRecordsAction;
import org.elasticsearch.xpack.prelert.rest.schedulers.RestStartJobSchedulerAction;
import org.elasticsearch.xpack.prelert.rest.schedulers.RestStopJobSchedulerAction;
import org.elasticsearch.xpack.prelert.rest.validate.RestValidateDetectorAction;
import org.elasticsearch.xpack.prelert.rest.validate.RestValidateTransformAction;
import org.elasticsearch.xpack.prelert.rest.validate.RestValidateTransformsAction;
import org.elasticsearch.xpack.prelert.utils.NamedPipeHelper;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
public class PrelertPlugin extends Plugin implements ActionPlugin {
public static final String NAME = "prelert";
public static final String BASE_PATH = "/_xpack/prelert/";
public static final String THREAD_POOL_NAME = NAME;
// NORELEASE - temporary solution
static final Setting<Boolean> USE_NATIVE_PROCESS_OPTION = Setting.boolSetting("useNativeProcess", false, Property.NodeScope,
Property.Deprecated);
private final Settings settings;
private final Environment env;
private final ParseFieldMatcherSupplier parseFieldMatcherSupplier;
static {
MetaData.registerPrototype(PrelertMetadata.TYPE, PrelertMetadata.PROTO);
}
public PrelertPlugin(Settings settings) {
this.settings = settings;
this.env = new Environment(settings);
ParseFieldMatcher matcher = new ParseFieldMatcher(settings);
parseFieldMatcherSupplier = () -> matcher;
}
@Override
public List<Setting<?>> getSettings() {
return Collections.unmodifiableList(
Arrays.asList(USE_NATIVE_PROCESS_OPTION,
JobLogs.DONT_DELETE_LOGS_SETTING,
ProcessCtrl.DONT_PERSIST_MODEL_STATE_SETTING,
ProcessCtrl.MAX_ANOMALY_RECORDS_SETTING,
StatusReporter.ACCEPTABLE_PERCENTAGE_DATE_PARSE_ERRORS_SETTING,
StatusReporter.ACCEPTABLE_PERCENTAGE_OUT_OF_ORDER_ERRORS_SETTING,
UsageReporter.UPDATE_INTERVAL_SETTING));
}
@Override
public Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool,
ResourceWatcherService resourceWatcherService, ScriptService scriptService,
SearchRequestParsers searchRequestParsers) {
// All components get bound in the guice context to the instances returned here
// and interfaces are not bound to their concrete classes.
// Instead of `bind(Interface.class).to(Implementation.class);` this happens:
// `bind(Implementation.class).toInstance(INSTANCE);`
// For this reason we can't use interfaces in the constructor of transport actions.
// This is ok for now as we will remove Guice soon.
ElasticsearchJobProvider jobProvider = new ElasticsearchJobProvider(client, 0, parseFieldMatcherSupplier.getParseFieldMatcher());
JobManager jobManager = new JobManager(env, settings, jobProvider, clusterService);
AutodetectProcessFactory processFactory;
if (USE_NATIVE_PROCESS_OPTION.get(settings)) {
try {
NativeController nativeController = new NativeController(env, new NamedPipeHelper());
nativeController.tailLogsInThread();
processFactory = new NativeAutodetectProcessFactory(jobProvider, env, settings, nativeController);
} catch (IOException e) {
throw new ElasticsearchException("Failed to create native process factory", e);
}
} else {
processFactory = (JobDetails, ignoreDowntime) -> new BlackHoleAutodetectProcess();
}
AutodetectResultsParser autodetectResultsParser = new AutodetectResultsParser(settings, parseFieldMatcherSupplier);
DataProcessor dataProcessor =
new AutodetectProcessManager(settings, client, env, threadPool, jobManager, autodetectResultsParser, processFactory);
ScheduledJobService scheduledJobService = new ScheduledJobService(threadPool, client, jobProvider, dataProcessor,
new HttpDataExtractorFactory(), System::currentTimeMillis);
return Arrays.asList(
jobProvider,
jobManager,
new JobAllocator(settings, clusterService, threadPool),
new JobLifeCycleService(settings, client, clusterService, scheduledJobService, dataProcessor, threadPool.generic()),
new ElasticsearchBulkDeleterFactory(client), //NORELEASE: this should use Delete-by-query
dataProcessor
);
}
@Override
public List<Class<? extends RestHandler>> getRestHandlers() {
return Arrays.asList(
RestGetJobAction.class,
RestGetJobsAction.class,
RestPutJobsAction.class,
RestDeleteJobAction.class,
RestPauseJobAction.class,
RestResumeJobAction.class,
RestGetListAction.class,
RestPutListAction.class,
RestGetInfluencersAction.class,
RestGetRecordsAction.class,
RestGetBucketAction.class,
RestPostDataAction.class,
RestPostDataCloseAction.class,
RestPostDataFlushAction.class,
RestValidateDetectorAction.class,
RestValidateTransformAction.class,
RestValidateTransformsAction.class,
RestGetCategoryAction.class,
RestGetModelSnapshotsAction.class,
RestRevertModelSnapshotAction.class,
RestPutModelSnapshotDescriptionAction.class,
RestStartJobSchedulerAction.class,
RestStopJobSchedulerAction.class,
RestDeleteModelSnapshotAction.class
);
}
@Override
public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
return Arrays.asList(
new ActionHandler<>(GetJobAction.INSTANCE, GetJobAction.TransportAction.class),
new ActionHandler<>(GetJobsAction.INSTANCE, GetJobsAction.TransportAction.class),
new ActionHandler<>(PutJobAction.INSTANCE, PutJobAction.TransportAction.class),
new ActionHandler<>(DeleteJobAction.INSTANCE, DeleteJobAction.TransportAction.class),
new ActionHandler<>(PauseJobAction.INSTANCE, PauseJobAction.TransportAction.class),
new ActionHandler<>(ResumeJobAction.INSTANCE, ResumeJobAction.TransportAction.class),
new ActionHandler<>(UpdateJobStatusAction.INSTANCE, UpdateJobStatusAction.TransportAction.class),
new ActionHandler<>(UpdateJobSchedulerStatusAction.INSTANCE, UpdateJobSchedulerStatusAction.TransportAction.class),
new ActionHandler<>(GetListAction.INSTANCE, GetListAction.TransportAction.class),
new ActionHandler<>(PutListAction.INSTANCE, PutListAction.TransportAction.class),
new ActionHandler<>(GetBucketAction.INSTANCE, GetBucketAction.TransportAction.class),
new ActionHandler<>(GetInfluencersAction.INSTANCE, GetInfluencersAction.TransportAction.class),
new ActionHandler<>(GetRecordsAction.INSTANCE, GetRecordsAction.TransportAction.class),
new ActionHandler<>(PostDataAction.INSTANCE, PostDataAction.TransportAction.class),
new ActionHandler<>(PostDataCloseAction.INSTANCE, PostDataCloseAction.TransportAction.class),
new ActionHandler<>(PostDataFlushAction.INSTANCE, PostDataFlushAction.TransportAction.class),
new ActionHandler<>(ValidateDetectorAction.INSTANCE, ValidateDetectorAction.TransportAction.class),
new ActionHandler<>(ValidateTransformAction.INSTANCE, ValidateTransformAction.TransportAction.class),
new ActionHandler<>(ValidateTransformsAction.INSTANCE, ValidateTransformsAction.TransportAction.class),
new ActionHandler<>(GetCategoryDefinitionAction.INSTANCE, GetCategoryDefinitionAction.TransportAction.class),
new ActionHandler<>(GetModelSnapshotsAction.INSTANCE, GetModelSnapshotsAction.TransportAction.class),
new ActionHandler<>(RevertModelSnapshotAction.INSTANCE, RevertModelSnapshotAction.TransportAction.class),
new ActionHandler<>(PutModelSnapshotDescriptionAction.INSTANCE, PutModelSnapshotDescriptionAction.TransportAction.class),
new ActionHandler<>(StartJobSchedulerAction.INSTANCE, StartJobSchedulerAction.TransportAction.class),
new ActionHandler<>(StopJobSchedulerAction.INSTANCE, StopJobSchedulerAction.TransportAction.class),
new ActionHandler<>(DeleteModelSnapshotAction.INSTANCE, DeleteModelSnapshotAction.TransportAction.class)
);
}
public static Path resolveConfigFile(Environment env, String name) {
return env.configFile().resolve(NAME).resolve(name);
}
public static Path resolveLogFile(Environment env, String name) {
return env.logsFile().resolve(NAME).resolve(name);
}
@Override
public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {
final FixedExecutorBuilder builder = new FixedExecutorBuilder(settings, THREAD_POOL_NAME,
5 * EsExecutors.boundedNumberOfProcessors(settings), 1000, "xpack.prelert.thread_pool");
return Collections.singletonList(builder);
}
}


@@ -0,0 +1,168 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class DeleteJobAction extends Action<DeleteJobAction.Request, DeleteJobAction.Response, DeleteJobAction.RequestBuilder> {
public static final DeleteJobAction INSTANCE = new DeleteJobAction();
public static final String NAME = "cluster:admin/prelert/job/delete";
private DeleteJobAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends AcknowledgedRequest<Request> {
private String jobId;
public Request(String jobId) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
}
Request() {}
public String getJobId() {
return jobId;
}
public void setJobId(String jobId) {
this.jobId = jobId;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
}
@Override
public int hashCode() {
return Objects.hash(jobId);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || obj.getClass() != getClass()) {
return false;
}
DeleteJobAction.Request other = (DeleteJobAction.Request) obj;
return Objects.equals(jobId, other.jobId);
}
}
static class RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, DeleteJobAction action) {
super(client, action, new Request());
}
}
public static class Response extends AcknowledgedResponse {
public Response(boolean acknowledged) {
super(acknowledged);
}
private Response() {}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final JobManager jobManager;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
JobManager jobManager) {
super(settings, DeleteJobAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.jobManager = jobManager;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
jobManager.deleteJob(request, listener);
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
}
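As a quick orientation for DeleteJobAction above (and the other Action classes that follow), here is a minimal, hypothetical usage sketch. It is not part of the commit: it assumes only the standard `ElasticsearchClient.execute(action, request, listener)` API that these `Action` subclasses plug into, and the helper class and job id are made up for illustration.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.prelert.action.DeleteJobAction;

class DeleteJobExample {
    // Illustrative helper, not part of the plugin: deletes a job and reacts to the acknowledgement.
    static void deleteJob(ElasticsearchClient client, String jobId) {
        DeleteJobAction.Request request = new DeleteJobAction.Request(jobId);
        client.execute(DeleteJobAction.INSTANCE, request, new ActionListener<DeleteJobAction.Response>() {
            @Override
            public void onResponse(DeleteJobAction.Response response) {
                // acknowledged is true once the master node has applied the delete
                boolean acknowledged = response.isAcknowledged();
            }

            @Override
            public void onFailure(Exception e) {
                // e.g. a ResourceNotFoundException if the job id is unknown
            }
        });
    }
}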


@@ -0,0 +1,203 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.ModelSnapshot;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchBulkDeleter;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchBulkDeleterFactory;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchJobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.JobProvider;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.List;
import java.util.Optional;
public class DeleteModelSnapshotAction extends Action<DeleteModelSnapshotAction.Request,
DeleteModelSnapshotAction.Response, DeleteModelSnapshotAction.RequestBuilder> {
public static final DeleteModelSnapshotAction INSTANCE = new DeleteModelSnapshotAction();
public static final String NAME = "cluster:admin/prelert/modelsnapshots/delete";
private DeleteModelSnapshotAction() {
super(NAME);
}
@Override
public DeleteModelSnapshotAction.RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public DeleteModelSnapshotAction.Response newResponse() {
return new Response();
}
public static class Request extends ActionRequest {
private String jobId;
private String snapshotId;
private Request() {
}
public Request(String jobId, String snapshotId) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, "jobId");
this.snapshotId = ExceptionsHelper.requireNonNull(snapshotId, "snapshotId");
}
public String getJobId() {
return jobId;
}
public String getSnapshotId() {
return snapshotId;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
snapshotId = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
out.writeString(snapshotId);
}
}
public static class Response extends AcknowledgedResponse {
public Response(boolean acknowledged) {
super(acknowledged);
}
private Response() {}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
public static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, DeleteModelSnapshotAction action) {
super(client, action, new Request());
}
}
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final JobProvider jobProvider;
private final JobManager jobManager;
private final ClusterService clusterService;
private final ElasticsearchBulkDeleterFactory bulkDeleterFactory;
@Inject
public TransportAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
ElasticsearchJobProvider jobProvider, JobManager jobManager, ClusterService clusterService,
ElasticsearchBulkDeleterFactory bulkDeleterFactory) {
super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new);
this.jobProvider = jobProvider;
this.jobManager = jobManager;
this.clusterService = clusterService;
this.bulkDeleterFactory = bulkDeleterFactory;
}
@Override
protected void doExecute(Request request, ActionListener<Response> listener) {
// Verify the snapshot exists
List<ModelSnapshot> deleteCandidates;
deleteCandidates = jobProvider.modelSnapshots(
request.getJobId(), 0, 1, null, null, null, true, request.getSnapshotId(), null
).hits();
if (deleteCandidates.size() > 1) {
logger.warn("More than one model found for [jobId: " + request.getJobId()
+ ", snapshotId: " + request.getSnapshotId() + "] tuple.");
}
if (deleteCandidates.isEmpty()) {
throw new ResourceNotFoundException(Messages.getMessage(Messages.REST_NO_SUCH_MODEL_SNAPSHOT, request.getJobId()));
}
ModelSnapshot deleteCandidate = deleteCandidates.get(0);
// Verify the snapshot is not being used
//
// NORELEASE: technically, this could be stale and refuse a delete, but I think that's acceptable
// since it is non-destructive
Optional<Job> job = jobManager.getJob(request.getJobId(), clusterService.state());
if (job.isPresent()) {
String currentModelInUse = job.get().getModelSnapshotId();
if (currentModelInUse != null && currentModelInUse.equals(request.getSnapshotId())) {
throw new IllegalArgumentException(Messages.getMessage(Messages.REST_CANNOT_DELETE_HIGHEST_PRIORITY,
request.getSnapshotId(), request.getJobId()));
}
}
// Delete the snapshot and any associated state files
ElasticsearchBulkDeleter deleter = bulkDeleterFactory.apply(request.getJobId());
deleter.deleteModelSnapshot(deleteCandidate);
deleter.commit(new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse bulkResponse) {
// We don't care about the bulk response, just that it succeeded
listener.onResponse(new DeleteModelSnapshotAction.Response(true));
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
jobManager.audit(request.getJobId()).info(Messages.getMessage(Messages.JOB_AUDIT_SNAPSHOT_DELETED,
deleteCandidate.getDescription()));
}
}
}


@@ -0,0 +1,414 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.persistence.BucketQueryBuilder;
import org.elasticsearch.xpack.prelert.job.persistence.BucketsQueryBuilder;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchJobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.JobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.QueryPage;
import org.elasticsearch.xpack.prelert.job.results.Bucket;
import org.elasticsearch.xpack.prelert.job.results.PageParams;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
public class GetBucketAction extends Action<GetBucketAction.Request, GetBucketAction.Response, GetBucketAction.RequestBuilder> {
public static final GetBucketAction INSTANCE = new GetBucketAction();
public static final String NAME = "indices:admin/prelert/results/bucket/get";
private GetBucketAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends ActionRequest implements ToXContent {
public static final ParseField EXPAND = new ParseField("expand");
public static final ParseField INCLUDE_INTERIM = new ParseField("includeInterim");
public static final ParseField PARTITION_VALUE = new ParseField("partitionValue");
public static final ParseField START = new ParseField("start");
public static final ParseField END = new ParseField("end");
public static final ParseField ANOMALY_SCORE = new ParseField("anomalyScore");
public static final ParseField MAX_NORMALIZED_PROBABILITY = new ParseField("maxNormalizedProbability");
public static final ParseField TIMESTAMP = new ParseField("timestamp");
private static final ObjectParser<Request, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>(NAME, Request::new);
static {
PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID);
PARSER.declareString(Request::setTimestamp, Bucket.TIMESTAMP);
PARSER.declareString(Request::setPartitionValue, PARTITION_VALUE);
PARSER.declareBoolean(Request::setExpand, EXPAND);
PARSER.declareBoolean(Request::setIncludeInterim, INCLUDE_INTERIM);
PARSER.declareString(Request::setStart, START);
PARSER.declareString(Request::setEnd, END);
PARSER.declareBoolean(Request::setExpand, EXPAND);
PARSER.declareBoolean(Request::setIncludeInterim, INCLUDE_INTERIM);
PARSER.declareObject(Request::setPageParams, PageParams.PARSER, PageParams.PAGE);
PARSER.declareDouble(Request::setAnomalyScore, ANOMALY_SCORE);
PARSER.declareDouble(Request::setMaxNormalizedProbability, MAX_NORMALIZED_PROBABILITY);
PARSER.declareString(Request::setPartitionValue, PARTITION_VALUE);
}
public static Request parseRequest(String jobId, XContentParser parser,
ParseFieldMatcherSupplier parseFieldMatcherSupplier) {
Request request = PARSER.apply(parser, parseFieldMatcherSupplier);
if (jobId != null) {
request.jobId = jobId;
}
return request;
}
private String jobId;
private String timestamp;
private boolean expand = false;
private boolean includeInterim = false;
private String partitionValue;
private String start;
private String end;
private PageParams pageParams = null;
private double anomalyScore = 0.0;
private double maxNormalizedProbability = 0.0;
Request() {
}
public Request(String jobId) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
}
public String getJobId() {
return jobId;
}
public void setTimestamp(String timestamp) {
this.timestamp = ExceptionsHelper.requireNonNull(timestamp, Bucket.TIMESTAMP.getPreferredName());
}
public String getTimestamp() {
return timestamp;
}
public boolean isExpand() {
return expand;
}
public void setExpand(boolean expand) {
this.expand = expand;
}
public boolean isIncludeInterim() {
return includeInterim;
}
public void setIncludeInterim(boolean includeInterim) {
this.includeInterim = includeInterim;
}
public String getPartitionValue() {
return partitionValue;
}
public void setPartitionValue(String partitionValue) {
this.partitionValue = ExceptionsHelper.requireNonNull(partitionValue, PARTITION_VALUE.getPreferredName());
}
public String getStart() {
return start;
}
public void setStart(String start) {
this.start = ExceptionsHelper.requireNonNull(start, START.getPreferredName());
}
public String getEnd() {
return end;
}
public void setEnd(String end) {
this.end = ExceptionsHelper.requireNonNull(end, END.getPreferredName());
}
public PageParams getPageParams() {
return pageParams;
}
public void setPageParams(PageParams pageParams) {
this.pageParams = ExceptionsHelper.requireNonNull(pageParams, PageParams.PAGE.getPreferredName());
}
public double getAnomalyScore() {
return anomalyScore;
}
public void setAnomalyScore(double anomalyScore) {
this.anomalyScore = anomalyScore;
}
public double getMaxNormalizedProbability() {
return maxNormalizedProbability;
}
public void setMaxNormalizedProbability(double maxNormalizedProbability) {
this.maxNormalizedProbability = maxNormalizedProbability;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if ((timestamp == null || timestamp.isEmpty())
&& (start == null || start.isEmpty() || end == null || end.isEmpty())) {
validationException = addValidationError("Either [timestamp] or [start, end] parameters must be set.", validationException);
}
return validationException;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
timestamp = in.readOptionalString();
expand = in.readBoolean();
includeInterim = in.readBoolean();
partitionValue = in.readOptionalString();
start = in.readOptionalString();
end = in.readOptionalString();
anomalyScore = in.readDouble();
maxNormalizedProbability = in.readDouble();
pageParams = in.readOptionalWriteable(PageParams::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
out.writeOptionalString(timestamp);
out.writeBoolean(expand);
out.writeBoolean(includeInterim);
out.writeOptionalString(partitionValue);
out.writeOptionalString(start);
out.writeOptionalString(end);
out.writeDouble(anomalyScore);
out.writeDouble(maxNormalizedProbability);
out.writeOptionalWriteable(pageParams);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
if (timestamp != null) {
builder.field(Bucket.TIMESTAMP.getPreferredName(), timestamp);
}
builder.field(EXPAND.getPreferredName(), expand);
builder.field(INCLUDE_INTERIM.getPreferredName(), includeInterim);
if (partitionValue != null) {
builder.field(PARTITION_VALUE.getPreferredName(), partitionValue);
}
if (start != null) {
builder.field(START.getPreferredName(), start);
}
if (end != null) {
builder.field(END.getPreferredName(), end);
}
if (pageParams != null) {
builder.field(PageParams.PAGE.getPreferredName(), pageParams);
}
builder.field(ANOMALY_SCORE.getPreferredName(), anomalyScore);
builder.field(MAX_NORMALIZED_PROBABILITY.getPreferredName(), maxNormalizedProbability);
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(jobId, timestamp, partitionValue, expand, includeInterim,
anomalyScore, maxNormalizedProbability, pageParams, start, end);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId) &&
Objects.equals(timestamp, other.timestamp) &&
Objects.equals(partitionValue, other.partitionValue) &&
Objects.equals(expand, other.expand) &&
Objects.equals(includeInterim, other.includeInterim) &&
Objects.equals(anomalyScore, other.anomalyScore) &&
Objects.equals(maxNormalizedProbability, other.maxNormalizedProbability) &&
Objects.equals(pageParams, other.pageParams) &&
Objects.equals(start, other.start) &&
Objects.equals(end, other.end);
}
}
static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
RequestBuilder(ElasticsearchClient client) {
super(client, INSTANCE, new Request());
}
}
public static class Response extends ActionResponse implements StatusToXContent {
private QueryPage<Bucket> buckets;
Response() {
}
Response(QueryPage<Bucket> buckets) {
this.buckets = buckets;
}
public QueryPage<Bucket> getBuckets() {
return buckets;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
buckets = new QueryPage<>(in, Bucket::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
buckets.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return buckets.doXContentBody(builder, params);
}
@Override
public RestStatus status() {
return buckets.hitCount() == 0 ? RestStatus.NOT_FOUND : RestStatus.OK;
}
@Override
public int hashCode() {
return Objects.hash(buckets);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Response other = (Response) obj;
return Objects.equals(buckets, other.buckets);
}
@SuppressWarnings("deprecation")
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
toXContent(builder, EMPTY_PARAMS);
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere
return "{ \"error\" : \"" + org.elasticsearch.ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
}
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final JobProvider jobProvider;
@Inject
public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
ElasticsearchJobProvider jobProvider) {
super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new);
this.jobProvider = jobProvider;
}
@Override
protected void doExecute(Request request, ActionListener<Response> listener) {
QueryPage<Bucket> results;
// Single bucket
if (request.timestamp != null) {
BucketQueryBuilder.BucketQuery query =
new BucketQueryBuilder(request.timestamp).expand(request.expand)
.includeInterim(request.includeInterim)
.partitionValue(request.partitionValue)
.build();
results = jobProvider.bucket(request.jobId, query);
} else {
// Multiple buckets
BucketsQueryBuilder.BucketsQuery query =
new BucketsQueryBuilder().expand(request.expand)
.includeInterim(request.includeInterim)
.epochStart(request.start)
.epochEnd(request.end)
.from(request.pageParams.getFrom())
.size(request.pageParams.getSize())
.anomalyScoreThreshold(request.anomalyScore)
.normalizedProbabilityThreshold(request.maxNormalizedProbability)
.partitionValue(request.partitionValue)
.build();
results = jobProvider.buckets(request.jobId, query);
}
listener.onResponse(new Response(results));
}
}
}


@@ -0,0 +1,209 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchJobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.JobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.QueryPage;
import org.elasticsearch.xpack.prelert.job.results.CategoryDefinition;
import org.elasticsearch.xpack.prelert.job.results.PageParams;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class GetCategoryDefinitionAction extends
Action<GetCategoryDefinitionAction.Request, GetCategoryDefinitionAction.Response, GetCategoryDefinitionAction.RequestBuilder> {
public static final GetCategoryDefinitionAction INSTANCE = new GetCategoryDefinitionAction();
private static final String NAME = "cluster:admin/prelert/categorydefinition/get";
private GetCategoryDefinitionAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends ActionRequest {
public static final ParseField CATEGORY_ID = new ParseField("categoryId");
public static final ParseField FROM = new ParseField("from");
public static final ParseField SIZE = new ParseField("size");
private String jobId;
private String categoryId;
private PageParams pageParams = null;
public Request(String jobId) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
}
Request() {
}
public String getCategoryId() {
return categoryId;
}
public void setCategoryId(String categoryId) {
this.categoryId = ExceptionsHelper.requireNonNull(categoryId, CATEGORY_ID.getPreferredName());
}
public PageParams getPageParams() {
return pageParams;
}
public void setPageParams(PageParams pageParams) {
this.pageParams = pageParams;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
categoryId = in.readOptionalString();
pageParams = in.readOptionalWriteable(PageParams::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
out.writeOptionalString(categoryId);
out.writeOptionalWriteable(pageParams);
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
Request request = (Request) o;
return Objects.equals(jobId, request.jobId)
&& Objects.equals(categoryId, request.categoryId)
&& Objects.equals(pageParams, request.pageParams);
}
@Override
public int hashCode() {
return Objects.hash(jobId, categoryId, pageParams);
}
}
public static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, GetCategoryDefinitionAction action) {
super(client, action, new Request());
}
}
public static class Response extends ActionResponse implements ToXContent {
private QueryPage<CategoryDefinition> result;
public Response(QueryPage<CategoryDefinition> result) {
this.result = result;
}
Response() {
}
public QueryPage<CategoryDefinition> getResult() {
return result;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
result = new QueryPage<>(in, CategoryDefinition::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
result.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
result.doXContentBody(builder, params);
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
Response response = (Response) o;
return Objects.equals(result, response.result);
}
@Override
public int hashCode() {
return Objects.hash(result);
}
}
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final JobProvider jobProvider;
@Inject
public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, ElasticsearchJobProvider jobProvider) {
super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new);
this.jobProvider = jobProvider;
}
@Override
protected void doExecute(Request request, ActionListener<Response> listener) {
QueryPage<CategoryDefinition> result;
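// A specific category id returns that single definition; otherwise the requested page of definitions is returned.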
if (request.categoryId != null) {
result = jobProvider.categoryDefinition(request.jobId, request.categoryId);
} else {
result = jobProvider.categoryDefinitions(request.jobId, request.pageParams.getFrom(),
request.pageParams.getSize());
}
listener.onResponse(new Response(result));
}
}
}
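As a usage sketch only (not part of the moved sources): assuming an ElasticsearchClient named client is in scope, for example in a test or another transport action, a caller could fetch category definitions as below; the job id, category id and class name are placeholders.

import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.prelert.action.GetCategoryDefinitionAction;

class GetCategoryDefinitionExample {
    static GetCategoryDefinitionAction.Response fetch(ElasticsearchClient client) {
        // "my-job" is a placeholder job id used only for illustration
        GetCategoryDefinitionAction.Request request = new GetCategoryDefinitionAction.Request("my-job");
        // Either ask for one category by id...
        request.setCategoryId("1");
        // ...or leave the category id unset and page through all definitions instead:
        // request.setPageParams(new PageParams(0, 50));
        return client.execute(GetCategoryDefinitionAction.INSTANCE, request).actionGet();
    }
}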

View File

@ -0,0 +1,329 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchJobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.InfluencersQueryBuilder;
import org.elasticsearch.xpack.prelert.job.persistence.JobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.QueryPage;
import org.elasticsearch.xpack.prelert.job.results.Influencer;
import org.elasticsearch.xpack.prelert.job.results.PageParams;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class GetInfluencersAction
extends Action<GetInfluencersAction.Request, GetInfluencersAction.Response, GetInfluencersAction.RequestBuilder> {
public static final GetInfluencersAction INSTANCE = new GetInfluencersAction();
public static final String NAME = "indices:admin/prelert/results/influencers/get";
private GetInfluencersAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends ActionRequest implements ToXContent {
public static final ParseField START = new ParseField("start");
public static final ParseField END = new ParseField("end");
public static final ParseField INCLUDE_INTERIM = new ParseField("includeInterim");
public static final ParseField ANOMALY_SCORE = new ParseField("anomalyScore");
public static final ParseField SORT_FIELD = new ParseField("sort");
public static final ParseField DESCENDING_SORT = new ParseField("desc");
private static final ObjectParser<Request, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>(NAME, Request::new);
static {
PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID);
PARSER.declareString((request, start) -> request.start = start, START);
PARSER.declareString((request, end) -> request.end = end, END);
PARSER.declareBoolean(Request::setIncludeInterim, INCLUDE_INTERIM);
PARSER.declareObject(Request::setPageParams, PageParams.PARSER, PageParams.PAGE);
PARSER.declareDouble(Request::setAnomalyScore, ANOMALY_SCORE);
PARSER.declareString(Request::setSort, SORT_FIELD);
PARSER.declareBoolean(Request::setDecending, DESCENDING_SORT);
}
public static Request parseRequest(String jobId, String start, String end, XContentParser parser,
ParseFieldMatcherSupplier parseFieldMatcherSupplier) {
Request request = PARSER.apply(parser, parseFieldMatcherSupplier);
if (jobId != null) {
request.jobId = jobId;
}
if (start != null) {
request.start = start;
}
if (end != null) {
request.end = end;
}
return request;
}
private String jobId;
private String start;
private String end;
private boolean includeInterim = false;
private PageParams pageParams = new PageParams(0, 100);
private double anomalyScoreFilter = 0.0;
private String sort = Influencer.ANOMALY_SCORE.getPreferredName();
private boolean decending = false;
Request() {
}
public Request(String jobId, String start, String end) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
this.start = ExceptionsHelper.requireNonNull(start, START.getPreferredName());
this.end = ExceptionsHelper.requireNonNull(end, END.getPreferredName());
}
public String getJobId() {
return jobId;
}
public String getStart() {
return start;
}
public String getEnd() {
return end;
}
public boolean isDecending() {
return decending;
}
public void setDecending(boolean decending) {
this.decending = decending;
}
public boolean isIncludeInterim() {
return includeInterim;
}
public void setIncludeInterim(boolean includeInterim) {
this.includeInterim = includeInterim;
}
public void setPageParams(PageParams pageParams) {
this.pageParams = pageParams;
}
public PageParams getPageParams() {
return pageParams;
}
public double getAnomalyScoreFilter() {
return anomalyScoreFilter;
}
public void setAnomalyScore(double anomalyScoreFilter) {
this.anomalyScoreFilter = anomalyScoreFilter;
}
public String getSort() {
return sort;
}
public void setSort(String sort) {
this.sort = ExceptionsHelper.requireNonNull(sort, SORT_FIELD.getPreferredName());
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
includeInterim = in.readBoolean();
pageParams = new PageParams(in);
start = in.readString();
end = in.readString();
sort = in.readOptionalString();
decending = in.readBoolean();
anomalyScoreFilter = in.readDouble();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
out.writeBoolean(includeInterim);
pageParams.writeTo(out);
out.writeString(start);
out.writeString(end);
out.writeOptionalString(sort);
out.writeBoolean(decending);
out.writeDouble(anomalyScoreFilter);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
builder.field(INCLUDE_INTERIM.getPreferredName(), includeInterim);
builder.field(PageParams.PAGE.getPreferredName(), pageParams);
builder.field(START.getPreferredName(), start);
builder.field(END.getPreferredName(), end);
builder.field(SORT_FIELD.getPreferredName(), sort);
builder.field(DESCENDING_SORT.getPreferredName(), decending);
builder.field(ANOMALY_SCORE.getPreferredName(), anomalyScoreFilter);
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(jobId, includeInterim, pageParams, start, end, sort, decending, anomalyScoreFilter);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId) && Objects.equals(start, other.start) && Objects.equals(end, other.end)
&& Objects.equals(includeInterim, other.includeInterim) && Objects.equals(pageParams, other.pageParams)
&& Objects.equals(anomalyScoreFilter, other.anomalyScoreFilter) && Objects.equals(decending, other.decending)
&& Objects.equals(sort, other.sort);
}
}
static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
RequestBuilder(ElasticsearchClient client) {
super(client, INSTANCE, new Request());
}
}
public static class Response extends ActionResponse implements ToXContent {
private QueryPage<Influencer> influencers;
Response() {
}
Response(QueryPage<Influencer> influencers) {
this.influencers = influencers;
}
public QueryPage<Influencer> getInfluencers() {
return influencers;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
influencers = new QueryPage<>(in, Influencer::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
influencers.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return influencers.doXContentBody(builder, params);
}
@Override
public int hashCode() {
return Objects.hash(influencers);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Response other = (Response) obj;
return Objects.equals(influencers, other.influencers);
}
@SuppressWarnings("deprecation")
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
toXContent(builder, ToXContent.EMPTY_PARAMS);
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere
return "{ \"error\" : \"" + org.elasticsearch.ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
}
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final JobProvider jobProvider;
@Inject
public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, ElasticsearchJobProvider jobProvider) {
super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new);
this.jobProvider = jobProvider;
}
@Override
protected void doExecute(Request request, ActionListener<Response> listener) {
InfluencersQueryBuilder.InfluencersQuery query = new InfluencersQueryBuilder().includeInterim(request.includeInterim)
.epochStart(request.start).epochEnd(request.end).from(request.pageParams.getFrom()).size(request.pageParams.getSize())
.anomalyScoreThreshold(request.anomalyScoreFilter).sortField(request.sort).sortDescending(request.decending).build();
QueryPage<Influencer> page = jobProvider.influencers(request.jobId, query);
listener.onResponse(new Response(page));
}
}
}
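A hedged caller sketch for this action (not taken from the commit); it assumes a client instance, a placeholder job id and placeholder epoch-second start/end strings.

import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.prelert.action.GetInfluencersAction;
import org.elasticsearch.xpack.prelert.job.persistence.QueryPage;
import org.elasticsearch.xpack.prelert.job.results.Influencer;
import org.elasticsearch.xpack.prelert.job.results.PageParams;

class GetInfluencersExample {
    static QueryPage<Influencer> topInfluencers(ElasticsearchClient client) {
        // Placeholder job id and epoch-second start/end values, for illustration only
        GetInfluencersAction.Request request = new GetInfluencersAction.Request("my-job", "1478160000", "1478246400");
        request.setAnomalyScore(50.0);                 // drop influencers scoring below 50
        request.setSort(Influencer.ANOMALY_SCORE.getPreferredName());
        request.setDecending(true);                    // highest scores first
        request.setPageParams(new PageParams(0, 20));  // first 20 hits
        GetInfluencersAction.Response response = client.execute(GetInfluencersAction.INSTANCE, request).actionGet();
        return response.getInfluencers();
    }
}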

View File

@ -0,0 +1,376 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.DataCounts;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.ModelSizeStats;
import org.elasticsearch.xpack.prelert.job.manager.AutodetectProcessManager;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchJobProvider;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import org.elasticsearch.xpack.prelert.utils.SingleDocument;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
import java.util.Optional;
public class GetJobAction extends Action<GetJobAction.Request, GetJobAction.Response, GetJobAction.RequestBuilder> {
public static final GetJobAction INSTANCE = new GetJobAction();
public static final String NAME = "cluster:admin/prelert/job/get";
private GetJobAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends MasterNodeReadRequest<Request> {
private String jobId;
private boolean config;
private boolean dataCounts;
private boolean modelSizeStats;
Request() {
}
public Request(String jobId) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
}
public String getJobId() {
return jobId;
}
public Request all() {
this.config = true;
this.dataCounts = true;
this.modelSizeStats = true;
return this;
}
public boolean config() {
return config;
}
public Request config(boolean config) {
this.config = config;
return this;
}
public boolean dataCounts() {
return dataCounts;
}
public Request dataCounts(boolean dataCounts) {
this.dataCounts = dataCounts;
return this;
}
public boolean modelSizeStats() {
return modelSizeStats;
}
public Request modelSizeStats(boolean modelSizeStats) {
this.modelSizeStats = modelSizeStats;
return this;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
config = in.readBoolean();
dataCounts = in.readBoolean();
modelSizeStats = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
out.writeBoolean(config);
out.writeBoolean(dataCounts);
out.writeBoolean(modelSizeStats);
}
@Override
public int hashCode() {
return Objects.hash(jobId, config, dataCounts, modelSizeStats);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId) && this.config == other.config
&& this.dataCounts == other.dataCounts && this.modelSizeStats == other.modelSizeStats;
}
}
public static class RequestBuilder extends MasterNodeReadOperationRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, GetJobAction action) {
super(client, action, new Request());
}
}
public static class Response extends ActionResponse implements StatusToXContent {
static class JobInfo implements ToXContent, Writeable {
@Nullable
private Job jobConfig;
@Nullable
private DataCounts dataCounts;
@Nullable
private ModelSizeStats modelSizeStats;
JobInfo(@Nullable Job job, @Nullable DataCounts dataCounts, @Nullable ModelSizeStats modelSizeStats) {
this.jobConfig = job;
this.dataCounts = dataCounts;
this.modelSizeStats = modelSizeStats;
}
JobInfo(StreamInput in) throws IOException {
jobConfig = in.readOptionalWriteable(Job::new);
dataCounts = in.readOptionalWriteable(DataCounts::new);
modelSizeStats = in.readOptionalWriteable(ModelSizeStats::new);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (jobConfig != null) {
builder.field("config", jobConfig);
}
if (dataCounts != null) {
builder.field("data_counts", dataCounts);
}
if (modelSizeStats != null) {
builder.field("model_size_stats", modelSizeStats);
}
builder.endObject();
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalWriteable(jobConfig);
out.writeOptionalWriteable(dataCounts);
out.writeOptionalWriteable(modelSizeStats);
}
@Override
public int hashCode() {
return Objects.hash(jobConfig, dataCounts, modelSizeStats);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
JobInfo other = (JobInfo) obj;
return Objects.equals(jobConfig, other.jobConfig)
&& Objects.equals(this.dataCounts, other.dataCounts)
&& Objects.equals(this.modelSizeStats, other.modelSizeStats);
}
}
private SingleDocument<JobInfo> jobResponse;
public Response() {
jobResponse = SingleDocument.empty(Job.TYPE);
}
public Response(JobInfo jobResponse) {
this.jobResponse = new SingleDocument<>(Job.TYPE, jobResponse);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobResponse = new SingleDocument<>(in, JobInfo::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
jobResponse.writeTo(out);
}
@Override
public RestStatus status() {
return jobResponse.status();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return jobResponse.toXContent(builder, params);
}
@Override
public int hashCode() {
return Objects.hash(jobResponse);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Response other = (Response) obj;
return Objects.equals(jobResponse, other.jobResponse);
}
@SuppressWarnings("deprecation")
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere
return "{ \"error\" : \"" + org.elasticsearch.ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
}
public static class TransportAction extends TransportMasterNodeReadAction<Request, Response> {
private final JobManager jobManager;
private final AutodetectProcessManager processManager;
private final ElasticsearchJobProvider jobProvider;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
JobManager jobManager, AutodetectProcessManager processManager, ElasticsearchJobProvider jobProvider) {
super(settings, GetJobAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.jobManager = jobManager;
this.processManager = processManager;
this.jobProvider = jobProvider;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
logger.debug("Get job '{}', config={}, data_counts={}, model_size_stats={}",
request.getJobId(), request.config(), request.dataCounts(), request.modelSizeStats());
// always get the job regardless of the request.config param because if the job
// can't be found a different response is returned.
Optional<Job> optionalJob = jobManager.getJob(request.getJobId(), state);
if (optionalJob.isPresent() == false) {
logger.debug(String.format(Locale.ROOT, "Cannot find job '%s'", request.getJobId()));
listener.onResponse(new Response());
return;
}
logger.debug("Returning job '" + optionalJob.get().getJobId() + "'");
Job job = request.config() && optionalJob.isPresent() ? optionalJob.get() : null;
DataCounts dataCounts = readDataCounts(request);
ModelSizeStats modelSizeStats = readModelSizeStats(request);
listener.onResponse(new Response(new Response.JobInfo(job, dataCounts, modelSizeStats)));
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
}
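// Prefer the in-memory values held by the autodetect process manager; fall back to what the job provider has persisted.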
private DataCounts readDataCounts(Request request) {
if (request.dataCounts()) {
Optional<DataCounts> counts = processManager.getDataCounts(request.getJobId());
return counts.orElseGet(() -> jobProvider.dataCounts(request.getJobId()));
}
return null;
}
private ModelSizeStats readModelSizeStats(Request request) {
if (request.modelSizeStats()) {
Optional<ModelSizeStats> sizeStats = processManager.getModelSizeStats(request.getJobId());
return sizeStats.orElseGet(() -> jobProvider.modelSizeStats(request.getJobId()).orElse(null));
}
return null;
}
}
}
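For reference, a minimal caller sketch (an illustration, not part of these sources), assuming an ElasticsearchClient named client and a placeholder job id.

import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.prelert.action.GetJobAction;

class GetJobExample {
    static GetJobAction.Response fetch(ElasticsearchClient client) {
        // "my-job" is a placeholder id; all() requests config, data counts and model size stats together
        GetJobAction.Request request = new GetJobAction.Request("my-job").all();
        // Alternatively pick individual sections, e.g. request.config(true).dataCounts(true);
        return client.execute(GetJobAction.INSTANCE, request).actionGet();
    }
}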

View File

@ -0,0 +1,228 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import org.elasticsearch.xpack.prelert.job.persistence.QueryPage;
import org.elasticsearch.xpack.prelert.job.results.PageParams;
import java.io.IOException;
import java.util.Objects;
public class GetJobsAction extends Action<GetJobsAction.Request, GetJobsAction.Response, GetJobsAction.RequestBuilder> {
public static final GetJobsAction INSTANCE = new GetJobsAction();
public static final String NAME = "cluster:admin/prelert/jobs/get";
private GetJobsAction() {
super(NAME);
}
@Override
public GetJobsAction.RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public GetJobsAction.Response newResponse() {
return new Response();
}
public static class Request extends MasterNodeReadRequest<Request> implements ToXContent {
public static final ObjectParser<Request, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>(NAME, Request::new);
static {
PARSER.declareObject(Request::setPageParams, PageParams.PARSER, PageParams.PAGE);
}
private PageParams pageParams = new PageParams(0, 100);
public PageParams getPageParams() {
return pageParams;
}
public void setPageParams(PageParams pageParams) {
this.pageParams = pageParams;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
pageParams = new PageParams(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
pageParams.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(PageParams.PAGE.getPreferredName(), pageParams);
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(pageParams);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(pageParams, other.pageParams);
}
}
public static class Response extends ActionResponse implements ToXContent {
private QueryPage<Job> jobs;
public Response(QueryPage<Job> jobs) {
this.jobs = jobs;
}
public Response() {}
public QueryPage<Job> getResponse() {
return jobs;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobs = new QueryPage<>(in, Job::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
jobs.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return jobs.doXContentBody(builder, params);
}
@Override
public int hashCode() {
return Objects.hash(jobs);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Response other = (Response) obj;
return Objects.equals(jobs, other.jobs);
}
@SuppressWarnings("deprecation")
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere
return "{ \"error\" : \"" + org.elasticsearch.ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
}
public static class RequestBuilder extends MasterNodeReadOperationRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, GetJobsAction action) {
super(client, action, new Request());
}
}
public static class TransportAction extends TransportMasterNodeReadAction<Request, Response> {
private final JobManager jobManager;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, JobManager jobManager) {
super(settings, GetJobsAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.jobManager = jobManager;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
QueryPage<Job> jobsPage = jobManager.getJobs(request.pageParams.getFrom(), request.pageParams.getSize(), state);
listener.onResponse(new Response(jobsPage));
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
}
}
}
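A short illustrative caller for the jobs listing (assumes a client instance is available; the class name is a placeholder).

import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.prelert.action.GetJobsAction;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.persistence.QueryPage;
import org.elasticsearch.xpack.prelert.job.results.PageParams;

class GetJobsExample {
    static QueryPage<Job> firstPageOfJobs(ElasticsearchClient client) {
        GetJobsAction.Request request = new GetJobsAction.Request();
        request.setPageParams(new PageParams(0, 50)); // from = 0, size = 50
        GetJobsAction.Response response = client.execute(GetJobsAction.INSTANCE, request).actionGet();
        return response.getResponse();
    }
}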

View File

@ -0,0 +1,264 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.TransportGetAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.lists.ListDocument;
import org.elasticsearch.xpack.prelert.utils.SingleDocument;
import java.io.IOException;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
public class GetListAction extends Action<GetListAction.Request, GetListAction.Response, GetListAction.RequestBuilder> {
public static final GetListAction INSTANCE = new GetListAction();
public static final String NAME = "cluster:admin/prelert/list/get";
private GetListAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends MasterNodeReadRequest<Request> {
private String listId;
Request() {
}
public Request(String listId) {
this.listId = listId;
}
public String getListId() {
return listId;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (listId == null) {
validationException = addValidationError("List ID is required for GetList API.", validationException);
}
return validationException;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
listId = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalString(listId);
}
@Override
public int hashCode() {
return Objects.hash(listId);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(listId, other.listId);
}
}
public static class RequestBuilder extends MasterNodeReadOperationRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, GetListAction action) {
super(client, action, new Request());
}
}
public static class Response extends ActionResponse implements StatusToXContent {
private SingleDocument<ListDocument> response;
public Response(SingleDocument<ListDocument> document) {
this.response = document;
}
Response() {
}
public SingleDocument<ListDocument> getResponse() {
return response;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
response = new SingleDocument<>(in, ListDocument::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
response.writeTo(out);
}
@Override
public RestStatus status() {
return response.status();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return response.toXContent(builder, params);
}
@Override
public int hashCode() {
return Objects.hash(response);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Response other = (Response) obj;
return Objects.equals(response, other.response);
}
@SuppressWarnings("deprecation")
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere
return "{ \"error\" : \"" + org.elasticsearch.ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
}
public static class TransportAction extends TransportMasterNodeReadAction<Request, Response> {
private final TransportGetAction transportGetAction;
// TODO these need to be moved to a settings object later
// See #20
private static final String PRELERT_INFO_INDEX = "prelert-int";
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
TransportGetAction transportGetAction) {
super(settings, GetListAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.transportGetAction = transportGetAction;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
final String listId = request.getListId();
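// Lists are stored as individual documents in the internal prelert index, so issue a plain get and re-parse the source into a ListDocument.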
GetRequest getRequest = new GetRequest(PRELERT_INFO_INDEX, ListDocument.TYPE.getPreferredName(), listId);
transportGetAction.execute(getRequest, new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse getDocResponse) {
try {
SingleDocument<ListDocument> responseBody;
if (getDocResponse.isExists()) {
BytesReference docSource = getDocResponse.getSourceAsBytesRef();
XContentParser parser = XContentFactory.xContent(docSource).createParser(docSource);
ListDocument listDocument = ListDocument.PARSER.apply(parser, () -> parseFieldMatcher);
responseBody = new SingleDocument<>(ListDocument.TYPE.getPreferredName(), listDocument);
} else {
responseBody = SingleDocument.empty(ListDocument.TYPE.getPreferredName());
}
Response listResponse = new Response(responseBody);
listener.onResponse(listResponse);
} catch (Exception e) {
this.onFailure(e);
}
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
}
}
}
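An illustrative caller (not part of the commit), assuming a client instance and a placeholder list id.

import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.prelert.action.GetListAction;
import org.elasticsearch.xpack.prelert.lists.ListDocument;
import org.elasticsearch.xpack.prelert.utils.SingleDocument;

class GetListExample {
    static SingleDocument<ListDocument> fetch(ElasticsearchClient client) {
        // "my-list" is a placeholder list id used only for illustration
        GetListAction.Request request = new GetListAction.Request("my-list");
        GetListAction.Response response = client.execute(GetListAction.INSTANCE, request).actionGet();
        return response.getResponse(); // the SingleDocument is empty if the list does not exist
    }
}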

View File

@ -0,0 +1,349 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.ModelSnapshot;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchJobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.JobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.QueryPage;
import org.elasticsearch.xpack.prelert.job.results.PageParams;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
public class GetModelSnapshotsAction
extends Action<GetModelSnapshotsAction.Request, GetModelSnapshotsAction.Response, GetModelSnapshotsAction.RequestBuilder> {
public static final GetModelSnapshotsAction INSTANCE = new GetModelSnapshotsAction();
public static final String NAME = "cluster:admin/prelert/modelsnapshots/get";
private GetModelSnapshotsAction() {
super(NAME);
}
@Override
public GetModelSnapshotsAction.RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public GetModelSnapshotsAction.Response newResponse() {
return new Response();
}
public static class Request extends ActionRequest implements ToXContent {
public static final ParseField SORT = new ParseField("sort");
public static final ParseField DESCRIPTION = new ParseField("description");
public static final ParseField START = new ParseField("start");
public static final ParseField END = new ParseField("end");
public static final ParseField DESC = new ParseField("desc");
private static final ObjectParser<Request, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>(NAME, Request::new);
static {
PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID);
PARSER.declareString(Request::setDescriptionString, DESCRIPTION);
PARSER.declareString(Request::setStart, START);
PARSER.declareString(Request::setEnd, END);
PARSER.declareString(Request::setSort, SORT);
PARSER.declareBoolean(Request::setDescOrder, DESC);
PARSER.declareObject(Request::setPageParams, PageParams.PARSER, PageParams.PAGE);
}
public static Request parseRequest(String jobId, XContentParser parser, ParseFieldMatcherSupplier parseFieldMatcherSupplier) {
Request request = PARSER.apply(parser, parseFieldMatcherSupplier);
if (jobId != null) {
request.jobId = jobId;
}
return request;
}
private String jobId;
private String sort;
private String description;
private String start;
private String end;
private boolean desc;
private PageParams pageParams = new PageParams(0, 100);
Request() {
}
public Request(String jobId) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
}
public String getJobId() {
return jobId;
}
@Nullable
public String getSort() {
return sort;
}
public void setSort(String sort) {
this.sort = sort;
}
public boolean getDescOrder() {
return desc;
}
public void setDescOrder(boolean desc) {
this.desc = desc;
}
public PageParams getPageParams() {
return pageParams;
}
public void setPageParams(PageParams pageParams) {
this.pageParams = ExceptionsHelper.requireNonNull(pageParams, PageParams.PAGE.getPreferredName());
}
@Nullable
public String getStart() {
return start;
}
public void setStart(String start) {
this.start = ExceptionsHelper.requireNonNull(start, START.getPreferredName());
}
@Nullable
public String getEnd() {
return end;
}
public void setEnd(String end) {
this.end = ExceptionsHelper.requireNonNull(end, END.getPreferredName());
}
@Nullable
public String getDescriptionString() {
return description;
}
public void setDescriptionString(String description) {
this.description = ExceptionsHelper.requireNonNull(description, DESCRIPTION.getPreferredName());
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
sort = in.readOptionalString();
description = in.readOptionalString();
start = in.readOptionalString();
end = in.readOptionalString();
desc = in.readBoolean();
pageParams = new PageParams(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
out.writeOptionalString(sort);
out.writeOptionalString(description);
out.writeOptionalString(start);
out.writeOptionalString(end);
out.writeBoolean(desc);
pageParams.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
if (description != null) {
builder.field(DESCRIPTION.getPreferredName(), description);
}
if (start != null) {
builder.field(START.getPreferredName(), start);
}
if (end != null) {
builder.field(END.getPreferredName(), end);
}
if (sort != null) {
builder.field(SORT.getPreferredName(), sort);
}
builder.field(DESC.getPreferredName(), desc);
builder.field(PageParams.PAGE.getPreferredName(), pageParams);
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(jobId, description, start, end, sort, desc);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId) && Objects.equals(description, other.description)
&& Objects.equals(start, other.start) && Objects.equals(end, other.end) && Objects.equals(sort, other.sort)
&& Objects.equals(desc, other.desc);
}
}
public static class Response extends ActionResponse implements ToXContent {
private QueryPage<ModelSnapshot> page;
public Response(QueryPage<ModelSnapshot> page) {
this.page = page;
}
Response() {
}
public QueryPage<ModelSnapshot> getPage() {
return page;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
page = new QueryPage<>(in, ModelSnapshot::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
page.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return page.doXContentBody(builder, params);
}
@Override
public int hashCode() {
return Objects.hash(page);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Response other = (Response) obj;
return Objects.equals(page, other.page);
}
@SuppressWarnings("deprecation")
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere
return "{ \"error\" : \"" + org.elasticsearch.ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
}
public static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, GetModelSnapshotsAction action) {
super(client, action, new Request());
}
}
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final JobProvider jobProvider;
@Inject
public TransportAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, ElasticsearchJobProvider jobProvider) {
super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new);
this.jobProvider = jobProvider;
}
@Override
protected void doExecute(Request request, ActionListener<Response> listener) {
logger.debug(String.format(Locale.ROOT,
"Get model snapshots for job %s. from = %d, size = %d, start = '%s', end = '%s', sort = %s, descending = %b, description filter = %s",
request.getJobId(), request.pageParams.getFrom(), request.pageParams.getSize(), request.getStart(), request.getEnd(),
request.getSort(), request.getDescOrder(), request.getDescriptionString()));
QueryPage<ModelSnapshot> page = doGetPage(jobProvider, request);
logger.debug(String.format(Locale.ROOT, "Return %d model snapshots for job %s", page.hitCount(), request.getJobId()));
listener.onResponse(new Response(page));
}
public static QueryPage<ModelSnapshot> doGetPage(JobProvider jobProvider, Request request) {
QueryPage<ModelSnapshot> page = jobProvider.modelSnapshots(request.getJobId(), request.pageParams.getFrom(),
request.pageParams.getSize(), request.getStart(), request.getEnd(), request.getSort(), request.getDescOrder(), null,
request.getDescriptionString());
// The quantiles can be large and totally dominate the output - it's clearer to remove them
if (page.hits() != null) {
for (ModelSnapshot modelSnapshot : page.hits()) {
modelSnapshot.setQuantiles(null);
}
}
return page;
}
}
}
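A minimal caller sketch under the same assumptions as the earlier examples (client in scope, placeholder job id, placeholder class name).

import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.prelert.action.GetModelSnapshotsAction;
import org.elasticsearch.xpack.prelert.job.ModelSnapshot;
import org.elasticsearch.xpack.prelert.job.persistence.QueryPage;
import org.elasticsearch.xpack.prelert.job.results.PageParams;

class GetModelSnapshotsExample {
    static QueryPage<ModelSnapshot> recentSnapshots(ElasticsearchClient client) {
        // "my-job" is a placeholder job id used only for illustration
        GetModelSnapshotsAction.Request request = new GetModelSnapshotsAction.Request("my-job");
        request.setDescOrder(true);                    // descending sort order
        request.setPageParams(new PageParams(0, 10));  // first 10 snapshots
        GetModelSnapshotsAction.Response response = client.execute(GetModelSnapshotsAction.INSTANCE, request).actionGet();
        return response.getPage();                     // quantiles are stripped from the returned snapshots
    }
}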

View File

@ -0,0 +1,373 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchJobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.JobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.QueryPage;
import org.elasticsearch.xpack.prelert.job.persistence.RecordsQueryBuilder;
import org.elasticsearch.xpack.prelert.job.results.AnomalyRecord;
import org.elasticsearch.xpack.prelert.job.results.Influencer;
import org.elasticsearch.xpack.prelert.job.results.PageParams;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class GetRecordsAction extends Action<GetRecordsAction.Request, GetRecordsAction.Response, GetRecordsAction.RequestBuilder> {
public static final GetRecordsAction INSTANCE = new GetRecordsAction();
public static final String NAME = "indices:admin/prelert/results/records/get";
private GetRecordsAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends ActionRequest implements ToXContent {
public static final ParseField START = new ParseField("start");
public static final ParseField END = new ParseField("end");
public static final ParseField INCLUDE_INTERIM = new ParseField("includeInterim");
public static final ParseField ANOMALY_SCORE_FILTER = new ParseField("anomalyScore");
public static final ParseField SORT = new ParseField("sort");
public static final ParseField DESCENDING = new ParseField("desc");
public static final ParseField MAX_NORMALIZED_PROBABILITY = new ParseField("normalizedProbability");
public static final ParseField PARTITION_VALUE = new ParseField("partitionValue");
private static final ObjectParser<Request, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>(NAME, Request::new);
static {
PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID);
PARSER.declareString((request, start) -> request.start = start, START);
PARSER.declareString((request, end) -> request.end = end, END);
PARSER.declareString(Request::setPartitionValue, PARTITION_VALUE);
PARSER.declareString(Request::setSort, SORT);
PARSER.declareBoolean(Request::setDecending, DESCENDING);
PARSER.declareBoolean(Request::setIncludeInterim, INCLUDE_INTERIM);
PARSER.declareObject(Request::setPageParams, PageParams.PARSER, PageParams.PAGE);
PARSER.declareDouble(Request::setAnomalyScore, ANOMALY_SCORE_FILTER);
PARSER.declareDouble(Request::setMaxNormalizedProbability, MAX_NORMALIZED_PROBABILITY);
}
public static Request parseRequest(String jobId, XContentParser parser, ParseFieldMatcherSupplier parseFieldMatcherSupplier) {
Request request = PARSER.apply(parser, parseFieldMatcherSupplier);
if (jobId != null) {
request.jobId = jobId;
}
return request;
}
private String jobId;
private String start;
private String end;
private boolean includeInterim = false;
private PageParams pageParams = new PageParams(0, 100);
private double anomalyScoreFilter = 0.0;
private String sort = Influencer.ANOMALY_SCORE.getPreferredName();
private boolean decending = false;
private double maxNormalizedProbability = 0.0;
private String partitionValue;
Request() {
}
public Request(String jobId, String start, String end) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
this.start = ExceptionsHelper.requireNonNull(start, START.getPreferredName());
this.end = ExceptionsHelper.requireNonNull(end, END.getPreferredName());
}
public String getJobId() {
return jobId;
}
public String getStart() {
return start;
}
public String getEnd() {
return end;
}
public boolean isDecending() {
return decending;
}
public void setDecending(boolean decending) {
this.decending = decending;
}
public boolean isIncludeInterim() {
return includeInterim;
}
public void setIncludeInterim(boolean includeInterim) {
this.includeInterim = includeInterim;
}
public void setPageParams(PageParams pageParams) {
this.pageParams = pageParams;
}
public PageParams getPageParams() {
return pageParams;
}
public double getAnomalyScoreFilter() {
return anomalyScoreFilter;
}
public void setAnomalyScore(double anomalyScoreFilter) {
this.anomalyScoreFilter = anomalyScoreFilter;
}
public String getSort() {
return sort;
}
public void setSort(String sort) {
this.sort = ExceptionsHelper.requireNonNull(sort, SORT.getPreferredName());
}
public double getMaxNormalizedProbability() {
return maxNormalizedProbability;
}
public void setMaxNormalizedProbability(double maxNormalizedProbability) {
this.maxNormalizedProbability = maxNormalizedProbability;
}
public String getPartitionValue() {
return partitionValue;
}
public void setPartitionValue(String partitionValue) {
this.partitionValue = ExceptionsHelper.requireNonNull(partitionValue, PARTITION_VALUE.getPreferredName());
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
includeInterim = in.readBoolean();
pageParams = new PageParams(in);
start = in.readString();
end = in.readString();
sort = in.readOptionalString();
decending = in.readBoolean();
anomalyScoreFilter = in.readDouble();
maxNormalizedProbability = in.readDouble();
partitionValue = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
out.writeBoolean(includeInterim);
pageParams.writeTo(out);
out.writeString(start);
out.writeString(end);
out.writeOptionalString(sort);
out.writeBoolean(decending);
out.writeDouble(anomalyScoreFilter);
out.writeDouble(maxNormalizedProbability);
out.writeOptionalString(partitionValue);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
if (start != null) {
builder.field(START.getPreferredName(), start);
}
if (end != null) {
builder.field(END.getPreferredName(), end);
}
builder.field(SORT.getPreferredName(), sort);
builder.field(DESCENDING.getPreferredName(), decending);
builder.field(ANOMALY_SCORE_FILTER.getPreferredName(), anomalyScoreFilter);
builder.field(INCLUDE_INTERIM.getPreferredName(), includeInterim);
builder.field(MAX_NORMALIZED_PROBABILITY.getPreferredName(), maxNormalizedProbability);
builder.field(PageParams.PAGE.getPreferredName(), pageParams);
if (partitionValue != null) {
builder.field(PARTITION_VALUE.getPreferredName(), partitionValue);
}
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(jobId, start, end, sort, decending, anomalyScoreFilter, includeInterim, maxNormalizedProbability,
pageParams, partitionValue);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId) &&
Objects.equals(start, other.start) &&
Objects.equals(end, other.end) &&
Objects.equals(sort, other.sort) &&
Objects.equals(decending, other.decending) &&
Objects.equals(anomalyScoreFilter, other.anomalyScoreFilter) &&
Objects.equals(includeInterim, other.includeInterim) &&
Objects.equals(maxNormalizedProbability, other.maxNormalizedProbability) &&
Objects.equals(pageParams, other.pageParams) &&
Objects.equals(partitionValue, other.partitionValue);
}
}
static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
RequestBuilder(ElasticsearchClient client) {
super(client, INSTANCE, new Request());
}
}
public static class Response extends ActionResponse implements ToXContent {
private QueryPage<AnomalyRecord> records;
Response() {
}
Response(QueryPage<AnomalyRecord> records) {
this.records = records;
}
public QueryPage<AnomalyRecord> getRecords() {
return records;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
records = new QueryPage<>(in, AnomalyRecord::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
records.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return records.doXContentBody(builder, params);
}
@Override
public int hashCode() {
return Objects.hash(records);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Response other = (Response) obj;
return Objects.equals(records, other.records);
}
@SuppressWarnings("deprecation")
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere
return "{ \"error\" : \"" + org.elasticsearch.ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
}
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final JobProvider jobProvider;
@Inject
public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
ElasticsearchJobProvider jobProvider) {
super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new);
this.jobProvider = jobProvider;
}
@Override
protected void doExecute(Request request, ActionListener<Response> listener) {
RecordsQueryBuilder.RecordsQuery query = new RecordsQueryBuilder()
.includeInterim(request.includeInterim)
.epochStart(request.start)
.epochEnd(request.end)
.from(request.pageParams.getFrom())
.size(request.pageParams.getSize())
.anomalyScoreThreshold(request.anomalyScoreFilter)
.sortField(request.sort)
.sortDescending(request.decending)
.build();
QueryPage<AnomalyRecord> page = jobProvider.records(request.jobId, query);
listener.onResponse(new Response(page));
}
}
}


@ -0,0 +1,169 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class PauseJobAction extends Action<PauseJobAction.Request, PauseJobAction.Response, PauseJobAction.RequestBuilder> {
public static final PauseJobAction INSTANCE = new PauseJobAction();
public static final String NAME = "cluster:admin/prelert/job/pause";
private PauseJobAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends AcknowledgedRequest<Request> {
private String jobId;
public Request(String jobId) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
}
Request() {}
public String getJobId() {
return jobId;
}
public void setJobId(String jobId) {
this.jobId = jobId;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
}
@Override
public int hashCode() {
return Objects.hash(jobId);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || obj.getClass() != getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId);
}
}
static class RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, PauseJobAction action) {
super(client, action, new Request());
}
}
public static class Response extends AcknowledgedResponse {
public Response(boolean acknowledged) {
super(acknowledged);
}
private Response() {}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final JobManager jobManager;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
JobManager jobManager) {
super(settings, PauseJobAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.jobManager = jobManager;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
logger.info("Pausing job " + request.getJobId());
jobManager.pauseJob(request, listener);
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
}
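A minimal usage sketch (illustrative only): it assumes an ElasticsearchClient named client is in scope and that the snippet can import the prelert action classes; it is not part of the plugin itself.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.prelert.action.PauseJobAction;

class PauseJobExample {
    // Sends a pause request for the given job and reports the outcome through the listener.
    static void pauseJob(ElasticsearchClient client, String jobId) {
        PauseJobAction.Request request = new PauseJobAction.Request(jobId);
        client.execute(PauseJobAction.INSTANCE, request, new ActionListener<PauseJobAction.Response>() {
            @Override
            public void onResponse(PauseJobAction.Response response) {
                // acknowledged == true means the master accepted the pause
                assert response.isAcknowledged();
            }

            @Override
            public void onFailure(Exception e) {
                throw new RuntimeException("pause failed for job [" + jobId + "]", e);
            }
        });
    }
}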


@ -0,0 +1,253 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.PrelertPlugin;
import org.elasticsearch.xpack.prelert.job.DataCounts;
import org.elasticsearch.xpack.prelert.job.manager.AutodetectProcessManager;
import org.elasticsearch.xpack.prelert.job.process.autodetect.params.DataLoadParams;
import org.elasticsearch.xpack.prelert.job.process.autodetect.params.TimeRange;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class PostDataAction extends Action<PostDataAction.Request, PostDataAction.Response, PostDataAction.RequestBuilder> {
public static final PostDataAction INSTANCE = new PostDataAction();
public static final String NAME = "cluster:admin/prelert/data/post";
private PostDataAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return null;
}
static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
RequestBuilder(ElasticsearchClient client, PostDataAction action) {
super(client, action, new Request());
}
}
public static class Response extends ActionResponse implements StatusToXContent {
private DataCounts dataCounts;
Response(String jobId) {
dataCounts = new DataCounts(jobId);
}
public Response(DataCounts counts) {
this.dataCounts = counts;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
dataCounts = new DataCounts(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
dataCounts.writeTo(out);
}
@Override
public RestStatus status() {
return RestStatus.ACCEPTED;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return dataCounts.doXContentBody(builder, params);
}
@Override
public int hashCode() {
return Objects.hashCode(dataCounts);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Response other = (Response) obj;
return Objects.equals(dataCounts, other.dataCounts);
}
}
public static class Request extends ActionRequest {
public static final ParseField IGNORE_DOWNTIME = new ParseField("ignoreDowntime");
public static final ParseField RESET_START = new ParseField("resetStart");
public static final ParseField RESET_END = new ParseField("resetEnd");
private String jobId;
private boolean ignoreDowntime = false;
private String resetStart;
private String resetEnd;
private BytesReference content;
Request() {
}
public Request(String jobId) {
ExceptionsHelper.requireNonNull(jobId, "jobId");
this.jobId = jobId;
}
public String getJobId() {
return jobId;
}
public boolean isIgnoreDowntime() {
return ignoreDowntime;
}
public void setIgnoreDowntime(boolean ignoreDowntime) {
this.ignoreDowntime = ignoreDowntime;
}
public String getResetStart() {
return resetStart;
}
public void setResetStart(String resetStart) {
this.resetStart = resetStart;
}
public String getResetEnd() {
return resetEnd;
}
public void setResetEnd(String resetEnd) {
this.resetEnd = resetEnd;
}
public BytesReference getContent() { return content; }
public void setContent(BytesReference content) {
this.content = content;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
ignoreDowntime = in.readBoolean();
resetStart = in.readOptionalString();
resetEnd = in.readOptionalString();
content = in.readBytesReference();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
out.writeBoolean(ignoreDowntime);
out.writeOptionalString(resetStart);
out.writeOptionalString(resetEnd);
out.writeBytesReference(content);
}
@Override
public int hashCode() {
// content stream not included
return Objects.hash(jobId, ignoreDowntime, resetStart, resetEnd);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
// content stream not included
return Objects.equals(jobId, other.jobId) &&
Objects.equals(ignoreDowntime, other.ignoreDowntime) &&
Objects.equals(resetStart, other.resetStart) &&
Objects.equals(resetEnd, other.resetEnd);
}
}
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final AutodetectProcessManager processManager;
@Inject
public TransportAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, AutodetectProcessManager processManager) {
super(settings, PostDataAction.NAME, false, threadPool, transportService, actionFilters,
indexNameExpressionResolver, Request::new);
this.processManager = processManager;
}
@Override
protected final void doExecute(Request request, ActionListener<Response> listener) {
TimeRange timeRange = TimeRange.builder().startTime(request.getResetStart()).endTime(request.getResetEnd()).build();
DataLoadParams params = new DataLoadParams(timeRange, request.isIgnoreDowntime());
// NORELEASE Make this all async so we don't need to pass off to another thread pool and block
threadPool.executor(PrelertPlugin.THREAD_POOL_NAME).execute(() -> {
try {
DataCounts dataCounts = processManager.processData(request.getJobId(), request.content.streamInput(), params);
listener.onResponse(new Response(dataCounts));
} catch (IOException | ElasticsearchException e) {
listener.onFailure(e);
}
});
}
}
}
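A hypothetical fragment (same assumed client as the earlier sketch, job id "my-job" is a placeholder) showing how data could be streamed to this action; the JSON field names are invented for illustration.

// BytesArray is org.elasticsearch.common.bytes.BytesArray
PostDataAction.Request postRequest = new PostDataAction.Request("my-job");
postRequest.setContent(new BytesArray("{\"time\":\"1479482499\",\"value\":42.0}"));
PostDataAction.Response postResponse = client.execute(PostDataAction.INSTANCE, postRequest).actionGet();
// the response carries the updated DataCounts and reports HTTP 202 (ACCEPTED)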


@ -0,0 +1,149 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.manager.AutodetectProcessManager;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class PostDataCloseAction extends Action<PostDataCloseAction.Request, PostDataCloseAction.Response,
PostDataCloseAction.RequestBuilder> {
public static final PostDataCloseAction INSTANCE = new PostDataCloseAction();
public static final String NAME = "cluster:admin/prelert/data/post/close";
private PostDataCloseAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends ActionRequest {
private String jobId;
Request() {}
public Request(String jobId) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, "jobId");
}
public String getJobId() {
return jobId;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
}
@Override
public int hashCode() {
return Objects.hash(jobId);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || obj.getClass() != getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId);
}
}
static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, PostDataCloseAction action) {
super(client, action, new Request());
}
}
public static class Response extends AcknowledgedResponse {
private Response() {
}
private Response(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
// NORELEASE This should be a master node operation that updates the job's state
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final AutodetectProcessManager processManager;
@Inject
public TransportAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, AutodetectProcessManager processManager) {
super(settings, PostDataCloseAction.NAME, false, threadPool, transportService, actionFilters,
indexNameExpressionResolver, Request::new);
this.processManager = processManager;
}
@Override
protected final void doExecute(Request request, ActionListener<Response> listener) {
processManager.closeJob(request.getJobId());
listener.onResponse(new Response(true));
}
}
}
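Continuing the hypothetical fragment above, closing the job once all data has been posted (client and the job id are the same assumptions as before).

PostDataCloseAction.Request closeRequest = new PostDataCloseAction.Request("my-job");
PostDataCloseAction.Response closeResponse = client.execute(PostDataCloseAction.INSTANCE, closeRequest).actionGet();
// acknowledged once the autodetect process for the job has been closed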


@ -0,0 +1,255 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.manager.AutodetectProcessManager;
import org.elasticsearch.xpack.prelert.job.process.autodetect.params.InterimResultsParams;
import org.elasticsearch.xpack.prelert.job.process.autodetect.params.TimeRange;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class PostDataFlushAction extends Action<PostDataFlushAction.Request, PostDataFlushAction.Response,
PostDataFlushAction.RequestBuilder> {
public static final PostDataFlushAction INSTANCE = new PostDataFlushAction();
public static final String NAME = "cluster:admin/prelert/data/post/flush";
private PostDataFlushAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends MasterNodeRequest<Request> implements ToXContent {
public static final ParseField CALC_INTERIM = new ParseField("calcInterim");
public static final ParseField START = new ParseField("start");
public static final ParseField END = new ParseField("end");
public static final ParseField ADVANCE_TIME = new ParseField("advanceTime");
private static final ObjectParser<Request, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>(NAME, Request::new);
static {
PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID);
PARSER.declareBoolean(Request::setCalcInterim, CALC_INTERIM);
PARSER.declareString(Request::setStart, START);
PARSER.declareString(Request::setEnd, END);
PARSER.declareString(Request::setAdvanceTime, ADVANCE_TIME);
}
public static Request parseRequest(String jobId, XContentParser parser, ParseFieldMatcherSupplier parseFieldMatcherSupplier) {
Request request = PARSER.apply(parser, parseFieldMatcherSupplier);
if (jobId != null) {
request.jobId = jobId;
}
return request;
}
private String jobId;
private boolean calcInterim = false;
private String start;
private String end;
private String advanceTime;
Request() {
}
public Request(String jobId) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
}
public String getJobId() {
return jobId;
}
public boolean getCalcInterim() {
return calcInterim;
}
public void setCalcInterim(boolean calcInterim) {
this.calcInterim = calcInterim;
}
public String getStart() {
return start;
}
public void setStart(String start) {
this.start = start;
}
public String getEnd() {
return end;
}
public void setEnd(String end) {
this.end = end;
}
public String getAdvanceTime() { return advanceTime; }
public void setAdvanceTime(String advanceTime) {
this.advanceTime = advanceTime;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
calcInterim = in.readBoolean();
start = in.readOptionalString();
end = in.readOptionalString();
advanceTime = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
out.writeBoolean(calcInterim);
out.writeOptionalString(start);
out.writeOptionalString(end);
out.writeOptionalString(advanceTime);
}
@Override
public int hashCode() {
return Objects.hash(jobId, calcInterim, start, end, advanceTime);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId) &&
calcInterim == other.calcInterim &&
Objects.equals(start, other.start) &&
Objects.equals(end, other.end) &&
Objects.equals(advanceTime, other.advanceTime);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
builder.field(CALC_INTERIM.getPreferredName(), calcInterim);
if (start != null) {
builder.field(START.getPreferredName(), start);
}
if (end != null) {
builder.field(END.getPreferredName(), end);
}
if (advanceTime != null) {
builder.field(ADVANCE_TIME.getPreferredName(), advanceTime);
}
builder.endObject();
return builder;
}
}
static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, PostDataFlushAction action) {
super(client, action, new Request());
}
}
public static class Response extends AcknowledgedResponse {
private Response() {
}
private Response(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
public static class TransportAction extends HandledTransportAction<Request, Response> {
// NORELEASE This should be a master node operation that updates the job's state
private final AutodetectProcessManager processManager;
@Inject
public TransportAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, AutodetectProcessManager processManager) {
super(settings, PostDataFlushAction.NAME, false, threadPool, transportService, actionFilters,
indexNameExpressionResolver, PostDataFlushAction.Request::new);
this.processManager = processManager;
}
@Override
protected final void doExecute(PostDataFlushAction.Request request, ActionListener<PostDataFlushAction.Response> listener) {
TimeRange timeRange = TimeRange.builder().startTime(request.getStart()).endTime(request.getEnd()).build();
InterimResultsParams params = InterimResultsParams.builder()
.calcInterim(request.getCalcInterim())
.forTimeRange(timeRange)
.advanceTime(request.getAdvanceTime())
.build();
processManager.flushJob(request.getJobId(), params);
listener.onResponse(new Response(true));
}
}
}
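A hypothetical fragment requesting a flush with interim results; the epoch-seconds time value is an assumption based on the TimeRange usage above, not something this commit prescribes.

PostDataFlushAction.Request flushRequest = new PostDataFlushAction.Request("my-job");
flushRequest.setCalcInterim(true);              // ask for interim results for the current bucket
flushRequest.setAdvanceTime("1479482499");      // assumed epoch-seconds value, advances the analysis time
client.execute(PostDataFlushAction.INSTANCE, flushRequest).actionGet();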


@ -0,0 +1,232 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import java.io.IOException;
import java.util.Objects;
public class PutJobAction extends Action<PutJobAction.Request, PutJobAction.Response, PutJobAction.RequestBuilder> {
public static final PutJobAction INSTANCE = new PutJobAction();
public static final String NAME = "cluster:admin/prelert/job/put";
private PutJobAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends AcknowledgedRequest<Request> implements ToXContent {
public static Request parseRequest(XContentParser parser, ParseFieldMatcherSupplier matcherSupplier) {
Job job = Job.PARSER.apply(parser, matcherSupplier).build(true);
return new Request(job);
}
private Job job;
private boolean overwrite;
public Request(Job job) {
this.job = job;
}
Request() {
}
public Job getJob() {
return job;
}
public boolean isOverwrite() {
return overwrite;
}
public void setOverwrite(boolean overwrite) {
this.overwrite = overwrite;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
job = new Job(in);
overwrite = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
job.writeTo(out);
out.writeBoolean(overwrite);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
job.toXContent(builder, params);
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return overwrite == request.overwrite &&
Objects.equals(job, request.job);
}
@Override
public int hashCode() {
return Objects.hash(job, overwrite);
}
@SuppressWarnings("deprecation")
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
toXContent(builder, EMPTY_PARAMS);
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere
return "{ \"error\" : \"" + org.elasticsearch.ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
}
public static class RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, PutJobAction action) {
super(client, action, new Request());
}
}
public static class Response extends AcknowledgedResponse implements ToXContent {
private Job job;
public Response(boolean acked, Job job) {
super(acked);
this.job = job;
}
Response() {
}
public Job getResponse() {
return job;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
job = new Job(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
job.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            // Don't serialize acknowledged because the current API directly serializes the job details
job.doXContentBody(builder, params);
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Response response = (Response) o;
return Objects.equals(job, response.job);
}
@Override
public int hashCode() {
return Objects.hash(job);
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final JobManager jobManager;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, JobManager jobManager) {
super(settings, PutJobAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.jobManager = jobManager;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
jobManager.putJob(request, listener);
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
}
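A hypothetical fragment creating a job through this action; it assumes a fully built Job instance named job (for example produced by Job.PARSER, as in Request.parseRequest above) and the same client as earlier sketches.

PutJobAction.Request putRequest = new PutJobAction.Request(job);
putRequest.setOverwrite(false);
PutJobAction.Response putResponse = client.execute(PutJobAction.INSTANCE, putRequest).actionGet();
Job createdJob = putResponse.getResponse();     // the job configuration as stored by the master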


@ -0,0 +1,206 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.lists.ListDocument;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class PutListAction extends Action<PutListAction.Request, PutListAction.Response, PutListAction.RequestBuilder> {
public static final PutListAction INSTANCE = new PutListAction();
public static final String NAME = "cluster:admin/prelert/list/put";
private PutListAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends MasterNodeReadRequest<Request> implements ToXContent {
public static Request parseRequest(XContentParser parser, ParseFieldMatcherSupplier matcherSupplier) {
ListDocument listDocument = ListDocument.PARSER.apply(parser, matcherSupplier);
return new Request(listDocument);
}
private ListDocument listDocument;
Request() {
}
public Request(ListDocument listDocument) {
this.listDocument = ExceptionsHelper.requireNonNull(listDocument, "listDocument");
}
public ListDocument getListDocument() {
return this.listDocument;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
listDocument = new ListDocument(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
listDocument.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
listDocument.toXContent(builder, params);
return builder;
}
@Override
public int hashCode() {
return Objects.hash(listDocument);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(listDocument, other.listDocument);
}
}
public static class RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, PutListAction action) {
super(client, action, new Request());
}
}
public static class Response extends AcknowledgedResponse {
public Response() {
super(true);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
    // extends TransportMasterNodeAction, because we will store the list in cluster state.
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final TransportIndexAction transportIndexAction;
// TODO these need to be moved to a settings object later. See #20
private static final String PRELERT_INFO_INDEX = "prelert-int";
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
TransportIndexAction transportIndexAction) {
super(settings, PutListAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.transportIndexAction = transportIndexAction;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
ListDocument listDocument = request.getListDocument();
final String listId = listDocument.getId();
IndexRequest indexRequest = new IndexRequest(PRELERT_INFO_INDEX, ListDocument.TYPE.getPreferredName(), listId);
XContentBuilder builder = XContentFactory.jsonBuilder();
indexRequest.source(listDocument.toXContent(builder, ToXContent.EMPTY_PARAMS));
transportIndexAction.execute(indexRequest, new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse indexResponse) {
listener.onResponse(new Response());
}
@Override
public void onFailure(Exception e) {
logger.error("Could not create list with ID [" + listId + "]", e);
                    listener.onFailure(new ResourceNotFoundException("Could not create list with ID [" + listId + "]", e));
}
});
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
}
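A hypothetical fragment storing a list; it assumes a ListDocument instance named listDocument built elsewhere (for example via ListDocument.PARSER) and the same assumed client.

PutListAction.Request listRequest = new PutListAction.Request(listDocument);
PutListAction.Response listResponse = client.execute(PutListAction.INSTANCE, listRequest).actionGet();
// acknowledged once the document has been indexed into the internal prelert-int index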


@ -0,0 +1,302 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.ModelSnapshot;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchJobProvider;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import org.elasticsearch.xpack.prelert.utils.SingleDocument;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
public class PutModelSnapshotDescriptionAction extends
Action<PutModelSnapshotDescriptionAction.Request, PutModelSnapshotDescriptionAction.Response,
PutModelSnapshotDescriptionAction.RequestBuilder> {
public static final PutModelSnapshotDescriptionAction INSTANCE = new PutModelSnapshotDescriptionAction();
public static final String NAME = "cluster:admin/prelert/modelsnapshot/put/description";
private PutModelSnapshotDescriptionAction() {
super(NAME);
}
@Override
public PutModelSnapshotDescriptionAction.RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public PutModelSnapshotDescriptionAction.Response newResponse() {
return new Response();
}
public static class Request extends ActionRequest implements ToXContent {
private static final ObjectParser<Request, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>(NAME, Request::new);
static {
PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID);
PARSER.declareString((request, snapshotId) -> request.snapshotId = snapshotId, ModelSnapshot.SNAPSHOT_ID);
PARSER.declareString((request, description) -> request.description = description, ModelSnapshot.DESCRIPTION);
}
public static Request parseRequest(String jobId, String snapshotId, XContentParser parser,
ParseFieldMatcherSupplier parseFieldMatcherSupplier) {
Request request = PARSER.apply(parser, parseFieldMatcherSupplier);
if (jobId != null) {
request.jobId = jobId;
}
if (snapshotId != null) {
request.snapshotId = snapshotId;
}
return request;
}
private String jobId;
private String snapshotId;
private String description;
Request() {
}
public Request(String jobId, String snapshotId, String description) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
this.snapshotId = ExceptionsHelper.requireNonNull(snapshotId, ModelSnapshot.SNAPSHOT_ID.getPreferredName());
this.description = ExceptionsHelper.requireNonNull(description, ModelSnapshot.DESCRIPTION.getPreferredName());
}
public String getJobId() {
return jobId;
}
public String getSnapshotId() {
return snapshotId;
}
public String getDescriptionString() {
return description;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
snapshotId = in.readString();
description = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
out.writeString(snapshotId);
out.writeString(description);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
builder.field(ModelSnapshot.SNAPSHOT_ID.getPreferredName(), snapshotId);
builder.field(ModelSnapshot.DESCRIPTION.getPreferredName(), description);
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(jobId, snapshotId, description);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId) && Objects.equals(snapshotId, other.snapshotId)
&& Objects.equals(description, other.description);
}
}
public static class Response extends ActionResponse implements StatusToXContent {
private SingleDocument<ModelSnapshot> response;
public Response() {
response = SingleDocument.empty(ModelSnapshot.TYPE.getPreferredName());
}
public Response(ModelSnapshot modelSnapshot) {
response = new SingleDocument<>(ModelSnapshot.TYPE.getPreferredName(), modelSnapshot);
}
public SingleDocument<ModelSnapshot> getResponse() {
return response;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
response = new SingleDocument<>(in, ModelSnapshot::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
response.writeTo(out);
}
@Override
public RestStatus status() {
return response.status();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return response.toXContent(builder, params);
}
@Override
public int hashCode() {
return Objects.hash(response);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Response other = (Response) obj;
return Objects.equals(response, other.response);
}
@SuppressWarnings("deprecation")
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere
return "{ \"error\" : \"" + org.elasticsearch.ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
}
public static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, PutModelSnapshotDescriptionAction action) {
super(client, action, new Request());
}
}
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final ElasticsearchJobProvider jobProvider;
@Inject
public TransportAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, ElasticsearchJobProvider jobProvider) {
super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new);
this.jobProvider = jobProvider;
}
@Override
protected void doExecute(Request request, ActionListener<Response> listener) {
logger.debug("Received request to change model snapshot description using '" + request.getDescriptionString()
+ "' for snapshot ID '" + request.getSnapshotId() + "' for job '" + request.getJobId() + "'");
List<ModelSnapshot> changeCandidates = getChangeCandidates(request);
checkForClashes(request);
if (changeCandidates.size() > 1) {
logger.warn("More than one model found for [jobId: " + request.getJobId() + ", snapshotId: " + request.getSnapshotId()
+ "] tuple.");
}
ModelSnapshot modelSnapshot = changeCandidates.get(0);
modelSnapshot.setDescription(request.getDescriptionString());
jobProvider.updateModelSnapshot(request.getJobId(), modelSnapshot, false);
            // The quantiles can be large and totally dominate the output - it's clearer to remove them
modelSnapshot.setQuantiles(null);
listener.onResponse(new Response(modelSnapshot));
}
private List<ModelSnapshot> getChangeCandidates(Request request) {
List<ModelSnapshot> changeCandidates = getModelSnapshots(request.getJobId(), request.getSnapshotId(), null);
if (changeCandidates == null || changeCandidates.isEmpty()) {
throw new ResourceNotFoundException(Messages.getMessage(Messages.REST_NO_SUCH_MODEL_SNAPSHOT, request.getJobId()));
}
return changeCandidates;
}
private void checkForClashes(Request request) {
List<ModelSnapshot> clashCandidates = getModelSnapshots(request.getJobId(), null, request.getDescriptionString());
if (clashCandidates != null && !clashCandidates.isEmpty()) {
throw new IllegalArgumentException(Messages.getMessage(
Messages.REST_DESCRIPTION_ALREADY_USED, request.getDescriptionString(), request.getJobId()));
}
}
private List<ModelSnapshot> getModelSnapshots(String jobId, String snapshotId, String description) {
return jobProvider.modelSnapshots(jobId, 0, 1, null, null, null, true, snapshotId, description).hits();
}
}
}
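A hypothetical fragment renaming a model snapshot; the job id, snapshot id, and description are placeholders, and client is the same assumed ElasticsearchClient.

PutModelSnapshotDescriptionAction.Request descriptionRequest =
        new PutModelSnapshotDescriptionAction.Request("my-job", "snapshot-1", "Before November reindex");
PutModelSnapshotDescriptionAction.Response descriptionResponse =
        client.execute(PutModelSnapshotDescriptionAction.INSTANCE, descriptionRequest).actionGet();
// the response wraps the updated ModelSnapshot, with its quantiles removed for readability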


@ -0,0 +1,169 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class ResumeJobAction extends Action<ResumeJobAction.Request, ResumeJobAction.Response, ResumeJobAction.RequestBuilder> {
public static final ResumeJobAction INSTANCE = new ResumeJobAction();
public static final String NAME = "cluster:admin/prelert/job/resume";
private ResumeJobAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends AcknowledgedRequest<Request> {
private String jobId;
public Request(String jobId) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
}
Request() {}
public String getJobId() {
return jobId;
}
public void setJobId(String jobId) {
this.jobId = jobId;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
}
@Override
public int hashCode() {
return Objects.hash(jobId);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || obj.getClass() != getClass()) {
return false;
}
ResumeJobAction.Request other = (ResumeJobAction.Request) obj;
return Objects.equals(jobId, other.jobId);
}
}
static class RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, ResumeJobAction action) {
super(client, action, new Request());
}
}
public static class Response extends AcknowledgedResponse {
public Response(boolean acknowledged) {
super(acknowledged);
}
private Response() {}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final JobManager jobManager;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
JobManager jobManager) {
super(settings, ResumeJobAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.jobManager = jobManager;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
logger.info("Resuming job " + request.getJobId());
jobManager.resumeJob(request, listener);
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
}
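A hypothetical fragment resuming a previously paused job, the counterpart to the PauseJobAction sketch above (same assumed client, placeholder job id).

ResumeJobAction.Request resumeRequest = new ResumeJobAction.Request("my-job");
ResumeJobAction.Response resumeResponse = client.execute(ResumeJobAction.INSTANCE, resumeRequest).actionGet();
assert resumeResponse.isAcknowledged();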


@ -0,0 +1,413 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.JobStatus;
import org.elasticsearch.xpack.prelert.job.ModelSnapshot;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import org.elasticsearch.xpack.prelert.job.metadata.Allocation;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchBulkDeleterFactory;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchJobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.JobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.OldDataRemover;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import org.elasticsearch.xpack.prelert.utils.SingleDocument;
import java.io.IOException;
import java.util.Date;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import static org.elasticsearch.action.ValidateActions.addValidationError;
public class RevertModelSnapshotAction
extends Action<RevertModelSnapshotAction.Request, RevertModelSnapshotAction.Response, RevertModelSnapshotAction.RequestBuilder> {
public static final RevertModelSnapshotAction INSTANCE = new RevertModelSnapshotAction();
public static final String NAME = "indices:admin/prelert/modelsnapshots/revert";
private RevertModelSnapshotAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends AcknowledgedRequest<Request> implements ToXContent {
public static final ParseField TIME = new ParseField("time");
public static final ParseField SNAPSHOT_ID = new ParseField("snapshotId");
public static final ParseField DESCRIPTION = new ParseField("description");
public static final ParseField DELETE_INTERVENING = new ParseField("deleteInterveningResults");
private static ObjectParser<Request, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>(NAME, Request::new);
static {
PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID);
PARSER.declareString(Request::setTime, TIME);
PARSER.declareString(Request::setSnapshotId, SNAPSHOT_ID);
PARSER.declareString(Request::setDescription, DESCRIPTION);
PARSER.declareBoolean(Request::setDeleteInterveningResults, DELETE_INTERVENING);
}
public static Request parseRequest(String jobId, XContentParser parser, ParseFieldMatcherSupplier parseFieldMatcherSupplier) {
Request request = PARSER.apply(parser, parseFieldMatcherSupplier);
if (jobId != null) {
request.jobId = jobId;
}
return request;
}
private String jobId;
private String time;
private String snapshotId;
private String description;
private boolean deleteInterveningResults;
Request() {
}
public Request(String jobId) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
}
public String getJobId() {
return jobId;
}
public String getTime() {
return time;
}
public void setTime(String time) {
this.time = time;
}
public String getSnapshotId() {
return snapshotId;
}
public void setSnapshotId(String snapshotId) {
this.snapshotId = snapshotId;
}
@Override
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public boolean getDeleteInterveningResults() {
return deleteInterveningResults;
}
public void setDeleteInterveningResults(boolean deleteInterveningResults) {
this.deleteInterveningResults = deleteInterveningResults;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (time == null && snapshotId == null && description == null) {
validationException = addValidationError(Messages.getMessage(Messages.REST_INVALID_REVERT_PARAMS), validationException);
}
return validationException;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
time = in.readOptionalString();
snapshotId = in.readOptionalString();
description = in.readOptionalString();
deleteInterveningResults = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
out.writeOptionalString(time);
out.writeOptionalString(snapshotId);
out.writeOptionalString(description);
out.writeBoolean(deleteInterveningResults);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
if (time != null) {
builder.field(TIME.getPreferredName(), time);
}
if (snapshotId != null) {
builder.field(SNAPSHOT_ID.getPreferredName(), snapshotId);
}
if (description != null) {
builder.field(DESCRIPTION.getPreferredName(), description);
}
builder.field(DELETE_INTERVENING.getPreferredName(), deleteInterveningResults);
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(jobId, time, snapshotId, description, deleteInterveningResults);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId) && Objects.equals(time, other.time) && Objects.equals(snapshotId, other.snapshotId)
&& Objects.equals(description, other.description)
&& Objects.equals(deleteInterveningResults, other.deleteInterveningResults);
}
}
static class RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
RequestBuilder(ElasticsearchClient client) {
super(client, INSTANCE, new Request());
}
}
public static class Response extends AcknowledgedResponse implements StatusToXContent {
private SingleDocument<ModelSnapshot> response;
public Response() {
super(false);
response = SingleDocument.empty(ModelSnapshot.TYPE.getPreferredName());
}
public Response(ModelSnapshot modelSnapshot) {
super(true);
response = new SingleDocument<>(ModelSnapshot.TYPE.getPreferredName(), modelSnapshot);
}
public SingleDocument<ModelSnapshot> getResponse() {
return response;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
response = new SingleDocument<>(in, ModelSnapshot::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
response.writeTo(out);
}
@Override
public RestStatus status() {
return response.status();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return response.toXContent(builder, params);
}
@Override
public int hashCode() {
return Objects.hash(response);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Response other = (Response) obj;
return Objects.equals(response, other.response);
}
@SuppressWarnings("deprecation")
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere
return "{ \"error\" : \"" + org.elasticsearch.ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final JobManager jobManager;
private final JobProvider jobProvider;
private final ElasticsearchBulkDeleterFactory bulkDeleterFactory;
@Inject
public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, JobManager jobManager, ElasticsearchJobProvider jobProvider,
ClusterService clusterService, ElasticsearchBulkDeleterFactory bulkDeleterFactory) {
super(settings, NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, Request::new);
this.jobManager = jobManager;
this.jobProvider = jobProvider;
this.bulkDeleterFactory = bulkDeleterFactory;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
logger.debug("Received request to revert to time '" + request.getTime() + "' description '" + request.getDescription()
+ "' snapshot id '" + request.getSnapshotId() + "' for job '" + request.getJobId() + "', deleting intervening "
+ " results: " + request.getDeleteInterveningResults());
if (request.getTime() == null && request.getSnapshotId() == null && request.getDescription() == null) {
throw new IllegalStateException(Messages.getMessage(Messages.REST_INVALID_REVERT_PARAMS));
}
Optional<Job> job = jobManager.getJob(request.getJobId(), clusterService.state());
Allocation allocation = jobManager.getJobAllocation(request.getJobId());
if (job.isPresent() && allocation.getStatus().equals(JobStatus.RUNNING)) {
throw ExceptionsHelper.conflictStatusException(Messages.getMessage(Messages.REST_JOB_NOT_CLOSED_REVERT));
}
ModelSnapshot modelSnapshot = getModelSnapshot(request, jobProvider);
if (request.getDeleteInterveningResults()) {
listener = wrapListener(listener, modelSnapshot, request.getJobId());
}
jobManager.revertSnapshot(request, listener, modelSnapshot);
}
private ModelSnapshot getModelSnapshot(Request request, JobProvider provider) {
logger.info("Reverting to snapshot '" + request.getSnapshotId() + "' for time '" + request.getTime() + "'");
List<ModelSnapshot> revertCandidates;
revertCandidates = provider.modelSnapshots(request.getJobId(), 0, 1, null, request.getTime(),
ModelSnapshot.TIMESTAMP.getPreferredName(), true, request.getSnapshotId(), request.getDescription()).hits();
if (revertCandidates == null || revertCandidates.isEmpty()) {
throw new ResourceNotFoundException(Messages.getMessage(Messages.REST_NO_SUCH_MODEL_SNAPSHOT, request.getJobId()));
}
ModelSnapshot modelSnapshot = revertCandidates.get(0);
// The quantiles can be large, and totally dominate the output -
// it's clearer to remove them
modelSnapshot.setQuantiles(null);
return modelSnapshot;
}
private ActionListener<RevertModelSnapshotAction.Response> wrapListener(ActionListener<RevertModelSnapshotAction.Response> listener,
ModelSnapshot modelSnapshot, String jobId) {
// If we need to delete buckets that occurred after the snapshot, we wrap
// the listener with one that invokes the OldDataRemover on acknowledged
// responses
return ActionListener.wrap(response -> {
if (response.isAcknowledged()) {
Date deleteAfter = modelSnapshot.getLatestResultTimeStamp();
logger.debug("Removing intervening records: last record: " + deleteAfter + ", last result: "
+ modelSnapshot.getLatestResultTimeStamp());
logger.info("Deleting buckets after '" + deleteAfter + "'");
// NORELEASE: OldDataRemover is basically delete-by-query. We should replace
// this whole abstraction with DBQ eventually
OldDataRemover remover = new OldDataRemover(jobProvider, bulkDeleterFactory);
remover.deleteResultsAfter(new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse bulkItemResponses) {
listener.onResponse(response);
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
}, jobId, deleteAfter.getTime() + 1);
}
}, listener::onFailure);
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
}
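
For illustration, a minimal sketch of how a caller could drive this action through a node or transport client. Only the request constructor, the setters and the INSTANCE constant shown above are relied on; the job name and snapshot id are hypothetical and imports are omitted.

    // Sketch only - "my-job" and "snapshot-1" are made-up identifiers.
    void revertToSnapshot(Client client, ActionListener<RevertModelSnapshotAction.Response> listener) {
        RevertModelSnapshotAction.Request request = new RevertModelSnapshotAction.Request("my-job");
        request.setSnapshotId("snapshot-1");
        request.setDeleteInterveningResults(true); // also remove results newer than the snapshot
        client.execute(RevertModelSnapshotAction.INSTANCE, request, listener);
    }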

View File

@@ -0,0 +1,212 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.JobSchedulerStatus;
import org.elasticsearch.xpack.prelert.job.SchedulerState;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class StartJobSchedulerAction
extends Action<StartJobSchedulerAction.Request, StartJobSchedulerAction.Response, StartJobSchedulerAction.RequestBuilder> {
public static final StartJobSchedulerAction INSTANCE = new StartJobSchedulerAction();
public static final String NAME = "cluster:admin/prelert/job/scheduler/start";
private StartJobSchedulerAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends AcknowledgedRequest<Request> implements ToXContent {
public static ObjectParser<Request, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>(NAME, Request::new);
static {
PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID);
PARSER.declareObject((request, schedulerState) -> request.schedulerState = schedulerState, SchedulerState.PARSER,
SchedulerState.TYPE_FIELD);
}
public static Request parseRequest(String jobId, XContentParser parser, ParseFieldMatcherSupplier parseFieldMatcherSupplier) {
Request request = PARSER.apply(parser, parseFieldMatcherSupplier);
if (jobId != null) {
request.jobId = jobId;
}
return request;
}
private String jobId;
// TODO (norelease): instead of providing a scheduler state, the user should just provide: startTimeMillis and endTimeMillis
// the state is useless here as it should always be STARTING
private SchedulerState schedulerState;
public Request(String jobId, SchedulerState schedulerState) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
this.schedulerState = ExceptionsHelper.requireNonNull(schedulerState, SchedulerState.TYPE_FIELD.getPreferredName());
if (schedulerState.getStatus() != JobSchedulerStatus.STARTING) {
throw new IllegalStateException(
"Start job scheduler action requires the scheduler status to be [" + JobSchedulerStatus.STARTING + "]");
}
}
Request() {
}
public String getJobId() {
return jobId;
}
public SchedulerState getSchedulerState() {
return schedulerState;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
schedulerState = new SchedulerState(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
schedulerState.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
builder.field(SchedulerState.TYPE_FIELD.getPreferredName(), schedulerState);
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(jobId, schedulerState);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId) && Objects.equals(schedulerState, other.schedulerState);
}
}
static class RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, StartJobSchedulerAction action) {
super(client, action, new Request());
}
}
public static class Response extends AcknowledgedResponse {
public Response(boolean acknowledged) {
super(acknowledged);
}
private Response() {
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final JobManager jobManager;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, JobManager jobManager) {
super(settings, StartJobSchedulerAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.jobManager = jobManager;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
jobManager.startJobScheduler(request, listener);
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
}

View File

@@ -0,0 +1,164 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import java.io.IOException;
import java.util.Objects;
public class StopJobSchedulerAction
extends Action<StopJobSchedulerAction.Request, StopJobSchedulerAction.Response, StopJobSchedulerAction.RequestBuilder> {
public static final StopJobSchedulerAction INSTANCE = new StopJobSchedulerAction();
public static final String NAME = "cluster:admin/prelert/job/scheduler/stop";
private StopJobSchedulerAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends AcknowledgedRequest<Request> {
private String jobId;
public Request(String jobId) {
this.jobId = Objects.requireNonNull(jobId);
}
Request() {
}
public String getJobId() {
return jobId;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
}
@Override
public int hashCode() {
return Objects.hash(jobId);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId);
}
}
static class RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, StopJobSchedulerAction action) {
super(client, action, new Request());
}
}
public static class Response extends AcknowledgedResponse {
public Response(boolean acknowledged) {
super(acknowledged);
}
private Response() {
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final JobManager jobManager;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, JobManager jobManager) {
super(settings, StopJobSchedulerAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.jobManager = jobManager;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
jobManager.stopJobScheduler(request, listener);
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
}
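
Taken together with StartJobSchedulerAction above, a rough sketch of the scheduler lifecycle as seen by a caller. The SchedulerState constructor used here (status plus start and end times) is an assumption for illustration; the code above only shows that the start request insists on the STARTING status.

    // Sketch only - the SchedulerState constructor arguments are assumed, not taken from this change.
    void runScheduler(Client client, long startMillis, Long endMillis,
            ActionListener<StartJobSchedulerAction.Response> startListener,
            ActionListener<StopJobSchedulerAction.Response> stopListener) {
        SchedulerState state = new SchedulerState(JobSchedulerStatus.STARTING, startMillis, endMillis); // assumed ctor
        client.execute(StartJobSchedulerAction.INSTANCE, new StartJobSchedulerAction.Request("my-job", state), startListener);
        // ...later, when the scheduled run should end:
        client.execute(StopJobSchedulerAction.INSTANCE, new StopJobSchedulerAction.Request("my-job"), stopListener);
    }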

View File

@@ -0,0 +1,191 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.JobSchedulerStatus;
import org.elasticsearch.xpack.prelert.job.SchedulerState;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class UpdateJobSchedulerStatusAction extends Action<UpdateJobSchedulerStatusAction.Request,
UpdateJobSchedulerStatusAction.Response, UpdateJobSchedulerStatusAction.RequestBuilder> {
public static final UpdateJobSchedulerStatusAction INSTANCE = new UpdateJobSchedulerStatusAction();
public static final String NAME = "cluster:admin/prelert/job/scheduler/status/update";
private UpdateJobSchedulerStatusAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends AcknowledgedRequest<Request> {
private String jobId;
private JobSchedulerStatus schedulerStatus;
public Request(String jobId, JobSchedulerStatus schedulerStatus) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
this.schedulerStatus = ExceptionsHelper.requireNonNull(schedulerStatus, SchedulerState.STATUS.getPreferredName());
}
Request() {}
public String getJobId() {
return jobId;
}
public void setJobId(String jobId) {
this.jobId = jobId;
}
public JobSchedulerStatus getSchedulerStatus() {
return schedulerStatus;
}
public void setSchedulerStatus(JobSchedulerStatus schedulerStatus) {
this.schedulerStatus = schedulerStatus;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
schedulerStatus = JobSchedulerStatus.fromStream(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
schedulerStatus.writeTo(out);
}
@Override
public int hashCode() {
return Objects.hash(jobId, schedulerStatus);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || obj.getClass() != getClass()) {
return false;
}
UpdateJobSchedulerStatusAction.Request other = (UpdateJobSchedulerStatusAction.Request) obj;
return Objects.equals(jobId, other.jobId) && Objects.equals(schedulerStatus, other.schedulerStatus);
}
@Override
public String toString() {
return "Request{" +
"jobId='" + jobId + '\'' +
", schedulerStatus=" + schedulerStatus +
'}';
}
}
static class RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, UpdateJobSchedulerStatusAction action) {
super(client, action, new Request());
}
}
public static class Response extends AcknowledgedResponse {
public Response(boolean acknowledged) {
super(acknowledged);
}
private Response() {}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final JobManager jobManager;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
JobManager jobManager) {
super(settings, UpdateJobSchedulerStatusAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.jobManager = jobManager;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
jobManager.updateSchedulerStatus(request.getJobId(), request.getSchedulerStatus());
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
}

View File

@@ -0,0 +1,183 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.JobStatus;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import org.elasticsearch.xpack.prelert.job.metadata.Allocation;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class UpdateJobStatusAction
extends Action<UpdateJobStatusAction.Request, UpdateJobStatusAction.Response, UpdateJobStatusAction.RequestBuilder> {
public static final UpdateJobStatusAction INSTANCE = new UpdateJobStatusAction();
public static final String NAME = "cluster:admin/prelert/job/status/update";
private UpdateJobStatusAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public Response newResponse() {
return new Response();
}
public static class Request extends AcknowledgedRequest<Request> {
private String jobId;
private JobStatus status;
public Request(String jobId, JobStatus status) {
this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
this.status = ExceptionsHelper.requireNonNull(status, Allocation.STATUS.getPreferredName());
}
Request() {}
public String getJobId() {
return jobId;
}
public void setJobId(String jobId) {
this.jobId = jobId;
}
public JobStatus getStatus() {
return status;
}
public void setStatus(JobStatus status) {
this.status = status;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
jobId = in.readString();
status = JobStatus.fromStream(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(jobId);
status.writeTo(out);
}
@Override
public int hashCode() {
return Objects.hash(jobId, status);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || obj.getClass() != getClass()) {
return false;
}
UpdateJobStatusAction.Request other = (UpdateJobStatusAction.Request) obj;
return Objects.equals(jobId, other.jobId) && Objects.equals(status, other.status);
}
}
static class RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
public RequestBuilder(ElasticsearchClient client, UpdateJobStatusAction action) {
super(client, action, new Request());
}
}
public static class Response extends AcknowledgedResponse {
public Response(boolean acknowledged) {
super(acknowledged);
}
private Response() {}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final JobManager jobManager;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
JobManager jobManager) {
super(settings, UpdateJobStatusAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.jobManager = jobManager;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
jobManager.setJobStatus(request.getJobId(), request.getStatus());
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
}
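
Because these request classes are hand-rolled Writeables, here is a unit-test style sketch of the serialisation round trip they are expected to survive. It is illustrative only and would have to live in the same package to reach the package-private no-argument constructor.

    // Round-trip sketch: write the request to a buffer, read it back and expect equality.
    void assertRoundTrip() throws IOException {
        UpdateJobStatusAction.Request original = new UpdateJobStatusAction.Request("my-job", JobStatus.RUNNING);
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);
            UpdateJobStatusAction.Request copy = new UpdateJobStatusAction.Request();
            copy.readFrom(out.bytes().streamInput());
            assert original.equals(copy) && original.hashCode() == copy.hashCode();
        }
    }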

View File

@@ -0,0 +1,165 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import java.io.IOException;
import java.util.Objects;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.Detector;
public class ValidateDetectorAction
extends Action<ValidateDetectorAction.Request, ValidateDetectorAction.Response, ValidateDetectorAction.RequestBuilder> {
public static final ValidateDetectorAction INSTANCE = new ValidateDetectorAction();
public static final String NAME = "cluster:admin/prelert/validate/detector";
protected ValidateDetectorAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, INSTANCE);
}
@Override
public Response newResponse() {
return new Response();
}
public static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
protected RequestBuilder(ElasticsearchClient client, ValidateDetectorAction action) {
super(client, action, new Request());
}
}
public static class Request extends ActionRequest implements ToXContent {
private Detector detector;
// NORELEASE this needs to change so the body is not directly the
// detector but an object that contains a field for the detector
public static Request parseRequest(XContentParser parser, ParseFieldMatcherSupplier parseFieldMatcherSupplier) {
Detector detector = Detector.PARSER.apply(parser, parseFieldMatcherSupplier).build();
return new Request(detector);
}
Request() {
this.detector = null;
}
public Request(Detector detector) {
this.detector = detector;
}
public Detector getDetector() {
return detector;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
detector.writeTo(out);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
detector = new Detector(in);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
detector.toXContent(builder, params);
return builder;
}
@Override
public int hashCode() {
return Objects.hash(detector);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(detector, other.detector);
}
}
public static class Response extends AcknowledgedResponse {
public Response() {
super();
}
public Response(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
public static class TransportAction extends HandledTransportAction<Request, Response> {
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ValidateDetectorAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
Request::new);
}
@Override
protected void doExecute(Request request, ActionListener<Response> listener) {
listener.onResponse(new Response(true));
}
}
}

View File

@@ -0,0 +1,165 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import java.io.IOException;
import java.util.Objects;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.transform.TransformConfig;
import org.elasticsearch.xpack.prelert.job.transform.verification.TransformConfigVerifier;
public class ValidateTransformAction
extends Action<ValidateTransformAction.Request, ValidateTransformAction.Response, ValidateTransformAction.RequestBuilder> {
public static final ValidateTransformAction INSTANCE = new ValidateTransformAction();
public static final String NAME = "cluster:admin/prelert/validate/transform";
protected ValidateTransformAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, INSTANCE);
}
@Override
public Response newResponse() {
return new Response();
}
public static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
protected RequestBuilder(ElasticsearchClient client, ValidateTransformAction action) {
super(client, action, new Request());
}
}
public static class Request extends ActionRequest implements ToXContent {
private TransformConfig transform;
public static Request parseRequest(XContentParser parser, ParseFieldMatcherSupplier parseFieldMatcherSupplier) {
TransformConfig transform = TransformConfig.PARSER.apply(parser, parseFieldMatcherSupplier);
return new Request(transform);
}
Request() {
this.transform = null;
}
public Request(TransformConfig transform) {
this.transform = transform;
}
public TransformConfig getTransform() {
return transform;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
transform.writeTo(out);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
transform = new TransformConfig(in);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
transform.toXContent(builder, params);
return builder;
}
@Override
public int hashCode() {
return Objects.hash(transform);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(transform, other.transform);
}
}
public static class Response extends AcknowledgedResponse {
public Response() {
super();
}
public Response(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
public static class TransportAction extends HandledTransportAction<Request, Response> {
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ValidateTransformAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
Request::new);
}
@Override
protected void doExecute(Request request, ActionListener<Response> listener) {
TransformConfigVerifier.verify(request.getTransform());
listener.onResponse(new Response(true));
}
}
}
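
A small usage sketch, assuming the caller already holds a parsed TransformConfig (its construction is outside this change): the transport action simply runs TransformConfigVerifier.verify and acknowledges on success.

    // Sketch only - `transform` is assumed to be an already-built TransformConfig.
    void validate(Client client, TransformConfig transform, ActionListener<ValidateTransformAction.Response> listener) {
        client.execute(ValidateTransformAction.INSTANCE, new ValidateTransformAction.Request(transform), listener);
    }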

View File

@@ -0,0 +1,174 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.action;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.prelert.job.transform.TransformConfig;
import org.elasticsearch.xpack.prelert.job.transform.verification.TransformConfigsVerifier;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
public class ValidateTransformsAction
extends Action<ValidateTransformsAction.Request, ValidateTransformsAction.Response, ValidateTransformsAction.RequestBuilder> {
public static final ValidateTransformsAction INSTANCE = new ValidateTransformsAction();
public static final String NAME = "cluster:admin/prelert/validate/transforms";
protected ValidateTransformsAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, INSTANCE);
}
@Override
public Response newResponse() {
return new Response();
}
public static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
protected RequestBuilder(ElasticsearchClient client, ValidateTransformsAction action) {
super(client, action, new Request());
}
}
public static class Request extends ActionRequest implements ToXContent {
public static final ParseField TRANSFORMS = new ParseField("transforms");
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<Request, ParseFieldMatcherSupplier> PARSER = new ConstructingObjectParser<>(NAME,
a -> new Request((List<TransformConfig>) a[0]));
static {
PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), TransformConfig.PARSER, TRANSFORMS);
}
private List<TransformConfig> transforms;
Request() {
this.transforms = null;
}
public Request(List<TransformConfig> transforms) {
this.transforms = transforms;
}
public List<TransformConfig> getTransforms() {
return transforms;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeList(transforms);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
transforms = in.readList(TransformConfig::new);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.array(TRANSFORMS.getPreferredName(), transforms.toArray(new Object[transforms.size()]));
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(transforms);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(transforms, other.transforms);
}
}
public static class Response extends AcknowledgedResponse {
public Response() {
super();
}
public Response(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
public static class TransportAction extends HandledTransportAction<Request, Response> {
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ValidateTransformsAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
Request::new);
}
@Override
protected void doExecute(Request request, ActionListener<Response> listener) {
TransformConfigsVerifier.verify(request.getTransforms());
listener.onResponse(new Response(true));
}
}
}

View File

@@ -0,0 +1,714 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;
import java.util.function.Function;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import java.util.stream.Collectors;
/**
* Autodetect analysis configuration options describe which fields are
* analysed and the functions to use.
* <p>
* The configuration can contain multiple detectors; a new anomaly detector will
* be created for each detector configuration. The fields
* <code>bucketSpan, batchSpan, summaryCountFieldName and categorizationFieldName</code>
* apply to all detectors.
* <p>
* If a value has not been set it will be <code>null</code>.
* Object wrappers are used around integral types &amp; booleans so they can take
* <code>null</code> values.
*/
public class AnalysisConfig extends ToXContentToBytes implements Writeable {
/**
* Serialisation names
*/
private static final ParseField ANALYSIS_CONFIG = new ParseField("analysisConfig");
private static final ParseField BUCKET_SPAN = new ParseField("bucketSpan");
private static final ParseField BATCH_SPAN = new ParseField("batchSpan");
private static final ParseField CATEGORIZATION_FIELD_NAME = new ParseField("categorizationFieldName");
private static final ParseField CATEGORIZATION_FILTERS = new ParseField("categorizationFilters");
private static final ParseField LATENCY = new ParseField("latency");
private static final ParseField PERIOD = new ParseField("period");
private static final ParseField SUMMARY_COUNT_FIELD_NAME = new ParseField("summaryCountFieldName");
private static final ParseField DETECTORS = new ParseField("detectors");
private static final ParseField INFLUENCERS = new ParseField("influencers");
private static final ParseField OVERLAPPING_BUCKETS = new ParseField("overlappingBuckets");
private static final ParseField RESULT_FINALIZATION_WINDOW = new ParseField("resultFinalizationWindow");
private static final ParseField MULTIVARIATE_BY_FIELDS = new ParseField("multivariateByFields");
private static final ParseField MULTIPLE_BUCKET_SPANS = new ParseField("multipleBucketSpans");
private static final ParseField USER_PER_PARTITION_NORMALIZATION = new ParseField("usePerPartitionNormalization");
private static final String PRELERT_CATEGORY_FIELD = "prelertcategory";
public static final Set<String> AUTO_CREATED_FIELDS = new HashSet<>(Arrays.asList(PRELERT_CATEGORY_FIELD));
public static final long DEFAULT_RESULT_FINALIZATION_WINDOW = 2L;
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<AnalysisConfig.Builder, ParseFieldMatcherSupplier> PARSER =
new ConstructingObjectParser<>(ANALYSIS_CONFIG.getPreferredName(), a -> new AnalysisConfig.Builder((List<Detector>) a[0]));
static {
PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) -> Detector.PARSER.apply(p, c).build(), DETECTORS);
PARSER.declareLong(Builder::setBucketSpan, BUCKET_SPAN);
PARSER.declareLong(Builder::setBatchSpan, BATCH_SPAN);
PARSER.declareString(Builder::setCategorizationFieldName, CATEGORIZATION_FIELD_NAME);
PARSER.declareStringArray(Builder::setCategorizationFilters, CATEGORIZATION_FILTERS);
PARSER.declareLong(Builder::setLatency, LATENCY);
PARSER.declareLong(Builder::setPeriod, PERIOD);
PARSER.declareString(Builder::setSummaryCountFieldName, SUMMARY_COUNT_FIELD_NAME);
PARSER.declareStringArray(Builder::setInfluencers, INFLUENCERS);
PARSER.declareBoolean(Builder::setOverlappingBuckets, OVERLAPPING_BUCKETS);
PARSER.declareLong(Builder::setResultFinalizationWindow, RESULT_FINALIZATION_WINDOW);
PARSER.declareBoolean(Builder::setMultivariateByFields, MULTIVARIATE_BY_FIELDS);
PARSER.declareLongArray(Builder::setMultipleBucketSpans, MULTIPLE_BUCKET_SPANS);
PARSER.declareBoolean(Builder::setUsePerPartitionNormalization, USER_PER_PARTITION_NORMALIZATION);
}
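// For orientation, a hypothetical example of the JSON this parser accepts.
// Field names are taken from the ParseFields above; the values and the
// contents of the detector objects are illustrative only.
//
// {
//   "bucketSpan": 3600,
//   "latency": 0,
//   "summaryCountFieldName": "doc_count",
//   "detectors": [ { ... } ],
//   "influencers": [ "client_ip" ]
// }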
/**
* These values apply to all detectors
*/
private final long bucketSpan;
private final Long batchSpan;
private final String categorizationFieldName;
private final List<String> categorizationFilters;
private final long latency;
private final Long period;
private final String summaryCountFieldName;
private final List<Detector> detectors;
private final List<String> influencers;
private final Boolean overlappingBuckets;
private final Long resultFinalizationWindow;
private final Boolean multivariateByFields;
private final List<Long> multipleBucketSpans;
private final boolean usePerPartitionNormalization;
private AnalysisConfig(Long bucketSpan, Long batchSpan, String categorizationFieldName, List<String> categorizationFilters,
long latency, Long period, String summaryCountFieldName, List<Detector> detectors, List<String> influencers,
Boolean overlappingBuckets, Long resultFinalizationWindow, Boolean multivariateByFields,
List<Long> multipleBucketSpans, boolean usePerPartitionNormalization) {
this.detectors = detectors;
this.bucketSpan = bucketSpan;
this.batchSpan = batchSpan;
this.latency = latency;
this.period = period;
this.categorizationFieldName = categorizationFieldName;
this.categorizationFilters = categorizationFilters;
this.summaryCountFieldName = summaryCountFieldName;
this.influencers = influencers;
this.overlappingBuckets = overlappingBuckets;
this.resultFinalizationWindow = resultFinalizationWindow;
this.multivariateByFields = multivariateByFields;
this.multipleBucketSpans = multipleBucketSpans;
this.usePerPartitionNormalization = usePerPartitionNormalization;
}
public AnalysisConfig(StreamInput in) throws IOException {
bucketSpan = in.readLong();
batchSpan = in.readOptionalLong();
categorizationFieldName = in.readOptionalString();
categorizationFilters = in.readBoolean() ? in.readList(StreamInput::readString) : null;
latency = in.readLong();
period = in.readOptionalLong();
summaryCountFieldName = in.readOptionalString();
detectors = in.readList(Detector::new);
influencers = in.readList(StreamInput::readString);
overlappingBuckets = in.readOptionalBoolean();
resultFinalizationWindow = in.readOptionalLong();
multivariateByFields = in.readOptionalBoolean();
multipleBucketSpans = in.readBoolean() ? in.readList(StreamInput::readLong) : null;
usePerPartitionNormalization = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeLong(bucketSpan);
out.writeOptionalLong(batchSpan);
out.writeOptionalString(categorizationFieldName);
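// categorizationFilters and multipleBucketSpans may be null, so a boolean
// presence flag is written before each of those lists (matching the reads in
// the StreamInput constructor above)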
if (categorizationFilters != null) {
out.writeBoolean(true);
out.writeStringList(categorizationFilters);
} else {
out.writeBoolean(false);
}
out.writeLong(latency);
out.writeOptionalLong(period);
out.writeOptionalString(summaryCountFieldName);
out.writeList(detectors);
out.writeStringList(influencers);
out.writeOptionalBoolean(overlappingBuckets);
out.writeOptionalLong(resultFinalizationWindow);
out.writeOptionalBoolean(multivariateByFields);
if (multipleBucketSpans != null) {
out.writeBoolean(true);
out.writeVInt(multipleBucketSpans.size());
for (Long bucketSpan : multipleBucketSpans) {
out.writeLong(bucketSpan);
}
} else {
out.writeBoolean(false);
}
out.writeBoolean(usePerPartitionNormalization);
}
/**
* The size of the interval the analysis is aggregated into measured in
* seconds
*
* @return The bucketspan (never <code>null</code>; the builder applies a default)
*/
public Long getBucketSpan() {
return bucketSpan;
}
public long getBucketSpanOrDefault() {
return bucketSpan;
}
/**
* Interval into which to batch seasonal data measured in seconds
*
* @return The batchspan or <code>null</code> if not set
*/
public Long getBatchSpan() {
return batchSpan;
}
public String getCategorizationFieldName() {
return categorizationFieldName;
}
public List<String> getCategorizationFilters() {
return categorizationFilters;
}
/**
* The latency interval (seconds) during which out-of-order records should be handled.
*
* @return The latency interval (seconds); defaults to 0 if not set
*/
public Long getLatency() {
return latency;
}
/**
* The repeat interval for periodic data in multiples of
* {@linkplain #getBatchSpan()}
*
* @return The period or <code>null</code> if not set
*/
public Long getPeriod() {
return period;
}
/**
* The name of the field that contains counts for pre-summarised input
*
* @return The field name or <code>null</code> if not set
*/
public String getSummaryCountFieldName() {
return summaryCountFieldName;
}
/**
* The list of analysis detectors. In a valid configuration the list should
* contain at least 1 {@link Detector}
*
* @return The Detectors used in this job
*/
public List<Detector> getDetectors() {
return detectors;
}
/**
* The list of influence field names
*/
public List<String> getInfluencers() {
return influencers;
}
/**
* Return the set of term fields.
* These are the influencer fields, partition field,
* by field and over field of each detector.
* <code>null</code> and empty strings are filtered from the
* config.
*
* @return Set of term fields - never <code>null</code>
*/
public Set<String> termFields() {
Set<String> termFields = new TreeSet<>();
for (Detector d : getDetectors()) {
addIfNotNull(termFields, d.getByFieldName());
addIfNotNull(termFields, d.getOverFieldName());
addIfNotNull(termFields, d.getPartitionFieldName());
}
for (String i : getInfluencers()) {
addIfNotNull(termFields, i);
}
// remove empty strings
termFields.remove("");
return termFields;
}
public Set<String> extractReferencedLists() {
return detectors.stream().map(Detector::extractReferencedLists)
.flatMap(Set::stream).collect(Collectors.toSet());
}
public Boolean getOverlappingBuckets() {
return overlappingBuckets;
}
public Long getResultFinalizationWindow() {
return resultFinalizationWindow;
}
public Boolean getMultivariateByFields() {
return multivariateByFields;
}
public List<Long> getMultipleBucketSpans() {
return multipleBucketSpans;
}
public boolean getUsePerPartitionNormalization() {
return usePerPartitionNormalization;
}
/**
* Return the list of fields required by the analysis.
* These are the influencer fields, metric field, partition field,
* by field and over field of each detector, plus the summary count
* field and the categorization field name of the job.
* <code>null</code> and empty strings are filtered from the
* config.
*
* @return List of required analysis fields - never <code>null</code>
*/
public List<String> analysisFields() {
Set<String> analysisFields = termFields();
addIfNotNull(analysisFields, categorizationFieldName);
addIfNotNull(analysisFields, summaryCountFieldName);
for (Detector d : getDetectors()) {
addIfNotNull(analysisFields, d.getFieldName());
}
// remove empty strings
analysisFields.remove("");
return new ArrayList<>(analysisFields);
}
private static void addIfNotNull(Set<String> fields, String field) {
if (field != null) {
fields.add(field);
}
}
public List<String> fields() {
return collectNonNullAndNonEmptyDetectorFields(d -> d.getFieldName());
}
private List<String> collectNonNullAndNonEmptyDetectorFields(
Function<Detector, String> fieldGetter) {
Set<String> fields = new HashSet<>();
for (Detector d : getDetectors()) {
addIfNotNull(fields, fieldGetter.apply(d));
}
// remove empty strings
fields.remove("");
return new ArrayList<>(fields);
}
public List<String> byFields() {
return collectNonNullAndNonEmptyDetectorFields(d -> d.getByFieldName());
}
public List<String> overFields() {
return collectNonNullAndNonEmptyDetectorFields(d -> d.getOverFieldName());
}
public List<String> partitionFields() {
return collectNonNullAndNonEmptyDetectorFields(d -> d.getPartitionFieldName());
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(BUCKET_SPAN.getPreferredName(), bucketSpan);
if (batchSpan != null) {
builder.field(BATCH_SPAN.getPreferredName(), batchSpan);
}
if (categorizationFieldName != null) {
builder.field(CATEGORIZATION_FIELD_NAME.getPreferredName(), categorizationFieldName);
}
if (categorizationFilters != null) {
builder.field(CATEGORIZATION_FILTERS.getPreferredName(), categorizationFilters);
}
builder.field(LATENCY.getPreferredName(), latency);
if (period != null) {
builder.field(PERIOD.getPreferredName(), period);
}
if (summaryCountFieldName != null) {
builder.field(SUMMARY_COUNT_FIELD_NAME.getPreferredName(), summaryCountFieldName);
}
builder.field(DETECTORS.getPreferredName(), detectors);
builder.field(INFLUENCERS.getPreferredName(), influencers);
if (overlappingBuckets != null) {
builder.field(OVERLAPPING_BUCKETS.getPreferredName(), overlappingBuckets);
}
if (resultFinalizationWindow != null) {
builder.field(RESULT_FINALIZATION_WINDOW.getPreferredName(), resultFinalizationWindow);
}
if (multivariateByFields != null) {
builder.field(MULTIVARIATE_BY_FIELDS.getPreferredName(), multivariateByFields);
}
if (multipleBucketSpans != null) {
builder.field(MULTIPLE_BUCKET_SPANS.getPreferredName(), multipleBucketSpans);
}
builder.field(USER_PER_PARTITION_NORMALIZATION.getPreferredName(), usePerPartitionNormalization);
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AnalysisConfig that = (AnalysisConfig) o;
return latency == that.latency &&
usePerPartitionNormalization == that.usePerPartitionNormalization &&
Objects.equals(bucketSpan, that.bucketSpan) &&
Objects.equals(batchSpan, that.batchSpan) &&
Objects.equals(categorizationFieldName, that.categorizationFieldName) &&
Objects.equals(categorizationFilters, that.categorizationFilters) &&
Objects.equals(period, that.period) &&
Objects.equals(summaryCountFieldName, that.summaryCountFieldName) &&
Objects.equals(detectors, that.detectors) &&
Objects.equals(influencers, that.influencers) &&
Objects.equals(overlappingBuckets, that.overlappingBuckets) &&
Objects.equals(resultFinalizationWindow, that.resultFinalizationWindow) &&
Objects.equals(multivariateByFields, that.multivariateByFields) &&
Objects.equals(multipleBucketSpans, that.multipleBucketSpans);
}
@Override
public int hashCode() {
return Objects.hash(
bucketSpan, batchSpan, categorizationFieldName, categorizationFilters, latency, period,
summaryCountFieldName, detectors, influencers, overlappingBuckets, resultFinalizationWindow,
multivariateByFields, multipleBucketSpans, usePerPartitionNormalization
);
}
public static class Builder {
public static final long DEFAULT_BUCKET_SPAN = 300L;
private List<Detector> detectors;
private long bucketSpan = DEFAULT_BUCKET_SPAN;
private Long batchSpan;
private long latency = 0L;
private Long period;
private String categorizationFieldName;
private List<String> categorizationFilters;
private String summaryCountFieldName;
private List<String> influencers = new ArrayList<>();
private Boolean overlappingBuckets;
private Long resultFinalizationWindow;
private Boolean multivariateByFields;
private List<Long> multipleBucketSpans;
private boolean usePerPartitionNormalization = false;
public Builder(List<Detector> detectors) {
this.detectors = detectors;
}
Builder(AnalysisConfig analysisConfig) {
this.detectors = analysisConfig.detectors;
this.bucketSpan = analysisConfig.bucketSpan;
this.batchSpan = analysisConfig.batchSpan;
this.latency = analysisConfig.latency;
this.period = analysisConfig.period;
this.categorizationFieldName = analysisConfig.categorizationFieldName;
this.categorizationFilters = analysisConfig.categorizationFilters;
this.summaryCountFieldName = analysisConfig.summaryCountFieldName;
this.influencers = analysisConfig.influencers;
this.overlappingBuckets = analysisConfig.overlappingBuckets;
this.resultFinalizationWindow = analysisConfig.resultFinalizationWindow;
this.multivariateByFields = analysisConfig.multivariateByFields;
this.multipleBucketSpans = analysisConfig.multipleBucketSpans;
this.usePerPartitionNormalization = analysisConfig.usePerPartitionNormalization;
}
public void setDetectors(List<Detector> detectors) {
this.detectors = detectors;
}
public void setBucketSpan(long bucketSpan) {
this.bucketSpan = bucketSpan;
}
public void setBatchSpan(long batchSpan) {
this.batchSpan = batchSpan;
}
public void setLatency(long latency) {
this.latency = latency;
}
public void setPeriod(long period) {
this.period = period;
}
public void setCategorizationFieldName(String categorizationFieldName) {
this.categorizationFieldName = categorizationFieldName;
}
public void setCategorizationFilters(List<String> categorizationFilters) {
this.categorizationFilters = categorizationFilters;
}
public void setSummaryCountFieldName(String summaryCountFieldName) {
this.summaryCountFieldName = summaryCountFieldName;
}
public void setInfluencers(List<String> influencers) {
this.influencers = influencers;
}
public void setOverlappingBuckets(Boolean overlappingBuckets) {
this.overlappingBuckets = overlappingBuckets;
}
public void setResultFinalizationWindow(Long resultFinalizationWindow) {
this.resultFinalizationWindow = resultFinalizationWindow;
}
public void setMultivariateByFields(Boolean multivariateByFields) {
this.multivariateByFields = multivariateByFields;
}
public void setMultipleBucketSpans(List<Long> multipleBucketSpans) {
this.multipleBucketSpans = multipleBucketSpans;
}
public void setUsePerPartitionNormalization(boolean usePerPartitionNormalization) {
this.usePerPartitionNormalization = usePerPartitionNormalization;
}
/**
* Checks the configuration is valid
* <ol>
* <li>Check that, if non-null, BucketSpan, BatchSpan, Latency and Period are
* &gt;= 0</li>
* <li>Check that if non-null Latency is &lt;= MAX_LATENCY</li>
* <li>Check there is at least one detector configured</li>
* <li>Check all the detectors are configured correctly</li>
* <li>Check that OVERLAPPING_BUCKETS is set appropriately</li>
* <li>Check that MULTIPLE_BUCKETSPANS are set appropriately</li>
* <li>If Per Partition normalization is configured at least one detector
* must have a partition field and no influencers can be used</li>
* </ol>
*/
public AnalysisConfig build() {
checkFieldIsNotNegativeIfSpecified(BUCKET_SPAN.getPreferredName(), bucketSpan);
checkFieldIsNotNegativeIfSpecified(BATCH_SPAN.getPreferredName(), batchSpan);
checkFieldIsNotNegativeIfSpecified(LATENCY.getPreferredName(), latency);
checkFieldIsNotNegativeIfSpecified(PERIOD.getPreferredName(), period);
verifyDetectorAreDefined(detectors);
verifyFieldName(summaryCountFieldName);
verifyFieldName(categorizationFieldName);
verifyCategorizationFilters(categorizationFilters, categorizationFieldName);
verifyMultipleBucketSpans(multipleBucketSpans, bucketSpan);
overlappingBuckets = verifyOverlappingBucketsConfig(overlappingBuckets, detectors);
if (usePerPartitionNormalization) {
checkDetectorsHavePartitionFields(detectors);
checkNoInfluencersAreSet(influencers);
}
return new AnalysisConfig(bucketSpan, batchSpan, categorizationFieldName, categorizationFilters,
latency, period, summaryCountFieldName, detectors, influencers, overlappingBuckets,
resultFinalizationWindow, multivariateByFields, multipleBucketSpans, usePerPartitionNormalization);
}
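// Illustrative usage only (not part of the original source), assuming a detector
// list built elsewhere; build() applies the checks listed above and throws
// IllegalArgumentException if any of them fail:
//
//     AnalysisConfig.Builder builder = new AnalysisConfig.Builder(detectors);
//     builder.setBucketSpan(600L);                                    // 10 minute buckets
//     builder.setInfluencers(Collections.singletonList("clientip"));  // optional influencer field
//     AnalysisConfig config = builder.build();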
private static void checkFieldIsNotNegativeIfSpecified(String fieldName, Long value) {
if (value != null && value < 0) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, fieldName, 0, value);
throw new IllegalArgumentException(msg);
}
}
private static void verifyDetectorAreDefined(List<Detector> detectors) {
if (detectors == null || detectors.isEmpty()) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_NO_DETECTORS));
}
}
private static void verifyCategorizationFilters(List<String> filters, String categorizationFieldName) {
if (filters == null || filters.isEmpty()) {
return;
}
verifyCategorizationFieldNameSetIfFiltersAreSet(categorizationFieldName);
verifyCategorizationFiltersAreDistinct(filters);
verifyCategorizationFiltersContainNoneEmpty(filters);
verifyCategorizationFiltersAreValidRegex(filters);
}
private static void verifyCategorizationFieldNameSetIfFiltersAreSet(String categorizationFieldName) {
if (categorizationFieldName == null) {
throw new IllegalArgumentException(Messages.getMessage(
Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_REQUIRE_CATEGORIZATION_FIELD_NAME));
}
}
private static void verifyCategorizationFiltersAreDistinct(List<String> filters) {
if (filters.stream().distinct().count() != filters.size()) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_DUPLICATES));
}
}
private static void verifyCategorizationFiltersContainNoneEmpty(List<String> filters) {
if (filters.stream().anyMatch(f -> f.isEmpty())) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_EMPTY));
}
}
private static void verifyCategorizationFiltersAreValidRegex(List<String> filters) {
for (String filter : filters) {
if (!isValidRegex(filter)) {
throw new IllegalArgumentException(
Messages.getMessage(Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_INVALID_REGEX, filter));
}
}
}
private static void verifyMultipleBucketSpans(List<Long> multipleBucketSpans, Long bucketSpan) {
if (multipleBucketSpans == null) {
return;
}
if (bucketSpan == null) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_MULTIPLE_BUCKETSPANS_REQUIRE_BUCKETSPAN));
}
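// Each configured span must be a strict multiple of the bucket span, e.g. with a bucket span of 300
// the values 600 and 900 are accepted but 300 and 450 are rejected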
for (Long span : multipleBucketSpans) {
if ((span % bucketSpan != 0L) || (span <= bucketSpan)) {
throw new IllegalArgumentException(
Messages.getMessage(Messages.JOB_CONFIG_MULTIPLE_BUCKETSPANS_MUST_BE_MULTIPLE, span, bucketSpan));
}
}
}
private static boolean checkDetectorsHavePartitionFields(List<Detector> detectors) {
for (Detector detector : detectors) {
if (!Strings.isNullOrEmpty(detector.getPartitionFieldName())) {
return true;
}
}
throw new IllegalArgumentException(Messages.getMessage(
Messages.JOB_CONFIG_PER_PARTITION_NORMALIZATION_REQUIRES_PARTITION_FIELD));
}
private static boolean checkNoInfluencersAreSet(List<String> influencers) {
if (!influencers.isEmpty()) {
throw new IllegalArgumentException(Messages.getMessage(
Messages.JOB_CONFIG_PER_PARTITION_NORMALIZATION_CANNOT_USE_INFLUENCERS));
}
return true;
}
/**
* Check that the characters used in a field name will not cause problems.
*
* @param field The field name to be validated
* @return true
*/
public static boolean verifyFieldName(String field) throws ElasticsearchParseException {
if (field != null && containsInvalidChar(field)) {
throw new IllegalArgumentException(
Messages.getMessage(Messages.JOB_CONFIG_INVALID_FIELDNAME_CHARS, field, Detector.PROHIBITED));
}
return true;
}
private static boolean containsInvalidChar(String field) {
for (Character ch : Detector.PROHIBITED_FIELDNAME_CHARACTERS) {
if (field.indexOf(ch) >= 0) {
return true;
}
}
return field.chars().anyMatch(ch -> Character.isISOControl(ch));
}
private static boolean isValidRegex(String exp) {
try {
Pattern.compile(exp);
return true;
} catch (PatternSyntaxException e) {
return false;
}
}
private static Boolean verifyOverlappingBucketsConfig(Boolean overlappingBuckets, List<Detector> detectors) {
// If any detector function is rare/freq_rare, mustn't use overlapping buckets
boolean mustNotUse = false;
List<String> illegalFunctions = new ArrayList<>();
for (Detector d : detectors) {
if (Detector.NO_OVERLAPPING_BUCKETS_FUNCTIONS.contains(d.getFunction())) {
illegalFunctions.add(d.getFunction());
mustNotUse = true;
}
}
if (Boolean.TRUE.equals(overlappingBuckets) && mustNotUse) {
throw new IllegalArgumentException(
Messages.getMessage(Messages.JOB_CONFIG_OVERLAPPING_BUCKETS_INCOMPATIBLE_FUNCTION, illegalFunctions.toString()));
}
return overlappingBuckets;
}
}
}

View File

@ -0,0 +1,131 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import java.io.IOException;
import java.util.Objects;
/**
* Analysis limits for autodetect
* <p>
* If an option has not been set it shouldn't be used, so that the default value is picked up instead.
*/
public class AnalysisLimits extends ToXContentToBytes implements Writeable {
/**
* Serialisation field names
*/
public static final ParseField MODEL_MEMORY_LIMIT = new ParseField("modelMemoryLimit");
public static final ParseField CATEGORIZATION_EXAMPLES_LIMIT = new ParseField("categorizationExamplesLimit");
public static final ConstructingObjectParser<AnalysisLimits, ParseFieldMatcherSupplier> PARSER = new ConstructingObjectParser<>(
"analysis_limits", a -> new AnalysisLimits((Long) a[0], (Long) a[1]));
static {
PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MODEL_MEMORY_LIMIT);
PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), CATEGORIZATION_EXAMPLES_LIMIT);
}
/**
* It is initialised to <code>null</code>.
* A value of <code>null</code> or <code>0</code> will result in the default being used.
*/
private final Long modelMemoryLimit;
/**
* It is initialised to <code>null</code>.
* A value of <code>null</code> will result in the default being used.
*/
private final Long categorizationExamplesLimit;
public AnalysisLimits(Long modelMemoryLimit, Long categorizationExamplesLimit) {
this.modelMemoryLimit = modelMemoryLimit;
if (categorizationExamplesLimit != null && categorizationExamplesLimit < 0) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, CATEGORIZATION_EXAMPLES_LIMIT, 0,
categorizationExamplesLimit);
throw new IllegalArgumentException(msg);
}
this.categorizationExamplesLimit = categorizationExamplesLimit;
}
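// Illustrative usage only (not in the original source): a 4096 MB model memory
// limit with up to 4 stored examples per category; either argument may be null
// so that the corresponding default is used.
//
//     AnalysisLimits limits = new AnalysisLimits(4096L, 4L);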
public AnalysisLimits(StreamInput in) throws IOException {
this(in.readOptionalLong(), in.readOptionalLong());
}
/**
* Maximum size of the model in MB before the anomaly detector
* will drop new samples to prevent the model using any more
* memory
*
* @return The set memory limit or <code>null</code> if not set
*/
@Nullable
public Long getModelMemoryLimit() {
return modelMemoryLimit;
}
/**
* Gets the limit to the number of examples that are stored per category
*
* @return the limit or <code>null</code> if not set
*/
@Nullable
public Long getCategorizationExamplesLimit() {
return categorizationExamplesLimit;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalLong(modelMemoryLimit);
out.writeOptionalLong(categorizationExamplesLimit);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (modelMemoryLimit != null) {
builder.field(MODEL_MEMORY_LIMIT.getPreferredName(), modelMemoryLimit);
}
if (categorizationExamplesLimit != null) {
builder.field(CATEGORIZATION_EXAMPLES_LIMIT.getPreferredName(), categorizationExamplesLimit);
}
builder.endObject();
return builder;
}
/**
* Overridden equality test
*/
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof AnalysisLimits == false) {
return false;
}
AnalysisLimits that = (AnalysisLimits) other;
return Objects.equals(this.modelMemoryLimit, that.modelMemoryLimit) &&
Objects.equals(this.categorizationExamplesLimit, that.categorizationExamplesLimit);
}
@Override
public int hashCode() {
return Objects.hash(modelMemoryLimit, categorizationExamplesLimit);
}
}

View File

@ -0,0 +1,24 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
/**
* The categorizer state does not need to be loaded on the Java side.
* However, the Java process DOES set up a mapping on the Elasticsearch
* index to tell Elasticsearch not to analyse the categorizer state documents
* in any way.
*/
public class CategorizerState {
/**
* The type of this class used when persisting the data
*/
public static final String TYPE = "categorizerState";
private CategorizerState() {
}
}

View File

@ -0,0 +1,413 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.xpack.prelert.utils.time.TimeUtils;
import java.io.IOException;
import java.util.Date;
import java.util.Objects;
/**
* Job processed record counts.
* <p>
* The getInput... methods return the actual number of
* fields/records sent to the API, including invalid records.
* The getProcessed... methods are the number sent to the
* Engine.
* <p>
* The <code>inputRecordCount</code> field is calculated so it
* should not be set in deserialisation but it should be serialised
* so the field is visible.
*/
public class DataCounts extends ToXContentToBytes implements Writeable {
public static final String DOCUMENT_SUFFIX = "-data-counts";
public static final String PROCESSED_RECORD_COUNT_STR = "processed_record_count";
public static final String PROCESSED_FIELD_COUNT_STR = "processed_field_count";
public static final String INPUT_BYTES_STR = "input_bytes";
public static final String INPUT_RECORD_COUNT_STR = "input_record_count";
public static final String INPUT_FIELD_COUNT_STR = "input_field_count";
public static final String INVALID_DATE_COUNT_STR = "invalid_date_count";
public static final String MISSING_FIELD_COUNT_STR = "missing_field_count";
public static final String OUT_OF_ORDER_TIME_COUNT_STR = "out_of_order_timestamp_count";
public static final String EARLIEST_RECORD_TIME_STR = "earliest_record_timestamp";
public static final String LATEST_RECORD_TIME_STR = "latest_record_timestamp";
public static final ParseField PROCESSED_RECORD_COUNT = new ParseField(PROCESSED_RECORD_COUNT_STR);
public static final ParseField PROCESSED_FIELD_COUNT = new ParseField(PROCESSED_FIELD_COUNT_STR);
public static final ParseField INPUT_BYTES = new ParseField(INPUT_BYTES_STR);
public static final ParseField INPUT_RECORD_COUNT = new ParseField(INPUT_RECORD_COUNT_STR);
public static final ParseField INPUT_FIELD_COUNT = new ParseField(INPUT_FIELD_COUNT_STR);
public static final ParseField INVALID_DATE_COUNT = new ParseField(INVALID_DATE_COUNT_STR);
public static final ParseField MISSING_FIELD_COUNT = new ParseField(MISSING_FIELD_COUNT_STR);
public static final ParseField OUT_OF_ORDER_TIME_COUNT = new ParseField(OUT_OF_ORDER_TIME_COUNT_STR);
public static final ParseField EARLIEST_RECORD_TIME = new ParseField(EARLIEST_RECORD_TIME_STR);
public static final ParseField LATEST_RECORD_TIME = new ParseField(LATEST_RECORD_TIME_STR);
public static final ParseField TYPE = new ParseField("dataCounts");
public static final ConstructingObjectParser<DataCounts, ParseFieldMatcherSupplier> PARSER =
new ConstructingObjectParser<>("data_counts", a -> new DataCounts((String) a[0], (long) a[1], (long) a[2], (long) a[3],
(long) a[4], (long) a[5], (long) a[6], (long) a[7], (Date) a[8], (Date) a[9]));
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), PROCESSED_RECORD_COUNT);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), PROCESSED_FIELD_COUNT);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), INPUT_BYTES);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), INPUT_FIELD_COUNT);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), INVALID_DATE_COUNT);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), MISSING_FIELD_COUNT);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), OUT_OF_ORDER_TIME_COUNT);
PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException(
"unexpected token [" + p.currentToken() + "] for [" + EARLIEST_RECORD_TIME.getPreferredName() + "]");
}, EARLIEST_RECORD_TIME, ValueType.VALUE);
PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException(
"unexpected token [" + p.currentToken() + "] for [" + LATEST_RECORD_TIME.getPreferredName() + "]");
}, LATEST_RECORD_TIME, ValueType.VALUE);
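// input_record_count is derived from the other counts (see getInputRecordCount()),
// so the parsed value is deliberately ignored here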
PARSER.declareLong((t, u) -> {}, INPUT_RECORD_COUNT);
}
private final String jobId;
private long processedRecordCount;
private long processedFieldCount;
private long inputBytes;
private long inputFieldCount;
private long invalidDateCount;
private long missingFieldCount;
private long outOfOrderTimeStampCount;
// NORELEASE: Use Jodatime instead
private Date earliestRecordTimeStamp;
private Date latestRecordTimeStamp;
public DataCounts(String jobId, long processedRecordCount, long processedFieldCount, long inputBytes,
long inputFieldCount, long invalidDateCount, long missingFieldCount, long outOfOrderTimeStampCount,
Date earliestRecordTimeStamp, Date latestRecordTimeStamp) {
this.jobId = jobId;
this.processedRecordCount = processedRecordCount;
this.processedFieldCount = processedFieldCount;
this.inputBytes = inputBytes;
this.inputFieldCount = inputFieldCount;
this.invalidDateCount = invalidDateCount;
this.missingFieldCount = missingFieldCount;
this.outOfOrderTimeStampCount = outOfOrderTimeStampCount;
this.latestRecordTimeStamp = latestRecordTimeStamp;
this.earliestRecordTimeStamp = earliestRecordTimeStamp;
}
public DataCounts(String jobId) {
this.jobId = jobId;
}
public DataCounts(DataCounts lhs) {
jobId = lhs.jobId;
processedRecordCount = lhs.processedRecordCount;
processedFieldCount = lhs.processedFieldCount;
inputBytes = lhs.inputBytes;
inputFieldCount = lhs.inputFieldCount;
invalidDateCount = lhs.invalidDateCount;
missingFieldCount = lhs.missingFieldCount;
outOfOrderTimeStampCount = lhs.outOfOrderTimeStampCount;
latestRecordTimeStamp = lhs.latestRecordTimeStamp;
earliestRecordTimeStamp = lhs.earliestRecordTimeStamp;
}
public DataCounts(StreamInput in) throws IOException {
jobId = in.readString();
processedRecordCount = in.readVLong();
processedFieldCount = in.readVLong();
inputBytes = in.readVLong();
inputFieldCount = in.readVLong();
invalidDateCount = in.readVLong();
missingFieldCount = in.readVLong();
outOfOrderTimeStampCount = in.readVLong();
if (in.readBoolean()) {
latestRecordTimeStamp = new Date(in.readVLong());
}
if (in.readBoolean()) {
earliestRecordTimeStamp = new Date(in.readVLong());
}
in.readVLong(); // throw away inputRecordCount
}
public String getJobid() {
return jobId;
}
/**
* Number of records processed by this job.
* This value is the number of records passed on to the
* engine, i.e. {@linkplain #getInputRecordCount()} minus
* records with bad dates or out-of-order timestamps
*
* @return Number of records processed by this job {@code long}
*/
public long getProcessedRecordCount() {
return processedRecordCount;
}
public void incrementProcessedRecordCount(long additional) {
processedRecordCount += additional;
}
/**
* Number of data points (processed record count * the number
* of analysed fields) processed by this job. This count does
* not include the time field.
*
* @return Number of data points processed by this job {@code long}
*/
public long getProcessedFieldCount() {
return processedFieldCount;
}
public void calcProcessedFieldCount(long analysisFieldsPerRecord) {
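// Example: 10 processed records with 3 analysis fields each and 2 missing fields gives 10 * 3 - 2 = 28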
processedFieldCount =
(processedRecordCount * analysisFieldsPerRecord)
- missingFieldCount;
// processedFieldCount could be a -ve value if no
// records have been written in which case it should be 0
processedFieldCount = (processedFieldCount < 0) ? 0 : processedFieldCount;
}
/**
* Total number of input records read.
* This = processed record count + date parse error records count
* + out of order record count.
* <p>
* Records with missing fields are counted as they are still written.
*
* @return Total number of input records read {@code long}
*/
public long getInputRecordCount() {
return processedRecordCount + outOfOrderTimeStampCount
+ invalidDateCount;
}
/**
* The total number of bytes sent to this job.
* This value includes the bytes from any records
* that have been discarded for any reason
* e.g. because the date cannot be read
*
* @return Volume in bytes
*/
public long getInputBytes() {
return inputBytes;
}
public void incrementInputBytes(long additional) {
inputBytes += additional;
}
/**
* The total number of fields sent to the job
* including fields that aren't analysed.
*
* @return The total number of fields sent to the job
*/
public long getInputFieldCount() {
return inputFieldCount;
}
public void incrementInputFieldCount(long additional) {
inputFieldCount += additional;
}
/**
* The number of records with an invalid date field that could
* not be parsed or converted to epoch time.
*
* @return The number of records with an invalid date field
*/
public long getInvalidDateCount() {
return invalidDateCount;
}
public void incrementInvalidDateCount(long additional) {
invalidDateCount += additional;
}
/**
* The number of missing fields that had been
* configured for analysis.
*
* @return The number of missing fields
*/
public long getMissingFieldCount() {
return missingFieldCount;
}
public void incrementMissingFieldCount(long additional) {
missingFieldCount += additional;
}
/**
* The number of records with a timestamp that is
* before the time of the latest record. Records should
* be in ascending chronological order
*
* @return The number of records with a timestamp that is before the time of the latest record
*/
public long getOutOfOrderTimeStampCount() {
return outOfOrderTimeStampCount;
}
public void incrementOutOfOrderTimeStampCount(long additional) {
outOfOrderTimeStampCount += additional;
}
/**
* The time of the first record seen.
*
* @return The first record time
*/
public Date getEarliestRecordTimeStamp() {
return earliestRecordTimeStamp;
}
/**
* If {@code earliestRecordTimeStamp} has not been set (i.e. is {@code null})
* then set it to {@code timeStamp}
*
* @param timeStamp Candidate time
* @throws IllegalStateException if {@code earliestRecordTimeStamp} is already set
*/
public void setEarliestRecordTimeStamp(Date timeStamp) {
if (earliestRecordTimeStamp != null) {
throw new IllegalStateException("earliestRecordTimeStamp can only be set once");
}
earliestRecordTimeStamp = timeStamp;
}
/**
* The time of the latest record seen.
*
* @return Latest record time
*/
public Date getLatestRecordTimeStamp() {
return latestRecordTimeStamp;
}
public void setLatestRecordTimeStamp(Date latestRecordTimeStamp) {
this.latestRecordTimeStamp = latestRecordTimeStamp;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(jobId);
out.writeVLong(processedRecordCount);
out.writeVLong(processedFieldCount);
out.writeVLong(inputBytes);
out.writeVLong(inputFieldCount);
out.writeVLong(invalidDateCount);
out.writeVLong(missingFieldCount);
out.writeVLong(outOfOrderTimeStampCount);
if (latestRecordTimeStamp != null) {
out.writeBoolean(true);
out.writeVLong(latestRecordTimeStamp.getTime());
} else {
out.writeBoolean(false);
}
if (earliestRecordTimeStamp != null) {
out.writeBoolean(true);
out.writeVLong(earliestRecordTimeStamp.getTime());
} else {
out.writeBoolean(false);
}
out.writeVLong(getInputRecordCount());
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
doXContentBody(builder, params);
builder.endObject();
return builder;
}
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
builder.field(Job.ID.getPreferredName(), jobId);
builder.field(PROCESSED_RECORD_COUNT.getPreferredName(), processedRecordCount);
builder.field(PROCESSED_FIELD_COUNT.getPreferredName(), processedFieldCount);
builder.field(INPUT_BYTES.getPreferredName(), inputBytes);
builder.field(INPUT_FIELD_COUNT.getPreferredName(), inputFieldCount);
builder.field(INVALID_DATE_COUNT.getPreferredName(), invalidDateCount);
builder.field(MISSING_FIELD_COUNT.getPreferredName(), missingFieldCount);
builder.field(OUT_OF_ORDER_TIME_COUNT.getPreferredName(), outOfOrderTimeStampCount);
if (earliestRecordTimeStamp != null) {
builder.field(EARLIEST_RECORD_TIME.getPreferredName(), earliestRecordTimeStamp.getTime());
}
if (latestRecordTimeStamp != null) {
builder.field(LATEST_RECORD_TIME.getPreferredName(), latestRecordTimeStamp.getTime());
}
builder.field(INPUT_RECORD_COUNT.getPreferredName(), getInputRecordCount());
return builder;
}
/**
* Equality test
*/
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof DataCounts == false) {
return false;
}
DataCounts that = (DataCounts) other;
return Objects.equals(this.jobId, that.jobId) &&
this.processedRecordCount == that.processedRecordCount &&
this.processedFieldCount == that.processedFieldCount &&
this.inputBytes == that.inputBytes &&
this.inputFieldCount == that.inputFieldCount &&
this.invalidDateCount == that.invalidDateCount &&
this.missingFieldCount == that.missingFieldCount &&
this.outOfOrderTimeStampCount == that.outOfOrderTimeStampCount &&
Objects.equals(this.latestRecordTimeStamp, that.latestRecordTimeStamp) &&
Objects.equals(this.earliestRecordTimeStamp, that.earliestRecordTimeStamp);
}
@Override
public int hashCode() {
return Objects.hash(jobId, processedRecordCount, processedFieldCount,
inputBytes, inputFieldCount, invalidDateCount, missingFieldCount,
outOfOrderTimeStampCount, latestRecordTimeStamp, earliestRecordTimeStamp);
}
}

View File

@ -0,0 +1,358 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import org.elasticsearch.xpack.prelert.utils.time.DateTimeFormatterTimestampConverter;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
/**
* Describes the format of the data used in the job and how it should
* be interpreted by autodetect.
* <p>
* Data must either be in a textual delineated format (e.g. csv, tsv) or JSON;
* the {@linkplain DataFormat} enum indicates which. {@link #getTimeField()}
* is the name of the field containing the timestamp and {@link #getTimeFormat()}
* is the format code for the date string as described by
* {@link java.time.format.DateTimeFormatter}. The default quote character for
* delineated formats is {@value #DEFAULT_QUOTE_CHAR} but any other character can be
* used.
*/
public class DataDescription extends ToXContentToBytes implements Writeable {
/**
* Enum of the acceptable data formats.
*/
public enum DataFormat implements Writeable {
JSON("json"),
DELIMITED("delimited"),
SINGLE_LINE("single_line"),
ELASTICSEARCH("elasticsearch");
/**
* Delimited used to be called delineated. We keep supporting that for backwards
* compatibility.
*/
private static final String DEPRECATED_DELINEATED = "DELINEATED";
private String name;
private DataFormat(String name) {
this.name = name;
}
public String getName() {
return name;
}
/**
* Case-insensitive from string method.
* Works with either JSON, json, etc.
*
* @param value String representation
* @return The data format
*/
public static DataFormat forString(String value) {
String valueUpperCase = value.toUpperCase(Locale.ROOT);
return DEPRECATED_DELINEATED.equals(valueUpperCase) ? DELIMITED : DataFormat
.valueOf(valueUpperCase);
}
public static DataFormat readFromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown DataFormat ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
}
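// Example (illustrative): DataFormat.forString("delineated") and DataFormat.forString("json")
// resolve to DataFormat.DELIMITED and DataFormat.JSON respectively, regardless of case.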
private static final ParseField DATA_DESCRIPTION_FIELD = new ParseField("dataDescription");
private static final ParseField FORMAT_FIELD = new ParseField("format");
private static final ParseField TIME_FIELD_NAME_FIELD = new ParseField("timeField");
private static final ParseField TIME_FORMAT_FIELD = new ParseField("timeFormat");
private static final ParseField FIELD_DELIMITER_FIELD = new ParseField("fieldDelimiter");
private static final ParseField QUOTE_CHARACTER_FIELD = new ParseField("quoteCharacter");
/**
* Special time format string for epoch times (seconds)
*/
public static final String EPOCH = "epoch";
/**
* Special time format string for epoch times (milli-seconds)
*/
public static final String EPOCH_MS = "epoch_ms";
/**
* By default autodetect expects the timestamp in a field with this name
*/
public static final String DEFAULT_TIME_FIELD = "time";
/**
* The default field delimiter expected by the native autodetect
* program.
*/
public static final char DEFAULT_DELIMITER = '\t';
/**
* Csv data must have this line ending
*/
public static final char LINE_ENDING = '\n';
/**
* The default quote character used to escape text in
* delineated data formats
*/
public static final char DEFAULT_QUOTE_CHAR = '"';
private final DataFormat dataFormat;
private final String timeFieldName;
private final String timeFormat;
private final char fieldDelimiter;
private final char quoteCharacter;
public static final ObjectParser<Builder, ParseFieldMatcherSupplier> PARSER =
new ObjectParser<>(DATA_DESCRIPTION_FIELD.getPreferredName(), Builder::new);
static {
PARSER.declareString(Builder::setFormat, FORMAT_FIELD);
PARSER.declareString(Builder::setTimeField, TIME_FIELD_NAME_FIELD);
PARSER.declareString(Builder::setTimeFormat, TIME_FORMAT_FIELD);
PARSER.declareField(Builder::setFieldDelimiter, DataDescription::extractChar, FIELD_DELIMITER_FIELD, ValueType.STRING);
PARSER.declareField(Builder::setQuoteCharacter, DataDescription::extractChar, QUOTE_CHARACTER_FIELD, ValueType.STRING);
}
public DataDescription(DataFormat dataFormat, String timeFieldName, String timeFormat, char fieldDelimiter, char quoteCharacter) {
this.dataFormat = dataFormat;
this.timeFieldName = timeFieldName;
this.timeFormat = timeFormat;
this.fieldDelimiter = fieldDelimiter;
this.quoteCharacter = quoteCharacter;
}
public DataDescription(StreamInput in) throws IOException {
dataFormat = DataFormat.readFromStream(in);
timeFieldName = in.readString();
timeFormat = in.readString();
fieldDelimiter = (char) in.read();
quoteCharacter = (char) in.read();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
dataFormat.writeTo(out);
out.writeString(timeFieldName);
out.writeString(timeFormat);
out.write(fieldDelimiter);
out.write(quoteCharacter);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(FORMAT_FIELD.getPreferredName(), dataFormat);
builder.field(TIME_FIELD_NAME_FIELD.getPreferredName(), timeFieldName);
builder.field(TIME_FORMAT_FIELD.getPreferredName(), timeFormat);
builder.field(FIELD_DELIMITER_FIELD.getPreferredName(), String.valueOf(fieldDelimiter));
builder.field(QUOTE_CHARACTER_FIELD.getPreferredName(), String.valueOf(quoteCharacter));
builder.endObject();
return builder;
}
/**
* The format of the data to be processed.
* Defaults to {@link DataDescription.DataFormat#DELIMITED}
*
* @return The data format
*/
public DataFormat getFormat() {
return dataFormat;
}
/**
* The name of the field containing the timestamp
*
* @return A String if set or <code>null</code>
*/
public String getTimeField() {
return timeFieldName;
}
/**
* Either {@value #EPOCH}, {@value #EPOCH_MS} or a
* {@link java.time.format.DateTimeFormatter} pattern string.
* If not set (is <code>null</code> or an empty string) or set to
* {@value #EPOCH} (the default) then the date is assumed to be in
* seconds from the epoch.
*
* @return A String if set or <code>null</code>
*/
public String getTimeFormat() {
return timeFormat;
}
/**
* If the data is in a delineated format with a header e.g. csv or tsv
* this is the delimiter character used. This is only applicable if
* {@linkplain #getFormat()} is {@link DataDescription.DataFormat#DELIMITED}.
* The default value is {@value #DEFAULT_DELIMITER}
*
* @return A char
*/
public char getFieldDelimiter() {
return fieldDelimiter;
}
/**
* The quote character used in delineated formats.
* Defaults to {@value #DEFAULT_QUOTE_CHAR}
*
* @return The delineated format quote character
*/
public char getQuoteCharacter() {
return quoteCharacter;
}
/**
* Returns true if the data described by this object needs
* transforming before processing by autodetect.
* A transformation must be applied if either the time format is
* not seconds since the epoch or the data is in JSON format.
*
* @return True if the data should be transformed.
*/
public boolean transform() {
return dataFormat == DataFormat.JSON ||
isTransformTime();
}
/**
* Return true if the time is in a format that needs transforming.
* Any time format that isn't {@value #EPOCH} or <code>null</code>
* needs transforming.
*
* @return True if the time field needs to be transformed.
*/
public boolean isTransformTime() {
return timeFormat != null && !EPOCH.equals(timeFormat);
}
/**
* Return true if the time format is {@value #EPOCH_MS}
*
* @return True if the date is in milli-seconds since the epoch.
*/
public boolean isEpochMs() {
return EPOCH_MS.equals(timeFormat);
}
private static char extractChar(XContentParser parser) throws IOException {
if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
String charStr = parser.text();
if (charStr.length() != 1) {
throw new IllegalArgumentException("String must be a single character, found [" + charStr + "]");
}
return charStr.charAt(0);
}
throw new IllegalArgumentException("Unsupported token [" + parser.currentToken() + "]");
}
/**
* Overridden equality test
*/
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof DataDescription == false) {
return false;
}
DataDescription that = (DataDescription) other;
return this.dataFormat == that.dataFormat &&
this.quoteCharacter == that.quoteCharacter &&
Objects.equals(this.timeFieldName, that.timeFieldName) &&
Objects.equals(this.timeFormat, that.timeFormat) &&
Objects.equals(this.fieldDelimiter, that.fieldDelimiter);
}
@Override
public int hashCode() {
return Objects.hash(dataFormat, quoteCharacter, timeFieldName,
timeFormat, fieldDelimiter);
}
public static class Builder {
private DataFormat dataFormat = DataFormat.DELIMITED;
private String timeFieldName = DEFAULT_TIME_FIELD;
private String timeFormat = EPOCH;
private char fieldDelimiter = DEFAULT_DELIMITER;
private char quoteCharacter = DEFAULT_QUOTE_CHAR;
public void setFormat(DataFormat format) {
dataFormat = ExceptionsHelper.requireNonNull(format, FORMAT_FIELD.getPreferredName() + " must not be null");
}
private void setFormat(String format) {
setFormat(DataFormat.forString(format));
}
public void setTimeField(String fieldName) {
timeFieldName = ExceptionsHelper.requireNonNull(fieldName, TIME_FIELD_NAME_FIELD.getPreferredName() + " must not be null");
}
public void setTimeFormat(String format) {
ExceptionsHelper.requireNonNull(format, TIME_FORMAT_FIELD.getPreferredName() + " must not be null");
switch (format) {
case EPOCH:
case EPOCH_MS:
break;
default:
try {
DateTimeFormatterTimestampConverter.ofPattern(format);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_INVALID_TIMEFORMAT, format));
}
}
timeFormat = format;
}
public void setFieldDelimiter(char delimiter) {
fieldDelimiter = delimiter;
}
public void setQuoteCharacter(char value) {
quoteCharacter = value;
}
public DataDescription build() {
return new DataDescription(dataFormat, timeFieldName, timeFormat, fieldDelimiter, quoteCharacter);
}
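// Illustrative usage only (not part of the original source): describe JSON input
// whose "timestamp" field holds milliseconds since the epoch.
//
//     DataDescription.Builder dd = new DataDescription.Builder();
//     dd.setFormat(DataFormat.JSON);
//     dd.setTimeField("timestamp");
//     dd.setTimeFormat(DataDescription.EPOCH_MS);
//     DataDescription description = dd.build();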
}
}

View File

@ -0,0 +1,780 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.prelert.job.config.DefaultDetectorDescription;
import org.elasticsearch.xpack.prelert.job.detectionrules.DetectionRule;
import org.elasticsearch.xpack.prelert.job.detectionrules.RuleCondition;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
/**
* Defines the fields to be used in the analysis.
* <code>fieldname</code> must be set and only one of <code>byFieldName</code>
* and <code>overFieldName</code> should be set.
*/
public class Detector extends ToXContentToBytes implements Writeable {
public enum ExcludeFrequent implements Writeable {
ALL("all"),
NONE("none"),
BY("by"),
OVER("over");
private final String token;
ExcludeFrequent(String token) {
this.token = token;
}
public String getToken() {
return token;
}
/**
* Case-insensitive from string method.
* Works with either ALL, all, etc.
*
* @param value String representation
* @return The exclude frequent setting
*/
public static ExcludeFrequent forString(String value) {
return valueOf(value.toUpperCase(Locale.ROOT));
}
public static ExcludeFrequent readFromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown ExcludeFrequent ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
}
public static final ParseField DETECTOR_FIELD = new ParseField("detector");
public static final ParseField DETECTOR_DESCRIPTION_FIELD = new ParseField("detectorDescription");
public static final ParseField FUNCTION_FIELD = new ParseField("function");
public static final ParseField FIELD_NAME_FIELD = new ParseField("fieldName");
public static final ParseField BY_FIELD_NAME_FIELD = new ParseField("byFieldName");
public static final ParseField OVER_FIELD_NAME_FIELD = new ParseField("overFieldName");
public static final ParseField PARTITION_FIELD_NAME_FIELD = new ParseField("partitionFieldName");
public static final ParseField USE_NULL_FIELD = new ParseField("useNull");
public static final ParseField EXCLUDE_FREQUENT_FIELD = new ParseField("excludeFrequent");
public static final ParseField DETECTOR_RULES_FIELD = new ParseField("detectorRules");
public static final ObjectParser<Builder, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>("detector", Builder::new);
static {
PARSER.declareString(Builder::setDetectorDescription, DETECTOR_DESCRIPTION_FIELD);
PARSER.declareString(Builder::setFunction, FUNCTION_FIELD);
PARSER.declareString(Builder::setFieldName, FIELD_NAME_FIELD);
PARSER.declareString(Builder::setByFieldName, BY_FIELD_NAME_FIELD);
PARSER.declareString(Builder::setOverFieldName, OVER_FIELD_NAME_FIELD);
PARSER.declareString(Builder::setPartitionFieldName, PARTITION_FIELD_NAME_FIELD);
PARSER.declareBoolean(Builder::setUseNull, USE_NULL_FIELD);
PARSER.declareField(Builder::setExcludeFrequent, p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return ExcludeFrequent.forString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, EXCLUDE_FREQUENT_FIELD, ObjectParser.ValueType.STRING);
PARSER.declareObjectArray(Builder::setDetectorRules, DetectionRule.PARSER, DETECTOR_RULES_FIELD);
}
public static final String COUNT = "count";
public static final String HIGH_COUNT = "high_count";
public static final String LOW_COUNT = "low_count";
public static final String NON_ZERO_COUNT = "non_zero_count";
public static final String LOW_NON_ZERO_COUNT = "low_non_zero_count";
public static final String HIGH_NON_ZERO_COUNT = "high_non_zero_count";
public static final String NZC = "nzc";
public static final String LOW_NZC = "low_nzc";
public static final String HIGH_NZC = "high_nzc";
public static final String DISTINCT_COUNT = "distinct_count";
public static final String LOW_DISTINCT_COUNT = "low_distinct_count";
public static final String HIGH_DISTINCT_COUNT = "high_distinct_count";
public static final String DC = "dc";
public static final String LOW_DC = "low_dc";
public static final String HIGH_DC = "high_dc";
public static final String RARE = "rare";
public static final String FREQ_RARE = "freq_rare";
public static final String INFO_CONTENT = "info_content";
public static final String LOW_INFO_CONTENT = "low_info_content";
public static final String HIGH_INFO_CONTENT = "high_info_content";
public static final String METRIC = "metric";
public static final String MEAN = "mean";
public static final String MEDIAN = "median";
public static final String HIGH_MEAN = "high_mean";
public static final String LOW_MEAN = "low_mean";
public static final String AVG = "avg";
public static final String HIGH_AVG = "high_avg";
public static final String LOW_AVG = "low_avg";
public static final String MIN = "min";
public static final String MAX = "max";
public static final String SUM = "sum";
public static final String LOW_SUM = "low_sum";
public static final String HIGH_SUM = "high_sum";
public static final String NON_NULL_SUM = "non_null_sum";
public static final String LOW_NON_NULL_SUM = "low_non_null_sum";
public static final String HIGH_NON_NULL_SUM = "high_non_null_sum";
/**
* Population variance is called varp to match Splunk
*/
public static final String POPULATION_VARIANCE = "varp";
public static final String LOW_POPULATION_VARIANCE = "low_varp";
public static final String HIGH_POPULATION_VARIANCE = "high_varp";
public static final String TIME_OF_DAY = "time_of_day";
public static final String TIME_OF_WEEK = "time_of_week";
public static final String LAT_LONG = "lat_long";
/**
* The set of valid function names.
*/
public static final Set<String> ANALYSIS_FUNCTIONS =
new HashSet<>(Arrays.asList(
// The convention here is that synonyms (only) go on the same line
COUNT,
HIGH_COUNT,
LOW_COUNT,
NON_ZERO_COUNT, NZC,
LOW_NON_ZERO_COUNT, LOW_NZC,
HIGH_NON_ZERO_COUNT, HIGH_NZC,
DISTINCT_COUNT, DC,
LOW_DISTINCT_COUNT, LOW_DC,
HIGH_DISTINCT_COUNT, HIGH_DC,
RARE,
FREQ_RARE,
INFO_CONTENT,
LOW_INFO_CONTENT,
HIGH_INFO_CONTENT,
METRIC,
MEAN, AVG,
HIGH_MEAN, HIGH_AVG,
LOW_MEAN, LOW_AVG,
MEDIAN,
MIN,
MAX,
SUM,
LOW_SUM,
HIGH_SUM,
NON_NULL_SUM,
LOW_NON_NULL_SUM,
HIGH_NON_NULL_SUM,
POPULATION_VARIANCE,
LOW_POPULATION_VARIANCE,
HIGH_POPULATION_VARIANCE,
TIME_OF_DAY,
TIME_OF_WEEK,
LAT_LONG
));
/**
* The set of functions that do not require a field, by field or over field
*/
public static final Set<String> COUNT_WITHOUT_FIELD_FUNCTIONS =
new HashSet<>(Arrays.asList(
COUNT,
HIGH_COUNT,
LOW_COUNT,
NON_ZERO_COUNT, NZC,
LOW_NON_ZERO_COUNT, LOW_NZC,
HIGH_NON_ZERO_COUNT, HIGH_NZC,
TIME_OF_DAY,
TIME_OF_WEEK
));
/**
* The set of functions that require a fieldname
*/
public static final Set<String> FIELD_NAME_FUNCTIONS =
new HashSet<>(Arrays.asList(
DISTINCT_COUNT, DC,
LOW_DISTINCT_COUNT, LOW_DC,
HIGH_DISTINCT_COUNT, HIGH_DC,
INFO_CONTENT,
LOW_INFO_CONTENT,
HIGH_INFO_CONTENT,
METRIC,
MEAN, AVG,
HIGH_MEAN, HIGH_AVG,
LOW_MEAN, LOW_AVG,
MEDIAN,
MIN,
MAX,
SUM,
LOW_SUM,
HIGH_SUM,
NON_NULL_SUM,
LOW_NON_NULL_SUM,
HIGH_NON_NULL_SUM,
POPULATION_VARIANCE,
LOW_POPULATION_VARIANCE,
HIGH_POPULATION_VARIANCE,
LAT_LONG
));
/**
* The set of functions that require a by fieldname
*/
public static final Set<String> BY_FIELD_NAME_FUNCTIONS =
new HashSet<>(Arrays.asList(
RARE,
FREQ_RARE
));
/**
* The set of functions that require an over fieldname
*/
public static final Set<String> OVER_FIELD_NAME_FUNCTIONS =
new HashSet<>(Arrays.asList(
FREQ_RARE
));
/**
* The set of functions that cannot have a by fieldname
*/
public static final Set<String> NO_BY_FIELD_NAME_FUNCTIONS =
new HashSet<>();
/**
* The set of functions that cannot have an over fieldname
*/
public static final Set<String> NO_OVER_FIELD_NAME_FUNCTIONS =
new HashSet<>(Arrays.asList(
NON_ZERO_COUNT, NZC,
LOW_NON_ZERO_COUNT, LOW_NZC,
HIGH_NON_ZERO_COUNT, HIGH_NZC
));
/**
* The set of functions that must not be used with overlapping buckets
*/
public static final Set<String> NO_OVERLAPPING_BUCKETS_FUNCTIONS =
new HashSet<>(Arrays.asList(
RARE,
FREQ_RARE
));
/**
* The set of functions that should not be used with overlapping buckets
* as they gain no benefit but have overhead
*/
public static final Set<String> OVERLAPPING_BUCKETS_FUNCTIONS_NOT_NEEDED =
new HashSet<>(Arrays.asList(
MIN,
MAX,
TIME_OF_DAY,
TIME_OF_WEEK
));
/**
* field names cannot contain any of these characters
* ", \
*/
public static final Character[] PROHIBITED_FIELDNAME_CHARACTERS = {'"', '\\'};
public static final String PROHIBITED = String.join(",",
Arrays.stream(PROHIBITED_FIELDNAME_CHARACTERS).map(
c -> Character.toString(c)).collect(Collectors.toList()));
private final String detectorDescription;
private final String function;
private final String fieldName;
private final String byFieldName;
private final String overFieldName;
private final String partitionFieldName;
private final boolean useNull;
private final ExcludeFrequent excludeFrequent;
private final List<DetectionRule> detectorRules;
public Detector(StreamInput in) throws IOException {
detectorDescription = in.readString();
function = in.readString();
fieldName = in.readOptionalString();
byFieldName = in.readOptionalString();
overFieldName = in.readOptionalString();
partitionFieldName = in.readOptionalString();
useNull = in.readBoolean();
excludeFrequent = in.readBoolean() ? ExcludeFrequent.readFromStream(in) : null;
detectorRules = in.readList(DetectionRule::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(detectorDescription);
out.writeString(function);
out.writeOptionalString(fieldName);
out.writeOptionalString(byFieldName);
out.writeOptionalString(overFieldName);
out.writeOptionalString(partitionFieldName);
out.writeBoolean(useNull);
if (excludeFrequent != null) {
out.writeBoolean(true);
excludeFrequent.writeTo(out);
} else {
out.writeBoolean(false);
}
out.writeList(detectorRules);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(DETECTOR_DESCRIPTION_FIELD.getPreferredName(), detectorDescription);
builder.field(FUNCTION_FIELD.getPreferredName(), function);
if (fieldName != null) {
builder.field(FIELD_NAME_FIELD.getPreferredName(), fieldName);
}
if (byFieldName != null) {
builder.field(BY_FIELD_NAME_FIELD.getPreferredName(), byFieldName);
}
if (overFieldName != null) {
builder.field(OVER_FIELD_NAME_FIELD.getPreferredName(), overFieldName);
}
if (partitionFieldName != null) {
builder.field(PARTITION_FIELD_NAME_FIELD.getPreferredName(), partitionFieldName);
}
if (useNull) {
builder.field(USE_NULL_FIELD.getPreferredName(), useNull);
}
if (excludeFrequent != null) {
builder.field(EXCLUDE_FREQUENT_FIELD.getPreferredName(), excludeFrequent);
}
builder.field(DETECTOR_RULES_FIELD.getPreferredName(), detectorRules);
builder.endObject();
return builder;
}
private Detector(String detectorDescription, String function, String fieldName, String byFieldName, String overFieldName,
String partitionFieldName, boolean useNull, ExcludeFrequent excludeFrequent, List<DetectionRule> detectorRules) {
this.function = function;
this.fieldName = fieldName;
this.byFieldName = byFieldName;
this.overFieldName = overFieldName;
this.partitionFieldName = partitionFieldName;
this.useNull = useNull;
this.excludeFrequent = excludeFrequent;
// REMOVE THIS LINE WHEN REMOVING JACKSON_DATABIND:
detectorRules = detectorRules != null ? detectorRules : Collections.emptyList();
this.detectorRules = Collections.unmodifiableList(detectorRules);
this.detectorDescription = detectorDescription != null ? detectorDescription : DefaultDetectorDescription.of(this);
}
public String getDetectorDescription() {
return detectorDescription;
}
/**
* The analysis function used, e.g. count, rare, min etc. There is no
* validation to check this value is one of a predefined set
*
* @return The function or <code>null</code> if not set
*/
public String getFunction() {
return function;
}
/**
* The Analysis field
*
* @return The field to analyse
*/
public String getFieldName() {
return fieldName;
}
/**
* The 'by' field or <code>null</code> if not set.
*
* @return The 'by' field
*/
public String getByFieldName() {
return byFieldName;
}
/**
* The 'over' field or <code>null</code> if not set.
*
* @return The 'over' field
*/
public String getOverFieldName() {
return overFieldName;
}
/**
* Segments the analysis along another field so that each instance of the
* partition field has a completely independent baseline
*
* @return The Partition Field
*/
public String getPartitionFieldName() {
return partitionFieldName;
}
/**
* Where there isn't a value for the 'by' or 'over' field should a new
* series be used as the 'null' series.
*
* @return true if the 'null' series should be created
*/
public boolean isUseNull() {
return useNull;
}
/**
* Excludes frequently-occurring metrics from the analysis;
* can apply to 'by' field, 'over' field, or both
*
* @return the value that the user set
*/
public ExcludeFrequent getExcludeFrequent() {
return excludeFrequent;
}
public List<DetectionRule> getDetectorRules() {
return detectorRules;
}
/**
* Returns a list with the byFieldName, overFieldName and partitionFieldName that are not null
*
* @return a list with the byFieldName, overFieldName and partitionFieldName that are not null
*/
public List<String> extractAnalysisFields() {
List<String> analysisFields = Arrays.asList(getByFieldName(),
getOverFieldName(), getPartitionFieldName());
return analysisFields.stream().filter(item -> item != null).collect(Collectors.toList());
}
public Set<String> extractReferencedLists() {
return detectorRules == null ? Collections.emptySet()
: detectorRules.stream().map(DetectionRule::extractReferencedLists)
.flatMap(Set::stream).collect(Collectors.toSet());
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof Detector == false) {
return false;
}
Detector that = (Detector) other;
return Objects.equals(this.detectorDescription, that.detectorDescription) &&
Objects.equals(this.function, that.function) &&
Objects.equals(this.fieldName, that.fieldName) &&
Objects.equals(this.byFieldName, that.byFieldName) &&
Objects.equals(this.overFieldName, that.overFieldName) &&
Objects.equals(this.partitionFieldName, that.partitionFieldName) &&
Objects.equals(this.useNull, that.useNull) &&
Objects.equals(this.excludeFrequent, that.excludeFrequent) &&
Objects.equals(this.detectorRules, that.detectorRules);
}
@Override
public int hashCode() {
return Objects.hash(detectorDescription, function, fieldName, byFieldName,
overFieldName, partitionFieldName, useNull, excludeFrequent,
detectorRules);
}
public static class Builder {
/**
* Functions that do not support rules:
* <ul>
* <li>lat_long - because it is a multivariate feature
* <li>metric - because having the same conditions on min,max,mean is
* error-prone
* </ul>
*/
static final Set<String> FUNCTIONS_WITHOUT_RULE_SUPPORT = new HashSet<>(Arrays.asList(Detector.LAT_LONG, Detector.METRIC));
private String detectorDescription;
private String function;
private String fieldName;
private String byFieldName;
private String overFieldName;
private String partitionFieldName;
private boolean useNull = false;
private ExcludeFrequent excludeFrequent;
private List<DetectionRule> detectorRules = Collections.emptyList();
public Builder() {
}
public Builder(Detector detector) {
detectorDescription = detector.detectorDescription;
function = detector.function;
fieldName = detector.fieldName;
byFieldName = detector.byFieldName;
overFieldName = detector.overFieldName;
partitionFieldName = detector.partitionFieldName;
useNull = detector.useNull;
excludeFrequent = detector.excludeFrequent;
detectorRules = new ArrayList<>(detector.detectorRules.size());
for (DetectionRule rule : detector.getDetectorRules()) {
detectorRules.add(rule);
}
}
public Builder(String function, String fieldName) {
this.function = function;
this.fieldName = fieldName;
}
public void setDetectorDescription(String detectorDescription) {
this.detectorDescription = detectorDescription;
}
public void setFunction(String function) {
this.function = function;
}
public void setFieldName(String fieldName) {
this.fieldName = fieldName;
}
public void setByFieldName(String byFieldName) {
this.byFieldName = byFieldName;
}
public void setOverFieldName(String overFieldName) {
this.overFieldName = overFieldName;
}
public void setPartitionFieldName(String partitionFieldName) {
this.partitionFieldName = partitionFieldName;
}
public void setUseNull(boolean useNull) {
this.useNull = useNull;
}
public void setExcludeFrequent(ExcludeFrequent excludeFrequent) {
this.excludeFrequent = excludeFrequent;
}
public void setDetectorRules(List<DetectionRule> detectorRules) {
this.detectorRules = detectorRules;
}
public List<DetectionRule> getDetectorRules() {
return detectorRules;
}
public Detector build() {
return build(false);
}
public Detector build(boolean isSummarised) {
boolean emptyField = Strings.isEmpty(fieldName);
boolean emptyByField = Strings.isEmpty(byFieldName);
boolean emptyOverField = Strings.isEmpty(overFieldName);
if (Detector.ANALYSIS_FUNCTIONS.contains(function) == false) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_UNKNOWN_FUNCTION, function));
}
if (emptyField && emptyByField && emptyOverField) {
if (!Detector.COUNT_WITHOUT_FIELD_FUNCTIONS.contains(function)) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_NO_ANALYSIS_FIELD_NOT_COUNT));
}
}
if (isSummarised && Detector.METRIC.equals(function)) {
throw new IllegalArgumentException(
Messages.getMessage(Messages.JOB_CONFIG_FUNCTION_INCOMPATIBLE_PRESUMMARIZED, Detector.METRIC));
}
// check functions have required fields
if (emptyField && Detector.FIELD_NAME_FUNCTIONS.contains(function)) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_FUNCTION_REQUIRES_FIELDNAME, function));
}
if (!emptyField && (Detector.FIELD_NAME_FUNCTIONS.contains(function) == false)) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_FIELDNAME_INCOMPATIBLE_FUNCTION, function));
}
if (emptyByField && Detector.BY_FIELD_NAME_FUNCTIONS.contains(function)) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_FUNCTION_REQUIRES_BYFIELD, function));
}
if (!emptyByField && Detector.NO_BY_FIELD_NAME_FUNCTIONS.contains(function)) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_BYFIELD_INCOMPATIBLE_FUNCTION, function));
}
if (emptyOverField && Detector.OVER_FIELD_NAME_FUNCTIONS.contains(function)) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_FUNCTION_REQUIRES_OVERFIELD, function));
}
if (!emptyOverField && Detector.NO_OVER_FIELD_NAME_FUNCTIONS.contains(function)) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_OVERFIELD_INCOMPATIBLE_FUNCTION, function));
}
// field names cannot contain certain characters
String[] fields = { fieldName, byFieldName, overFieldName, partitionFieldName };
for (String field : fields) {
verifyFieldName(field);
}
String function = this.function == null ? Detector.METRIC : this.function;
if (detectorRules.isEmpty() == false) {
if (FUNCTIONS_WITHOUT_RULE_SUPPORT.contains(function)) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_NOT_SUPPORTED_BY_FUNCTION, function);
throw new IllegalArgumentException(msg);
}
for (DetectionRule rule : detectorRules) {
checkScoping(rule);
}
}
return new Detector(detectorDescription, function, fieldName, byFieldName, overFieldName, partitionFieldName,
useNull, excludeFrequent, detectorRules);
}
public List<String> extractAnalysisFields() {
List<String> analysisFields = Arrays.asList(byFieldName,
overFieldName, partitionFieldName);
return analysisFields.stream().filter(item -> item != null).collect(Collectors.toList());
}
/**
* Check that the characters used in a field name will not cause problems.
*
* @param field
* The field name to be validated
* @return true
*/
public static boolean verifyFieldName(String field) throws ElasticsearchParseException {
if (field != null && containsInvalidChar(field)) {
throw new IllegalArgumentException(
Messages.getMessage(Messages.JOB_CONFIG_INVALID_FIELDNAME_CHARS, field, Detector.PROHIBITED));
}
return true;
}
private static boolean containsInvalidChar(String field) {
for (Character ch : Detector.PROHIBITED_FIELDNAME_CHARACTERS) {
if (field.indexOf(ch) >= 0) {
return true;
}
}
return field.chars().anyMatch(ch -> Character.isISOControl(ch));
}
private void checkScoping(DetectionRule rule) throws ElasticsearchParseException {
String targetFieldName = rule.getTargetFieldName();
checkTargetFieldNameIsValid(extractAnalysisFields(), targetFieldName);
List<String> validOptions = getValidFieldNameOptions(rule);
for (RuleCondition condition : rule.getRuleConditions()) {
if (!validOptions.contains(condition.getFieldName())) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_INVALID_FIELD_NAME, validOptions,
condition.getFieldName());
throw new IllegalArgumentException(msg);
}
}
}
private void checkTargetFieldNameIsValid(List<String> analysisFields, String targetFieldName)
throws ElasticsearchParseException {
if (targetFieldName != null && !analysisFields.contains(targetFieldName)) {
String msg =
Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_INVALID_TARGET_FIELD_NAME, analysisFields, targetFieldName);
throw new IllegalArgumentException(msg);
}
}
private List<String> getValidFieldNameOptions(DetectionRule rule) {
List<String> result = new ArrayList<>();
if (overFieldName != null) {
result.add(byFieldName == null ? overFieldName : byFieldName);
} else if (byFieldName != null) {
result.add(byFieldName);
}
if (rule.getTargetFieldName() != null) {
ScopingLevel targetLevel = ScopingLevel.from(this, rule.getTargetFieldName());
result = result.stream().filter(field -> targetLevel.isHigherThan(ScopingLevel.from(this, field)))
.collect(Collectors.toList());
}
if (isEmptyFieldNameAllowed(rule)) {
result.add(null);
}
return result;
}
private boolean isEmptyFieldNameAllowed(DetectionRule rule) {
List<String> analysisFields = extractAnalysisFields();
return analysisFields.isEmpty() || (rule.getTargetFieldName() != null && analysisFields.size() == 1);
}
enum ScopingLevel {
PARTITION(3),
OVER(2),
BY(1);
int level;
ScopingLevel(int level) {
this.level = level;
}
boolean isHigherThan(ScopingLevel other) {
return level > other.level;
}
static ScopingLevel from(Detector.Builder detector, String fieldName) {
if (fieldName.equals(detector.partitionFieldName)) {
return ScopingLevel.PARTITION;
}
if (fieldName.equals(detector.overFieldName)) {
return ScopingLevel.OVER;
}
if (fieldName.equals(detector.byFieldName)) {
return ScopingLevel.BY;
}
throw new IllegalArgumentException(
"fieldName '" + fieldName + "' does not match an analysis field");
}
}
}
}
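A minimal usage sketch for the builder above (illustrative only; field names such as "responsetime" and "airline" are made up here). It shows how build() applies the per-function rules defined by the constant sets at the top of this class.

// Illustrative sketch: build a "mean" detector and let build() validate it.
Detector.Builder builder = new Detector.Builder("mean", "responsetime");
builder.setByFieldName("airline");          // optional for "mean"
builder.setPartitionFieldName("region");    // optional for "mean"
Detector meanDetector = builder.build();    // valid: "mean" is in FIELD_NAME_FUNCTIONS

Detector.Builder invalid = new Detector.Builder("rare", null);
// invalid.build() would throw IllegalArgumentException: "rare" is not a
// count-style function, and no field, by field or over field has been set.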

View File

@ -0,0 +1,51 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
import java.util.Locale;
public enum IgnoreDowntime implements Writeable {
NEVER, ONCE, ALWAYS;
/**
* <p>
* Parses a string and returns the corresponding enum value.
* </p>
* <p>
* The method differs from {@link #valueOf(String)} by being
* able to handle leading/trailing whitespace and being case
* insensitive.
* </p>
* <p>
* If there is no match {@link IllegalArgumentException} is thrown.
* </p>
*
* @param value A String that should match one of the enum values
* @return the matching enum value
*/
public static IgnoreDowntime fromString(String value) {
return valueOf(value.trim().toUpperCase(Locale.ROOT));
}
public static IgnoreDowntime fromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown public enum JobSchedulerStatus {\n ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
}
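A short sketch (illustrative, not from this change) of the lenient parsing described in the Javadoc above:

IgnoreDowntime once = IgnoreDowntime.fromString("  Once ");   // ONCE: input is trimmed and case-insensitive
IgnoreDowntime always = IgnoreDowntime.fromString("always");  // ALWAYS
// IgnoreDowntime.fromString("sometimes") throws IllegalArgumentException from valueOf()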

View File

@ -0,0 +1,884 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import org.elasticsearch.xpack.prelert.job.transform.TransformConfig;
import org.elasticsearch.xpack.prelert.job.transform.TransformConfigs;
import org.elasticsearch.xpack.prelert.job.transform.verification.TransformConfigsVerifier;
import org.elasticsearch.xpack.prelert.utils.time.TimeUtils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Pattern;
/**
* This class represents a configured and created Job. The creation time is set
* to the time the object was constructed, Status is set to
* {@link JobStatus#RUNNING} and the finished time and last data time fields are
* {@code null} until the job has seen some data or it is finished respectively.
* If the job was created to read data from a list of files, FileUrls will be a
* non-empty list; otherwise the job expects data to be streamed to it.
*/
public class Job extends AbstractDiffable<Job> implements Writeable, ToXContent {
public static final Job PROTO = new Job(null, null, null, null, null, 0L, null, null, null, null, null,
null, null, null, null, null, null, null, null, null, null, null);
public static final long DEFAULT_BUCKETSPAN = 300;
public static final String TYPE = "job";
/*
* Field names used in serialization
*/
public static final ParseField ID = new ParseField("jobId");
public static final ParseField ANALYSIS_CONFIG = new ParseField("analysisConfig");
public static final ParseField ANALYSIS_LIMITS = new ParseField("analysisLimits");
public static final ParseField COUNTS = new ParseField("counts");
public static final ParseField CREATE_TIME = new ParseField("createTime");
public static final ParseField CUSTOM_SETTINGS = new ParseField("customSettings");
public static final ParseField DATA_DESCRIPTION = new ParseField("dataDescription");
public static final ParseField DESCRIPTION = new ParseField("description");
public static final ParseField FINISHED_TIME = new ParseField("finishedTime");
public static final ParseField IGNORE_DOWNTIME = new ParseField("ignoreDowntime");
public static final ParseField LAST_DATA_TIME = new ParseField("lastDataTime");
public static final ParseField MODEL_DEBUG_CONFIG = new ParseField("modelDebugConfig");
public static final ParseField SCHEDULER_CONFIG = new ParseField("schedulerConfig");
public static final ParseField RENORMALIZATION_WINDOW_DAYS = new ParseField("renormalizationWindowDays");
public static final ParseField BACKGROUND_PERSIST_INTERVAL = new ParseField("backgroundPersistInterval");
public static final ParseField MODEL_SNAPSHOT_RETENTION_DAYS = new ParseField("modelSnapshotRetentionDays");
public static final ParseField RESULTS_RETENTION_DAYS = new ParseField("resultsRetentionDays");
public static final ParseField TIMEOUT = new ParseField("timeout");
public static final ParseField TRANSFORMS = new ParseField("transforms");
public static final ParseField MODEL_SIZE_STATS = new ParseField("modelSizeStats");
public static final ParseField AVERAGE_BUCKET_PROCESSING_TIME = new ParseField("averageBucketProcessingTimeMs");
public static final ParseField MODEL_SNAPSHOT_ID = new ParseField("modelSnapshotId");
public static final ObjectParser<Builder, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>("job_details", Builder::new);
static {
PARSER.declareString(Builder::setId, ID);
PARSER.declareStringOrNull(Builder::setDescription, DESCRIPTION);
PARSER.declareField(Builder::setCreateTime, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" + CREATE_TIME.getPreferredName() + "]");
}, CREATE_TIME, ValueType.VALUE);
PARSER.declareField(Builder::setFinishedTime, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException(
"unexpected token [" + p.currentToken() + "] for [" + FINISHED_TIME.getPreferredName() + "]");
}, FINISHED_TIME, ValueType.VALUE);
PARSER.declareField(Builder::setLastDataTime, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException(
"unexpected token [" + p.currentToken() + "] for [" + LAST_DATA_TIME.getPreferredName() + "]");
}, LAST_DATA_TIME, ValueType.VALUE);
PARSER.declareObject(Builder::setAnalysisConfig, AnalysisConfig.PARSER, ANALYSIS_CONFIG);
PARSER.declareObject(Builder::setAnalysisLimits, AnalysisLimits.PARSER, ANALYSIS_LIMITS);
PARSER.declareObject(Builder::setSchedulerConfig, SchedulerConfig.PARSER, SCHEDULER_CONFIG);
PARSER.declareObject(Builder::setDataDescription, DataDescription.PARSER, DATA_DESCRIPTION);
PARSER.declareObject(Builder::setModelSizeStats, ModelSizeStats.PARSER, MODEL_SIZE_STATS);
PARSER.declareObjectArray(Builder::setTransforms, TransformConfig.PARSER, TRANSFORMS);
PARSER.declareObject(Builder::setModelDebugConfig, ModelDebugConfig.PARSER, MODEL_DEBUG_CONFIG);
PARSER.declareObject(Builder::setCounts, DataCounts.PARSER, COUNTS);
PARSER.declareField(Builder::setIgnoreDowntime, (p, c) -> IgnoreDowntime.fromString(p.text()), IGNORE_DOWNTIME, ValueType.STRING);
PARSER.declareLong(Builder::setTimeout, TIMEOUT);
PARSER.declareLong(Builder::setRenormalizationWindowDays, RENORMALIZATION_WINDOW_DAYS);
PARSER.declareLong(Builder::setBackgroundPersistInterval, BACKGROUND_PERSIST_INTERVAL);
PARSER.declareLong(Builder::setResultsRetentionDays, RESULTS_RETENTION_DAYS);
PARSER.declareLong(Builder::setModelSnapshotRetentionDays, MODEL_SNAPSHOT_RETENTION_DAYS);
PARSER.declareField(Builder::setCustomSettings, (p, c) -> p.map(), CUSTOM_SETTINGS, ValueType.OBJECT);
PARSER.declareDouble(Builder::setAverageBucketProcessingTimeMs, AVERAGE_BUCKET_PROCESSING_TIME);
PARSER.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID);
}
private final String jobId;
private final String description;
// NORELEASE: Use Jodatime instead
private final Date createTime;
private final Date finishedTime;
private final Date lastDataTime;
private final long timeout;
private final AnalysisConfig analysisConfig;
private final AnalysisLimits analysisLimits;
private final SchedulerConfig schedulerConfig;
private final DataDescription dataDescription;
private final ModelSizeStats modelSizeStats;
private final List<TransformConfig> transforms;
private final ModelDebugConfig modelDebugConfig;
private final DataCounts counts;
private final IgnoreDowntime ignoreDowntime;
private final Long renormalizationWindowDays;
private final Long backgroundPersistInterval;
private final Long modelSnapshotRetentionDays;
private final Long resultsRetentionDays;
private final Map<String, Object> customSettings;
private final Double averageBucketProcessingTimeMs;
private final String modelSnapshotId;
public Job(String jobId, String description, Date createTime, Date finishedTime, Date lastDataTime, long timeout,
AnalysisConfig analysisConfig, AnalysisLimits analysisLimits, SchedulerConfig schedulerConfig,
DataDescription dataDescription, ModelSizeStats modelSizeStats, List<TransformConfig> transforms,
ModelDebugConfig modelDebugConfig, DataCounts counts, IgnoreDowntime ignoreDowntime, Long renormalizationWindowDays,
Long backgroundPersistInterval, Long modelSnapshotRetentionDays, Long resultsRetentionDays,
Map<String, Object> customSettings, Double averageBucketProcessingTimeMs, String modelSnapshotId) {
this.jobId = jobId;
this.description = description;
this.createTime = createTime;
this.finishedTime = finishedTime;
this.lastDataTime = lastDataTime;
this.timeout = timeout;
this.analysisConfig = analysisConfig;
this.analysisLimits = analysisLimits;
this.schedulerConfig = schedulerConfig;
this.dataDescription = dataDescription;
this.modelSizeStats = modelSizeStats;
this.transforms = transforms;
this.modelDebugConfig = modelDebugConfig;
this.counts = counts;
this.ignoreDowntime = ignoreDowntime;
this.renormalizationWindowDays = renormalizationWindowDays;
this.backgroundPersistInterval = backgroundPersistInterval;
this.modelSnapshotRetentionDays = modelSnapshotRetentionDays;
this.resultsRetentionDays = resultsRetentionDays;
this.customSettings = customSettings;
this.averageBucketProcessingTimeMs = averageBucketProcessingTimeMs;
this.modelSnapshotId = modelSnapshotId;
}
public Job(StreamInput in) throws IOException {
jobId = in.readString();
description = in.readOptionalString();
createTime = new Date(in.readVLong());
finishedTime = in.readBoolean() ? new Date(in.readVLong()) : null;
lastDataTime = in.readBoolean() ? new Date(in.readVLong()) : null;
timeout = in.readVLong();
analysisConfig = new AnalysisConfig(in);
analysisLimits = in.readOptionalWriteable(AnalysisLimits::new);
schedulerConfig = in.readOptionalWriteable(SchedulerConfig::new);
dataDescription = in.readOptionalWriteable(DataDescription::new);
modelSizeStats = in.readOptionalWriteable(ModelSizeStats::new);
transforms = in.readList(TransformConfig::new);
modelDebugConfig = in.readOptionalWriteable(ModelDebugConfig::new);
counts = in.readOptionalWriteable(DataCounts::new);
ignoreDowntime = in.readOptionalWriteable(IgnoreDowntime::fromStream);
renormalizationWindowDays = in.readOptionalLong();
backgroundPersistInterval = in.readOptionalLong();
modelSnapshotRetentionDays = in.readOptionalLong();
resultsRetentionDays = in.readOptionalLong();
customSettings = in.readMap();
averageBucketProcessingTimeMs = in.readOptionalDouble();
modelSnapshotId = in.readOptionalString();
}
@Override
public Job readFrom(StreamInput in) throws IOException {
return new Job(in);
}
/**
* Return the Job Id. This name is preferred when serialising to the REST
* API.
*
* @return The job Id string
*/
public String getId() {
return jobId;
}
/**
* Return the Job Id. This name is preferred when serialising to the data
* store.
*
* @return The job Id string
*/
public String getJobId() {
return jobId;
}
/**
* The job description
*
* @return job description
*/
public String getDescription() {
return description;
}
/**
* The Job creation time. This name is preferred when serialising to the
* REST API.
*
* @return The date the job was created
*/
public Date getCreateTime() {
return createTime;
}
/**
* The Job creation time. This name is preferred when serialising to the
* data store.
*
* @return The date the job was created
*/
public Date getAtTimestamp() {
return createTime;
}
/**
* The time the job was finished or <code>null</code> if not finished.
*
* @return The date the job was last retired or <code>null</code>
*/
public Date getFinishedTime() {
return finishedTime;
}
/**
* The last time data was uploaded to the job or <code>null</code> if no
* data has been seen.
*
* @return The date at which the last data was processed
*/
public Date getLastDataTime() {
return lastDataTime;
}
/**
* The job timeout setting in seconds. Jobs are retired if they do not
* receive data for this period of time. The default is 600 seconds
*
* @return The timeout period in seconds
*/
public long getTimeout() {
return timeout;
}
/**
* The analysis configuration object
*
* @return The AnalysisConfig
*/
public AnalysisConfig getAnalysisConfig() {
return analysisConfig;
}
/**
* The analysis options object
*
* @return The AnalysisLimits
*/
public AnalysisLimits getAnalysisLimits() {
return analysisLimits;
}
public IgnoreDowntime getIgnoreDowntime() {
return ignoreDowntime;
}
public SchedulerConfig getSchedulerConfig() {
return schedulerConfig;
}
public ModelDebugConfig getModelDebugConfig() {
return modelDebugConfig;
}
/**
* The memory usage object
*
* @return The ModelSizeStats
*/
public ModelSizeStats getModelSizeStats() {
return modelSizeStats;
}
/**
* If not set the input data is assumed to be csv with a '_time' field in
* epoch format.
*
* @return A DataDescription or <code>null</code>
* @see DataDescription
*/
public DataDescription getDataDescription() {
return dataDescription;
}
public List<TransformConfig> getTransforms() {
return transforms;
}
/**
* Processed records count
*
* @return the processed records counts
*/
public DataCounts getCounts() {
return counts;
}
/**
* The duration of the renormalization window in days
*
* @return renormalization window in days
*/
public Long getRenormalizationWindowDays() {
return renormalizationWindowDays;
}
/**
* The background persistence interval in seconds
*
* @return background persistence interval in seconds
*/
public Long getBackgroundPersistInterval() {
return backgroundPersistInterval;
}
public Long getModelSnapshotRetentionDays() {
return modelSnapshotRetentionDays;
}
public Long getResultsRetentionDays() {
return resultsRetentionDays;
}
public Map<String, Object> getCustomSettings() {
return customSettings;
}
public Double getAverageBucketProcessingTimeMs() {
return averageBucketProcessingTimeMs;
}
public String getModelSnapshotId() {
return modelSnapshotId;
}
/**
* Get a list of all input data fields mentioned in the job configuration,
* namely analysis fields, time field and transform input fields.
*
* @return the list of fields - never <code>null</code>
*/
public List<String> allFields() {
Set<String> allFields = new TreeSet<>();
// analysis fields
if (analysisConfig != null) {
allFields.addAll(analysisConfig.analysisFields());
}
// transform input fields
if (transforms != null) {
for (TransformConfig tc : transforms) {
List<String> inputFields = tc.getInputs();
if (inputFields != null) {
allFields.addAll(inputFields);
}
}
}
// time field
if (dataDescription != null) {
String timeField = dataDescription.getTimeField();
if (timeField != null) {
allFields.add(timeField);
}
}
// remove empty strings
allFields.remove("");
return new ArrayList<>(allFields);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(jobId);
out.writeOptionalString(description);
out.writeVLong(createTime.getTime());
if (finishedTime != null) {
out.writeBoolean(true);
out.writeVLong(finishedTime.getTime());
} else {
out.writeBoolean(false);
}
if (lastDataTime != null) {
out.writeBoolean(true);
out.writeVLong(lastDataTime.getTime());
} else {
out.writeBoolean(false);
}
out.writeVLong(timeout);
analysisConfig.writeTo(out);
out.writeOptionalWriteable(analysisLimits);
out.writeOptionalWriteable(schedulerConfig);
out.writeOptionalWriteable(dataDescription);
out.writeOptionalWriteable(modelSizeStats);
out.writeList(transforms);
out.writeOptionalWriteable(modelDebugConfig);
out.writeOptionalWriteable(counts);
out.writeOptionalWriteable(ignoreDowntime);
out.writeOptionalLong(renormalizationWindowDays);
out.writeOptionalLong(backgroundPersistInterval);
out.writeOptionalLong(modelSnapshotRetentionDays);
out.writeOptionalLong(resultsRetentionDays);
out.writeMap(customSettings);
out.writeOptionalDouble(averageBucketProcessingTimeMs);
out.writeOptionalString(modelSnapshotId);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
doXContentBody(builder, params);
builder.endObject();
return builder;
}
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
builder.field(ID.getPreferredName(), jobId);
builder.field(DESCRIPTION.getPreferredName(), description);
builder.field(CREATE_TIME.getPreferredName(), createTime.getTime());
if (finishedTime != null) {
builder.field(FINISHED_TIME.getPreferredName(), finishedTime.getTime());
}
if (lastDataTime != null) {
builder.field(LAST_DATA_TIME.getPreferredName(), lastDataTime.getTime());
}
builder.field(TIMEOUT.getPreferredName(), timeout);
builder.field(ANALYSIS_CONFIG.getPreferredName(), analysisConfig, params);
if (analysisLimits != null) {
builder.field(ANALYSIS_LIMITS.getPreferredName(), analysisLimits, params);
}
if (schedulerConfig != null) {
builder.field(SCHEDULER_CONFIG.getPreferredName(), schedulerConfig, params);
}
if (dataDescription != null) {
builder.field(DATA_DESCRIPTION.getPreferredName(), dataDescription, params);
}
if (modelSizeStats != null) {
builder.field(MODEL_SIZE_STATS.getPreferredName(), modelSizeStats, params);
}
if (transforms != null) {
builder.field(TRANSFORMS.getPreferredName(), transforms);
}
if (modelDebugConfig != null) {
builder.field(MODEL_DEBUG_CONFIG.getPreferredName(), modelDebugConfig, params);
}
if (counts != null) {
builder.field(COUNTS.getPreferredName(), counts, params);
}
if (ignoreDowntime != null) {
builder.field(IGNORE_DOWNTIME.getPreferredName(), ignoreDowntime);
}
if (renormalizationWindowDays != null) {
builder.field(RENORMALIZATION_WINDOW_DAYS.getPreferredName(), renormalizationWindowDays);
}
if (backgroundPersistInterval != null) {
builder.field(BACKGROUND_PERSIST_INTERVAL.getPreferredName(), backgroundPersistInterval);
}
if (modelSnapshotRetentionDays != null) {
builder.field(MODEL_SNAPSHOT_RETENTION_DAYS.getPreferredName(), modelSnapshotRetentionDays);
}
if (resultsRetentionDays != null) {
builder.field(RESULTS_RETENTION_DAYS.getPreferredName(), resultsRetentionDays);
}
if (customSettings != null) {
builder.field(CUSTOM_SETTINGS.getPreferredName(), customSettings);
}
if (averageBucketProcessingTimeMs != null) {
builder.field(AVERAGE_BUCKET_PROCESSING_TIME.getPreferredName(), averageBucketProcessingTimeMs);
}
if (modelSnapshotId != null){
builder.field(MODEL_SNAPSHOT_ID.getPreferredName(), modelSnapshotId);
}
return builder;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof Job == false) {
return false;
}
Job that = (Job) other;
return Objects.equals(this.jobId, that.jobId) && Objects.equals(this.description, that.description)
&& Objects.equals(this.createTime, that.createTime)
&& Objects.equals(this.finishedTime, that.finishedTime) && Objects.equals(this.lastDataTime, that.lastDataTime)
&& (this.timeout == that.timeout) && Objects.equals(this.analysisConfig, that.analysisConfig)
&& Objects.equals(this.analysisLimits, that.analysisLimits) && Objects.equals(this.dataDescription, that.dataDescription)
&& Objects.equals(this.modelDebugConfig, that.modelDebugConfig) && Objects.equals(this.modelSizeStats, that.modelSizeStats)
&& Objects.equals(this.transforms, that.transforms) && Objects.equals(this.counts, that.counts)
&& Objects.equals(this.ignoreDowntime, that.ignoreDowntime)
&& Objects.equals(this.renormalizationWindowDays, that.renormalizationWindowDays)
&& Objects.equals(this.backgroundPersistInterval, that.backgroundPersistInterval)
&& Objects.equals(this.modelSnapshotRetentionDays, that.modelSnapshotRetentionDays)
&& Objects.equals(this.resultsRetentionDays, that.resultsRetentionDays)
&& Objects.equals(this.customSettings, that.customSettings)
&& Objects.equals(this.modelSnapshotId, that.modelSnapshotId);
}
@Override
public int hashCode() {
return Objects.hash(jobId, description, createTime, finishedTime, lastDataTime, timeout, analysisConfig,
analysisLimits, dataDescription, modelDebugConfig, modelSizeStats, transforms, counts, renormalizationWindowDays,
backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, ignoreDowntime, customSettings,
modelSnapshotId);
}
// Class already extends AbstractDiffable, so this is copied from ToXContentToBytes#toString()
@SuppressWarnings("deprecation")
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
toXContent(builder, EMPTY_PARAMS);
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere
return "{ \"error\" : \"" + org.elasticsearch.ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
public static class Builder {
/**
* Valid jobId characters. Note that '.' is allowed but not documented.
*/
private static final Pattern VALID_JOB_ID_CHAR_PATTERN = Pattern.compile("[a-z0-9_\\-\\.]+");
public static final int MAX_JOB_ID_LENGTH = 64;
public static final long MIN_BACKGROUND_PERSIST_INTERVAL = 3600;
public static final long DEFAULT_TIMEOUT = 600;
private String id;
private String description;
private AnalysisConfig analysisConfig;
private AnalysisLimits analysisLimits;
private SchedulerConfig schedulerConfig;
private List<TransformConfig> transforms = new ArrayList<>();
private ModelSizeStats modelSizeStats;
private DataDescription dataDescription;
private Date createTime;
private Date finishedTime;
private Date lastDataTime;
private Long timeout = DEFAULT_TIMEOUT;
private ModelDebugConfig modelDebugConfig;
private Long renormalizationWindowDays;
private Long backgroundPersistInterval;
private Long modelSnapshotRetentionDays;
private Long resultsRetentionDays;
private DataCounts counts;
private IgnoreDowntime ignoreDowntime;
private Map<String, Object> customSettings;
private Double averageBucketProcessingTimeMs;
private String modelSnapshotId;
public Builder() {
}
public Builder(String id) {
this.id = id;
}
public Builder(Job job) {
this.id = job.getId();
this.description = job.getDescription();
this.analysisConfig = job.getAnalysisConfig();
this.schedulerConfig = job.getSchedulerConfig();
this.transforms = job.getTransforms();
this.modelSizeStats = job.getModelSizeStats();
this.dataDescription = job.getDataDescription();
this.createTime = job.getCreateTime();
this.finishedTime = job.getFinishedTime();
this.lastDataTime = job.getLastDataTime();
this.timeout = job.getTimeout();
this.modelDebugConfig = job.getModelDebugConfig();
this.renormalizationWindowDays = job.getRenormalizationWindowDays();
this.backgroundPersistInterval = job.getBackgroundPersistInterval();
this.resultsRetentionDays = job.getResultsRetentionDays();
this.counts = job.getCounts();
this.ignoreDowntime = job.getIgnoreDowntime();
this.customSettings = job.getCustomSettings();
this.averageBucketProcessingTimeMs = job.getAverageBucketProcessingTimeMs();
this.modelSnapshotId = job.getModelSnapshotId();
}
public void setId(String id) {
this.id = id;
}
public String getId() {
return id;
}
public void setCustomSettings(Map<String, Object> customSettings) {
this.customSettings = customSettings;
}
public void setDescription(String description) {
this.description = description;
}
public void setAnalysisConfig(AnalysisConfig.Builder configBuilder) {
analysisConfig = configBuilder.build();
}
public void setAnalysisLimits(AnalysisLimits analysisLimits) {
if (this.analysisLimits != null) {
long oldMemoryLimit = this.analysisLimits.getModelMemoryLimit();
long newMemoryLimit = analysisLimits.getModelMemoryLimit();
if (newMemoryLimit < oldMemoryLimit) {
throw new IllegalArgumentException(
Messages.getMessage(Messages.JOB_CONFIG_UPDATE_ANALYSIS_LIMITS_MODEL_MEMORY_LIMIT_CANNOT_BE_DECREASED,
oldMemoryLimit, newMemoryLimit));
}
}
this.analysisLimits = analysisLimits;
}
public void setSchedulerConfig(SchedulerConfig.Builder config) {
schedulerConfig = config.build();
}
public void setTimeout(Long timeout) {
this.timeout = timeout;
}
public void setCreateTime(Date createTime) {
this.createTime = createTime;
}
public void setFinishedTime(Date finishedTime) {
this.finishedTime = finishedTime;
}
public void setLastDataTime(Date lastDataTime) {
this.lastDataTime = lastDataTime;
}
public void setTransforms(List<TransformConfig> transforms) {
this.transforms = transforms;
}
public void setModelSizeStats(ModelSizeStats.Builder modelSizeStats) {
this.modelSizeStats = modelSizeStats.build();
}
public void setDataDescription(DataDescription.Builder description) {
dataDescription = description.build();
}
public void setModelDebugConfig(ModelDebugConfig modelDebugConfig) {
this.modelDebugConfig = modelDebugConfig;
}
public void setBackgroundPersistInterval(Long backgroundPersistInterval) {
this.backgroundPersistInterval = backgroundPersistInterval;
}
public void setRenormalizationWindowDays(Long renormalizationWindowDays) {
this.renormalizationWindowDays = renormalizationWindowDays;
}
public void setModelSnapshotRetentionDays(Long modelSnapshotRetentionDays) {
this.modelSnapshotRetentionDays = modelSnapshotRetentionDays;
}
public void setResultsRetentionDays(Long resultsRetentionDays) {
this.resultsRetentionDays = resultsRetentionDays;
}
public void setIgnoreDowntime(IgnoreDowntime ignoreDowntime) {
this.ignoreDowntime = ignoreDowntime;
}
public void setCounts(DataCounts counts) {
this.counts = counts;
}
public void setAverageBucketProcessingTimeMs(Double averageBucketProcessingTimeMs) {
this.averageBucketProcessingTimeMs = averageBucketProcessingTimeMs;
}
public void setModelSnapshotId(String modelSnapshotId) {
this.modelSnapshotId = modelSnapshotId;
}
public Job build() {
return build(false);
}
public Job build(boolean fromApi) {
if (analysisConfig == null) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_MISSING_ANALYSISCONFIG));
}
if (schedulerConfig != null) {
if (analysisConfig.getBucketSpan() == null) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_SCHEDULER_REQUIRES_BUCKET_SPAN));
}
if (schedulerConfig.getDataSource() == SchedulerConfig.DataSource.ELASTICSEARCH) {
if (analysisConfig.getLatency() != null && analysisConfig.getLatency() > 0) {
throw new IllegalArgumentException(
Messages.getMessage(Messages.JOB_CONFIG_SCHEDULER_ELASTICSEARCH_DOES_NOT_SUPPORT_LATENCY));
}
if (schedulerConfig.getAggregationsOrAggs() != null
&& !SchedulerConfig.DOC_COUNT.equals(analysisConfig.getSummaryCountFieldName())) {
throw new IllegalArgumentException(
Messages.getMessage(Messages.JOB_CONFIG_SCHEDULER_AGGREGATIONS_REQUIRES_SUMMARY_COUNT_FIELD,
SchedulerConfig.DataSource.ELASTICSEARCH.toString(), SchedulerConfig.DOC_COUNT));
}
if (dataDescription == null || dataDescription.getFormat() != DataDescription.DataFormat.ELASTICSEARCH) {
throw new IllegalArgumentException(
Messages.getMessage(Messages.JOB_CONFIG_SCHEDULER_ELASTICSEARCH_REQUIRES_DATAFORMAT_ELASTICSEARCH));
}
}
}
if (transforms != null && transforms.isEmpty() == false) {
TransformConfigsVerifier.verify(transforms);
checkTransformOutputIsUsed();
} else {
if (dataDescription != null && dataDescription.getFormat() == DataDescription.DataFormat.SINGLE_LINE) {
String msg = Messages.getMessage(
Messages.JOB_CONFIG_DATAFORMAT_REQUIRES_TRANSFORM,
DataDescription.DataFormat.SINGLE_LINE);
throw new IllegalArgumentException(msg);
}
}
checkValueNotLessThan(0, "timeout", timeout);
checkValueNotLessThan(0, "renormalizationWindowDays", renormalizationWindowDays);
checkValueNotLessThan(MIN_BACKGROUND_PERSIST_INTERVAL, "backgroundPersistInterval", backgroundPersistInterval);
checkValueNotLessThan(0, "modelSnapshotRetentionDays", modelSnapshotRetentionDays);
checkValueNotLessThan(0, "resultsRetentionDays", resultsRetentionDays);
String id;
Date createTime;
Date finishedTime;
Date lastDataTime;
DataCounts counts;
ModelSizeStats modelSizeStats;
Double averageBucketProcessingTimeMs;
String modelSnapshotId;
if (fromApi) {
id = this.id == null ? UUIDs.base64UUID().toLowerCase(Locale.ROOT): this.id;
createTime = this.createTime == null ? new Date() : this.createTime;
finishedTime = null;
lastDataTime = null;
counts = new DataCounts(id);
modelSizeStats = null;
averageBucketProcessingTimeMs = null;
modelSnapshotId = null;
} else {
id = this.id;
createTime = this.createTime;
finishedTime = this.finishedTime;
lastDataTime = this.lastDataTime;
counts = this.counts;
modelSizeStats = this.modelSizeStats;
averageBucketProcessingTimeMs = this.averageBucketProcessingTimeMs;
modelSnapshotId = this.modelSnapshotId;
}
if (id.length() > MAX_JOB_ID_LENGTH) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_ID_TOO_LONG, MAX_JOB_ID_LENGTH));
}
if (!VALID_JOB_ID_CHAR_PATTERN.matcher(id).matches()) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_INVALID_JOBID_CHARS));
}
return new Job(
id, description, createTime, finishedTime, lastDataTime, timeout, analysisConfig, analysisLimits,
schedulerConfig, dataDescription, modelSizeStats, transforms, modelDebugConfig, counts,
ignoreDowntime, renormalizationWindowDays, backgroundPersistInterval, modelSnapshotRetentionDays,
resultsRetentionDays, customSettings, averageBucketProcessingTimeMs, modelSnapshotId
);
}
private static void checkValueNotLessThan(long minVal, String name, Long value) {
if (value != null && value < minVal) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, name, minVal, value));
}
}
/**
* Transform outputs should be used in either the date field,
* as an analysis field or input to another transform
*/
private boolean checkTransformOutputIsUsed() {
Set<String> usedFields = new TransformConfigs(transforms).inputFieldNames();
usedFields.addAll(analysisConfig.analysisFields());
String summaryCountFieldName = analysisConfig.getSummaryCountFieldName();
boolean isSummarised = !Strings.isNullOrEmpty(summaryCountFieldName);
if (isSummarised) {
usedFields.remove(summaryCountFieldName);
}
String timeField = dataDescription == null ? DataDescription.DEFAULT_TIME_FIELD : dataDescription.getTimeField();
usedFields.add(timeField);
for (TransformConfig tc : transforms) {
// if the type has no default outputs it doesn't need an output
boolean usesAnOutput = tc.type().defaultOutputNames().isEmpty()
|| tc.getOutputs().stream().anyMatch(outputName -> usedFields.contains(outputName));
if (isSummarised && tc.getOutputs().contains(summaryCountFieldName)) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_TRANSFORM_DUPLICATED_OUTPUT_NAME, tc.type().prettyName());
throw new IllegalArgumentException(msg);
}
if (!usesAnOutput) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_TRANSFORM_OUTPUTS_UNUSED,
tc.type().prettyName());
throw new IllegalArgumentException(msg);
}
}
return false;
}
}
}
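A hedged sketch of assembling a job with this builder. AnalysisConfig.Builder and DataDescription.Builder are not shown in this section, so the constructors used below (a detector-list constructor and a no-arg constructor respectively) are assumptions for illustration only.

// Sketch only - the AnalysisConfig.Builder and DataDescription.Builder
// constructors below are assumed, not confirmed by this diff.
Detector.Builder countDetector = new Detector.Builder("count", null);
AnalysisConfig.Builder analysis =
        new AnalysisConfig.Builder(Collections.singletonList(countDetector.build()));
Job.Builder jobBuilder = new Job.Builder("farequote");
jobBuilder.setDescription("example job");
jobBuilder.setAnalysisConfig(analysis);
jobBuilder.setDataDescription(new DataDescription.Builder());
// build(true) treats the call as coming from the API: createTime defaults to now,
// counts are reset, and the id is checked against MAX_JOB_ID_LENGTH and
// VALID_JOB_ID_CHAR_PATTERN before the Job is constructed.
Job job = jobBuilder.build(true);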

View File

@ -0,0 +1,35 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
import java.util.Locale;
public enum JobSchedulerStatus implements Writeable {
STARTING, STARTED, STOPPING, STOPPED;
public static JobSchedulerStatus fromString(String name) {
return valueOf(name.trim().toUpperCase(Locale.ROOT));
}
public static JobSchedulerStatus fromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown public enum JobSchedulerStatus {\n ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
}
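A sketch of the Writeable round trip that writeTo/fromStream implement; it assumes org.elasticsearch.common.io.stream.BytesStreamOutput, which this diff does not itself show.

BytesStreamOutput out = new BytesStreamOutput();
JobSchedulerStatus.STARTED.writeTo(out);           // serialises the ordinal as a vint
JobSchedulerStatus roundTripped = JobSchedulerStatus.fromStream(out.bytes().streamInput());
assert roundTripped == JobSchedulerStatus.STARTED;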

View File

@ -0,0 +1,48 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
import java.util.Arrays;
import java.util.Locale;
/**
* Jobs whether running or complete are in one of these states.
* When a job is created it is initialised into the closed status,
* i.e. it is not running.
*/
public enum JobStatus implements Writeable {
RUNNING, CLOSING, CLOSED, FAILED, PAUSING, PAUSED;
public static JobStatus fromString(String name) {
return valueOf(name.trim().toUpperCase(Locale.ROOT));
}
public static JobStatus fromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown public enum JobStatus {\n ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
/**
* @return {@code true} if status matches any of the given {@code candidates}
*/
public boolean isAnyOf(JobStatus... candidates) {
return Arrays.stream(candidates).anyMatch(candidate -> this == candidate);
}
}
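A short illustrative sketch of the helpers above:

JobStatus status = JobStatus.fromString(" running ");                       // RUNNING: trimmed, case-insensitive
boolean isActive = status.isAnyOf(JobStatus.RUNNING, JobStatus.CLOSING);    // true
boolean isFinished = status.isAnyOf(JobStatus.CLOSED, JobStatus.FAILED);    // false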

View File

@ -0,0 +1,166 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
public class ModelDebugConfig extends ToXContentToBytes implements Writeable {
/**
* Enum of the acceptable output destinations.
*/
public enum DebugDestination implements Writeable {
FILE("file"),
DATA_STORE("data_store");
private String name;
DebugDestination(String name) {
this.name = name;
}
public String getName() {
return name;
}
/**
* Case-insensitive from string method. Works with FILE, File, file,
* etc.
*
* @param value
* String representation
* @return The output destination
*/
public static DebugDestination forString(String value) {
String valueUpperCase = value.toUpperCase(Locale.ROOT);
return DebugDestination.valueOf(valueUpperCase);
}
public static DebugDestination readFromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown DebugDestination ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
}
private static final double MAX_PERCENTILE = 100.0;
private static final ParseField TYPE_FIELD = new ParseField("modelDebugConfig");
private static final ParseField WRITE_TO_FIELD = new ParseField("writeTo");
private static final ParseField BOUNDS_PERCENTILE_FIELD = new ParseField("boundsPercentile");
private static final ParseField TERMS_FIELD = new ParseField("terms");
public static final ConstructingObjectParser<ModelDebugConfig, ParseFieldMatcherSupplier> PARSER = new ConstructingObjectParser<>(
TYPE_FIELD.getPreferredName(), a -> {
if (a[0] == null) {
return new ModelDebugConfig((Double) a[1], (String) a[2]);
} else {
return new ModelDebugConfig((DebugDestination) a[0], (Double) a[1], (String) a[2]);
}
});
static {
PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> DebugDestination.forString(p.text()), WRITE_TO_FIELD,
ValueType.STRING);
PARSER.declareDouble(ConstructingObjectParser.constructorArg(), BOUNDS_PERCENTILE_FIELD);
PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), TERMS_FIELD);
}
private final DebugDestination writeTo;
private final double boundsPercentile;
private final String terms;
public ModelDebugConfig(double boundsPercentile, String terms) {
this(DebugDestination.FILE, boundsPercentile, terms);
}
public ModelDebugConfig(DebugDestination writeTo, double boundsPercentile, String terms) {
if (boundsPercentile < 0.0 || boundsPercentile > MAX_PERCENTILE) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_MODEL_DEBUG_CONFIG_INVALID_BOUNDS_PERCENTILE);
throw new IllegalArgumentException(msg);
}
this.writeTo = writeTo;
this.boundsPercentile = boundsPercentile;
this.terms = terms;
}
public ModelDebugConfig(StreamInput in) throws IOException {
writeTo = in.readOptionalWriteable(DebugDestination::readFromStream);
boundsPercentile = in.readDouble();
terms = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalWriteable(writeTo);
out.writeDouble(boundsPercentile);
out.writeOptionalString(terms);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (writeTo != null) {
builder.field(WRITE_TO_FIELD.getPreferredName(), writeTo.getName());
}
builder.field(BOUNDS_PERCENTILE_FIELD.getPreferredName(), boundsPercentile);
if (terms != null) {
builder.field(TERMS_FIELD.getPreferredName(), terms);
}
builder.endObject();
return builder;
}
public DebugDestination getWriteTo() {
return this.writeTo;
}
public double getBoundsPercentile() {
return this.boundsPercentile;
}
public String getTerms() {
return this.terms;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof ModelDebugConfig == false) {
return false;
}
ModelDebugConfig that = (ModelDebugConfig) other;
return Objects.equals(this.writeTo, that.writeTo) && Objects.equals(this.boundsPercentile, that.boundsPercentile)
&& Objects.equals(this.terms, that.terms);
}
@Override
public int hashCode() {
return Objects.hash(this.writeTo, boundsPercentile, terms);
}
}
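An illustrative sketch (not part of this commit) of the constructors above; the terms value "airline" is only an example:

ModelDebugConfig fileConfig = new ModelDebugConfig(95.0, "airline");   // defaults to DebugDestination.FILE
ModelDebugConfig storeConfig =
        new ModelDebugConfig(ModelDebugConfig.DebugDestination.DATA_STORE, 90.0, null);
// new ModelDebugConfig(150.0, null) throws IllegalArgumentException because
// boundsPercentile must lie between 0.0 and MAX_PERCENTILE (100.0).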

View File

@ -0,0 +1,334 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.xpack.prelert.utils.time.TimeUtils;
import java.io.IOException;
import java.util.Date;
import java.util.Objects;
/**
* Provide access to the C++ model memory usage numbers for the Java process.
*/
public class ModelSizeStats extends ToXContentToBytes implements Writeable {
/**
* Field Names
*/
private static final ParseField MODEL_SIZE_STATS_FIELD = new ParseField("modelSizeStats");
public static final ParseField JOB_ID = new ParseField("jobId");
public static final ParseField MODEL_BYTES_FIELD = new ParseField("modelBytes");
public static final ParseField TOTAL_BY_FIELD_COUNT_FIELD = new ParseField("totalByFieldCount");
public static final ParseField TOTAL_OVER_FIELD_COUNT_FIELD = new ParseField("totalOverFieldCount");
public static final ParseField TOTAL_PARTITION_FIELD_COUNT_FIELD = new ParseField("totalPartitionFieldCount");
public static final ParseField BUCKET_ALLOCATION_FAILURES_COUNT_FIELD = new ParseField("bucketAllocationFailuresCount");
public static final ParseField MEMORY_STATUS_FIELD = new ParseField("memoryStatus");
public static final ParseField LOG_TIME_FIELD = new ParseField("logTime");
public static final ParseField TIMESTAMP_FIELD = new ParseField("timestamp");
public static final ConstructingObjectParser<Builder, ParseFieldMatcherSupplier> PARSER = new ConstructingObjectParser<>(
MODEL_SIZE_STATS_FIELD.getPreferredName(), a -> new Builder((String) a[0]));
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), JOB_ID);
PARSER.declareLong(Builder::setModelBytes, MODEL_BYTES_FIELD);
PARSER.declareLong(Builder::setBucketAllocationFailuresCount, BUCKET_ALLOCATION_FAILURES_COUNT_FIELD);
PARSER.declareLong(Builder::setTotalByFieldCount, TOTAL_BY_FIELD_COUNT_FIELD);
PARSER.declareLong(Builder::setTotalOverFieldCount, TOTAL_OVER_FIELD_COUNT_FIELD);
PARSER.declareLong(Builder::setTotalPartitionFieldCount, TOTAL_PARTITION_FIELD_COUNT_FIELD);
PARSER.declareField(Builder::setLogTime, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException(
"unexpected token [" + p.currentToken() + "] for [" + LOG_TIME_FIELD.getPreferredName() + "]");
}, LOG_TIME_FIELD, ValueType.VALUE);
PARSER.declareField(Builder::setTimestamp, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException(
"unexpected token [" + p.currentToken() + "] for [" + TIMESTAMP_FIELD.getPreferredName() + "]");
}, TIMESTAMP_FIELD, ValueType.VALUE);
PARSER.declareField(Builder::setMemoryStatus, p -> MemoryStatus.fromString(p.text()), MEMORY_STATUS_FIELD, ValueType.STRING);
}
/**
* Elasticsearch type
*/
public static final ParseField TYPE = new ParseField("modelSizeStats");
/**
* The status of the memory monitored by the ResourceMonitor. OK is default,
* SOFT_LIMIT means that the models have done some aggressive pruning to
* keep the memory below the limit, and HARD_LIMIT means that samples have
* been dropped.
*/
public enum MemoryStatus implements Writeable {
OK("ok"), SOFT_LIMIT("soft_limit"), HARD_LIMIT("hard_limit");
private String name;
private MemoryStatus(String name) {
this.name = name;
}
public String getName() {
return name;
}
public static MemoryStatus fromString(String statusName) {
for (MemoryStatus status : values()) {
if (status.name.equals(statusName)) {
return status;
}
}
throw new IllegalArgumentException("Unknown MemoryStatus [" + statusName + "]");
}
public static MemoryStatus readFromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown MemoryStatus ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
}
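// For example, MemoryStatus.fromString("soft_limit") returns SOFT_LIMIT, while an
// unrecognised value throws IllegalArgumentException. The stream format writes the
// enum ordinal, so the declaration order of the constants must not change.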
private final String jobId;
private final String id;
private final long modelBytes;
private final long totalByFieldCount;
private final long totalOverFieldCount;
private final long totalPartitionFieldCount;
private final long bucketAllocationFailuresCount;
private final MemoryStatus memoryStatus;
private final Date timestamp;
private final Date logTime;
private ModelSizeStats(String jobId, String id, long modelBytes, long totalByFieldCount, long totalOverFieldCount,
long totalPartitionFieldCount, long bucketAllocationFailuresCount, MemoryStatus memoryStatus,
Date timestamp, Date logTime) {
this.jobId = jobId;
this.id = id;
this.modelBytes = modelBytes;
this.totalByFieldCount = totalByFieldCount;
this.totalOverFieldCount = totalOverFieldCount;
this.totalPartitionFieldCount = totalPartitionFieldCount;
this.bucketAllocationFailuresCount = bucketAllocationFailuresCount;
this.memoryStatus = memoryStatus;
this.timestamp = timestamp;
this.logTime = logTime;
}
public ModelSizeStats(StreamInput in) throws IOException {
jobId = in.readString();
id = null;
modelBytes = in.readVLong();
totalByFieldCount = in.readVLong();
totalOverFieldCount = in.readVLong();
totalPartitionFieldCount = in.readVLong();
bucketAllocationFailuresCount = in.readVLong();
memoryStatus = MemoryStatus.readFromStream(in);
logTime = new Date(in.readLong());
timestamp = in.readBoolean() ? new Date(in.readLong()) : null;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(jobId);
out.writeVLong(modelBytes);
out.writeVLong(totalByFieldCount);
out.writeVLong(totalOverFieldCount);
out.writeVLong(totalPartitionFieldCount);
out.writeVLong(bucketAllocationFailuresCount);
memoryStatus.writeTo(out);
out.writeLong(logTime.getTime());
boolean hasTimestamp = timestamp != null;
out.writeBoolean(hasTimestamp);
if (hasTimestamp) {
out.writeLong(timestamp.getTime());
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
doXContentBody(builder);
builder.endObject();
return builder;
}
public XContentBuilder doXContentBody(XContentBuilder builder) throws IOException {
builder.field(JOB_ID.getPreferredName(), jobId);
builder.field(MODEL_BYTES_FIELD.getPreferredName(), modelBytes);
builder.field(TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName(), totalByFieldCount);
builder.field(TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName(), totalOverFieldCount);
builder.field(TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName(), totalPartitionFieldCount);
builder.field(BUCKET_ALLOCATION_FAILURES_COUNT_FIELD.getPreferredName(), bucketAllocationFailuresCount);
builder.field(MEMORY_STATUS_FIELD.getPreferredName(), memoryStatus.getName());
builder.field(LOG_TIME_FIELD.getPreferredName(), logTime.getTime());
if (timestamp != null) {
builder.field(TIMESTAMP_FIELD.getPreferredName(), timestamp.getTime());
}
return builder;
}
public String getJobId() {
return jobId;
}
public String getId() {
return id;
}
public long getModelBytes() {
return modelBytes;
}
public long getTotalByFieldCount() {
return totalByFieldCount;
}
public long getTotalPartitionFieldCount() {
return totalPartitionFieldCount;
}
public long getTotalOverFieldCount() {
return totalOverFieldCount;
}
public long getBucketAllocationFailuresCount() {
return bucketAllocationFailuresCount;
}
public MemoryStatus getMemoryStatus() {
return memoryStatus;
}
public Date getTimestamp() {
return timestamp;
}
public Date getLogTime() {
return logTime;
}
@Override
public int hashCode() {
// this.id excluded here as it is generated by the datastore
return Objects.hash(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount,
this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime);
}
/**
* Compare all the fields.
*/
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof ModelSizeStats == false) {
return false;
}
ModelSizeStats that = (ModelSizeStats) other;
return this.modelBytes == that.modelBytes && this.totalByFieldCount == that.totalByFieldCount
&& this.totalOverFieldCount == that.totalOverFieldCount && this.totalPartitionFieldCount == that.totalPartitionFieldCount
&& this.bucketAllocationFailuresCount == that.bucketAllocationFailuresCount
&& Objects.equals(this.memoryStatus, that.memoryStatus) && Objects.equals(this.timestamp, that.timestamp)
&& Objects.equals(this.logTime, that.logTime)
&& Objects.equals(this.jobId, that.jobId);
}
// NORELEASE This will not be needed once we are able to parse ModelSizeStats all at once.
public static class Builder {
private final String jobId;
private String id;
private long modelBytes;
private long totalByFieldCount;
private long totalOverFieldCount;
private long totalPartitionFieldCount;
private long bucketAllocationFailuresCount;
private MemoryStatus memoryStatus;
private Date timestamp;
private Date logTime;
public Builder(String jobId) {
this.jobId = jobId;
id = TYPE.getPreferredName();
memoryStatus = MemoryStatus.OK;
logTime = new Date();
}
public void setId(String id) {
this.id = Objects.requireNonNull(id);
}
public void setModelBytes(long modelBytes) {
this.modelBytes = modelBytes;
}
public void setTotalByFieldCount(long totalByFieldCount) {
this.totalByFieldCount = totalByFieldCount;
}
public void setTotalPartitionFieldCount(long totalPartitionFieldCount) {
this.totalPartitionFieldCount = totalPartitionFieldCount;
}
public void setTotalOverFieldCount(long totalOverFieldCount) {
this.totalOverFieldCount = totalOverFieldCount;
}
public void setBucketAllocationFailuresCount(long bucketAllocationFailuresCount) {
this.bucketAllocationFailuresCount = bucketAllocationFailuresCount;
}
public void setMemoryStatus(MemoryStatus memoryStatus) {
Objects.requireNonNull(memoryStatus, "[" + MEMORY_STATUS_FIELD.getPreferredName() + "] must not be null");
this.memoryStatus = memoryStatus;
}
public void setTimestamp(Date timestamp) {
this.timestamp = timestamp;
}
public void setLogTime(Date logTime) {
this.logTime = logTime;
}
public ModelSizeStats build() {
return new ModelSizeStats(jobId, id, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount,
bucketAllocationFailuresCount, memoryStatus, timestamp, logTime);
}
}
}
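
A rough usage sketch (the job id and figures below are invented, and java.util.Date is assumed to be imported): a caller, such as the component that parses the C++ results, might populate the stats through the Builder.

ModelSizeStats.Builder builder = new ModelSizeStats.Builder("my-job");
builder.setModelBytes(52428800L);              // ~50 MB of model memory
builder.setTotalByFieldCount(120L);
builder.setTotalOverFieldCount(3L);
builder.setTotalPartitionFieldCount(2L);
builder.setBucketAllocationFailuresCount(0L);
builder.setMemoryStatus(ModelSizeStats.MemoryStatus.SOFT_LIMIT);
builder.setTimestamp(new Date(1479481200000L));
ModelSizeStats stats = builder.build();        // logTime defaults to the builder's construction time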

View File

@ -0,0 +1,297 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.xpack.prelert.job.quantiles.Quantiles;
import org.elasticsearch.xpack.prelert.utils.time.TimeUtils;
import java.io.IOException;
import java.util.Date;
import java.util.Objects;
/**
* ModelSnapshot Result POJO
*/
public class ModelSnapshot extends ToXContentToBytes implements Writeable {
/**
* Field Names
*/
public static final ParseField JOB_ID = new ParseField("jobId");
public static final ParseField TIMESTAMP = new ParseField("timestamp");
public static final ParseField DESCRIPTION = new ParseField("description");
public static final ParseField RESTORE_PRIORITY = new ParseField("restorePriority");
public static final ParseField SNAPSHOT_ID = new ParseField("snapshotId");
public static final ParseField SNAPSHOT_DOC_COUNT = new ParseField("snapshotDocCount");
public static final ParseField LATEST_RECORD_TIME = new ParseField("latestRecordTimeStamp");
public static final ParseField LATEST_RESULT_TIME = new ParseField("latestResultTimeStamp");
/**
* Elasticsearch type
*/
public static final ParseField TYPE = new ParseField("modelSnapshot");
public static final ConstructingObjectParser<ModelSnapshot, ParseFieldMatcherSupplier> PARSER =
new ConstructingObjectParser<>(TYPE.getPreferredName(), a -> new ModelSnapshot((String) a[0]));
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), JOB_ID);
PARSER.declareField(ModelSnapshot::setTimestamp, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" + TIMESTAMP.getPreferredName() + "]");
}, TIMESTAMP, ValueType.VALUE);
PARSER.declareString(ModelSnapshot::setDescription, DESCRIPTION);
PARSER.declareLong(ModelSnapshot::setRestorePriority, RESTORE_PRIORITY);
PARSER.declareString(ModelSnapshot::setSnapshotId, SNAPSHOT_ID);
PARSER.declareInt(ModelSnapshot::setSnapshotDocCount, SNAPSHOT_DOC_COUNT);
PARSER.declareObject(ModelSnapshot::setModelSizeStats, ModelSizeStats.PARSER, ModelSizeStats.TYPE);
PARSER.declareField(ModelSnapshot::setLatestRecordTimeStamp, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException(
"unexpected token [" + p.currentToken() + "] for [" + LATEST_RECORD_TIME.getPreferredName() + "]");
}, LATEST_RECORD_TIME, ValueType.VALUE);
PARSER.declareField(ModelSnapshot::setLatestResultTimeStamp, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException(
"unexpected token [" + p.currentToken() + "] for [" + LATEST_RESULT_TIME.getPreferredName() + "]");
}, LATEST_RESULT_TIME, ValueType.VALUE);
PARSER.declareObject(ModelSnapshot::setQuantiles, Quantiles.PARSER, Quantiles.TYPE);
}
private final String jobId;
private Date timestamp;
private String description;
private long restorePriority;
private String snapshotId;
private int snapshotDocCount;
private ModelSizeStats modelSizeStats;
private Date latestRecordTimeStamp;
private Date latestResultTimeStamp;
private Quantiles quantiles;
public ModelSnapshot(String jobId) {
this.jobId = jobId;
}
public ModelSnapshot(StreamInput in) throws IOException {
jobId = in.readString();
if (in.readBoolean()) {
timestamp = new Date(in.readLong());
}
description = in.readOptionalString();
restorePriority = in.readLong();
snapshotId = in.readOptionalString();
snapshotDocCount = in.readInt();
if (in.readBoolean()) {
modelSizeStats = new ModelSizeStats(in);
}
if (in.readBoolean()) {
latestRecordTimeStamp = new Date(in.readLong());
}
if (in.readBoolean()) {
latestResultTimeStamp = new Date(in.readLong());
}
if (in.readBoolean()) {
quantiles = new Quantiles(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(jobId);
boolean hasTimestamp = timestamp != null;
out.writeBoolean(hasTimestamp);
if (hasTimestamp) {
out.writeLong(timestamp.getTime());
}
out.writeOptionalString(description);
out.writeLong(restorePriority);
out.writeOptionalString(snapshotId);
out.writeInt(snapshotDocCount);
boolean hasModelSizeStats = modelSizeStats != null;
out.writeBoolean(hasModelSizeStats);
if (hasModelSizeStats) {
modelSizeStats.writeTo(out);
}
boolean hasLatestRecordTimeStamp = latestRecordTimeStamp != null;
out.writeBoolean(hasLatestRecordTimeStamp);
if (hasLatestRecordTimeStamp) {
out.writeLong(latestRecordTimeStamp.getTime());
}
boolean hasLatestResultTimeStamp = latestResultTimeStamp != null;
out.writeBoolean(hasLatestResultTimeStamp);
if (hasLatestResultTimeStamp) {
out.writeLong(latestResultTimeStamp.getTime());
}
boolean hasQuantiles = quantiles != null;
out.writeBoolean(hasQuantiles);
if (hasQuantiles) {
quantiles.writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(JOB_ID.getPreferredName(), jobId);
if (timestamp != null) {
builder.field(TIMESTAMP.getPreferredName(), timestamp.getTime());
}
if (description != null) {
builder.field(DESCRIPTION.getPreferredName(), description);
}
builder.field(RESTORE_PRIORITY.getPreferredName(), restorePriority);
if (snapshotId != null) {
builder.field(SNAPSHOT_ID.getPreferredName(), snapshotId);
}
builder.field(SNAPSHOT_DOC_COUNT.getPreferredName(), snapshotDocCount);
if (modelSizeStats != null) {
builder.field(ModelSizeStats.TYPE.getPreferredName(), modelSizeStats);
}
if (latestRecordTimeStamp != null) {
builder.field(LATEST_RECORD_TIME.getPreferredName(), latestRecordTimeStamp.getTime());
}
if (latestResultTimeStamp != null) {
builder.field(LATEST_RESULT_TIME.getPreferredName(), latestResultTimeStamp.getTime());
}
if (quantiles != null) {
builder.field(Quantiles.TYPE.getPreferredName(), quantiles);
}
builder.endObject();
return builder;
}
public String getJobId() {
return jobId;
}
public Date getTimestamp() {
return timestamp;
}
public void setTimestamp(Date timestamp) {
this.timestamp = timestamp;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public long getRestorePriority() {
return restorePriority;
}
public void setRestorePriority(long restorePriority) {
this.restorePriority = restorePriority;
}
public String getSnapshotId() {
return snapshotId;
}
public void setSnapshotId(String snapshotId) {
this.snapshotId = snapshotId;
}
public int getSnapshotDocCount() {
return snapshotDocCount;
}
public void setSnapshotDocCount(int snapshotDocCount) {
this.snapshotDocCount = snapshotDocCount;
}
public ModelSizeStats getModelSizeStats() {
return modelSizeStats;
}
public void setModelSizeStats(ModelSizeStats.Builder modelSizeStats) {
this.modelSizeStats = modelSizeStats.build();
}
public Quantiles getQuantiles() {
return quantiles;
}
public void setQuantiles(Quantiles q) {
quantiles = q;
}
public Date getLatestRecordTimeStamp() {
return latestRecordTimeStamp;
}
public void setLatestRecordTimeStamp(Date latestRecordTimeStamp) {
this.latestRecordTimeStamp = latestRecordTimeStamp;
}
public Date getLatestResultTimeStamp() {
return latestResultTimeStamp;
}
public void setLatestResultTimeStamp(Date latestResultTimeStamp) {
this.latestResultTimeStamp = latestResultTimeStamp;
}
@Override
public int hashCode() {
return Objects.hash(jobId, timestamp, description, restorePriority, snapshotId, quantiles,
snapshotDocCount, modelSizeStats, latestRecordTimeStamp, latestResultTimeStamp);
}
/**
* Compare all the fields.
*/
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof ModelSnapshot == false) {
return false;
}
ModelSnapshot that = (ModelSnapshot) other;
return Objects.equals(this.jobId, that.jobId)
&& Objects.equals(this.timestamp, that.timestamp)
&& Objects.equals(this.description, that.description)
&& this.restorePriority == that.restorePriority
&& Objects.equals(this.snapshotId, that.snapshotId)
&& this.snapshotDocCount == that.snapshotDocCount
&& Objects.equals(this.modelSizeStats, that.modelSizeStats)
&& Objects.equals(this.quantiles, that.quantiles)
&& Objects.equals(this.latestRecordTimeStamp, that.latestRecordTimeStamp)
&& Objects.equals(this.latestResultTimeStamp, that.latestResultTimeStamp);
}
}
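
A minimal sketch of assembling a snapshot (the job id, snapshot id and counts are invented; java.util.Date is assumed to be imported):

ModelSnapshot snapshot = new ModelSnapshot("my-job");
snapshot.setSnapshotId("1479481200");
snapshot.setDescription("State persisted due to job close");
snapshot.setRestorePriority(1479481200L);
snapshot.setSnapshotDocCount(5);
snapshot.setTimestamp(new Date(1479481200000L));
snapshot.setModelSizeStats(new ModelSizeStats.Builder("my-job"));  // the builder is built internally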

View File

@ -0,0 +1,30 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
/**
* The serialised models can get very large and only the C++ code
* understands how to decode them, hence there is no reason to load
* them into the Java process.
*
* However, the Java process DOES set up a mapping on the Elasticsearch
* index to tell Elasticsearch not to analyse the model state documents
* in any way. (Otherwise Elasticsearch would go into a spin trying to
* make sense of such large JSON documents.)
*/
public class ModelState
{
/**
* The type of this class used when persisting the data
*/
public static final String TYPE = "modelState";
private ModelState()
{
}
}

View File

@ -0,0 +1,962 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.SortedMap;
import java.util.TreeMap;
/**
* Scheduler configuration options. Describes where to proactively pull input
* data from.
* <p>
* If a value has not been set it will be <code>null</code>. Object wrappers are
* used around integral types and booleans so they can take <code>null</code>
* values.
*/
public class SchedulerConfig extends ToXContentToBytes implements Writeable {
/**
* The field name used to specify aggregation fields in Elasticsearch
* aggregations
*/
private static final String FIELD = "field";
/**
* The field name used to specify document counts in Elasticsearch
* aggregations
*/
public static final String DOC_COUNT = "doc_count";
// NORELEASE: no camel casing:
public static final ParseField DATA_SOURCE = new ParseField("dataSource");
public static final ParseField QUERY_DELAY = new ParseField("queryDelay");
public static final ParseField FREQUENCY = new ParseField("frequency");
public static final ParseField FILE_PATH = new ParseField("filePath");
public static final ParseField TAIL_FILE = new ParseField("tailFile");
public static final ParseField BASE_URL = new ParseField("baseUrl");
public static final ParseField USERNAME = new ParseField("username");
public static final ParseField PASSWORD = new ParseField("password");
public static final ParseField ENCRYPTED_PASSWORD = new ParseField("encryptedPassword");
public static final ParseField INDEXES = new ParseField("indexes");
public static final ParseField TYPES = new ParseField("types");
public static final ParseField QUERY = new ParseField("query");
public static final ParseField RETRIEVE_WHOLE_SOURCE = new ParseField("retrieveWholeSource");
public static final ParseField SCROLL_SIZE = new ParseField("scrollSize");
public static final ParseField AGGREGATIONS = new ParseField("aggregations");
public static final ParseField AGGS = new ParseField("aggs");
/**
* Named to match Elasticsearch, hence lowercase_with_underscores instead of
* camelCase
*/
public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields");
public static final ConstructingObjectParser<SchedulerConfig.Builder, ParseFieldMatcherSupplier> PARSER =
new ConstructingObjectParser<>("schedule_config", a -> new SchedulerConfig.Builder((DataSource) a[0]));
static {
PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return DataSource.readFromString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, DATA_SOURCE, ObjectParser.ValueType.STRING);
PARSER.declareLong(Builder::setQueryDelay, QUERY_DELAY);
PARSER.declareLong(Builder::setFrequency, FREQUENCY);
PARSER.declareString(Builder::setFilePath, FILE_PATH);
PARSER.declareBoolean(Builder::setTailFile, TAIL_FILE);
PARSER.declareString(Builder::setUsername, USERNAME);
PARSER.declareString(Builder::setPassword, PASSWORD);
PARSER.declareString(Builder::setEncryptedPassword, ENCRYPTED_PASSWORD);
PARSER.declareString(Builder::setBaseUrl, BASE_URL);
PARSER.declareStringArray(Builder::setIndexes, INDEXES);
PARSER.declareStringArray(Builder::setTypes, TYPES);
PARSER.declareObject(Builder::setQuery, (p, c) -> {
try {
return p.map();
} catch (IOException e) {
throw new RuntimeException(e);
}
}, QUERY);
PARSER.declareObject(Builder::setAggregations, (p, c) -> {
try {
return p.map();
} catch (IOException e) {
throw new RuntimeException(e);
}
}, AGGREGATIONS);
PARSER.declareObject(Builder::setAggs, (p, c) -> {
try {
return p.map();
} catch (IOException e) {
throw new RuntimeException(e);
}
}, AGGS);
PARSER.declareObject(Builder::setScriptFields, (p, c) -> {
try {
return p.map();
} catch (IOException e) {
throw new RuntimeException(e);
}
}, SCRIPT_FIELDS);
PARSER.declareBoolean(Builder::setRetrieveWholeSource, RETRIEVE_WHOLE_SOURCE);
PARSER.declareInt(Builder::setScrollSize, SCROLL_SIZE);
}
// NORELEASE: please use primitives where possible here:
private final DataSource dataSource;
/**
* The delay in seconds before starting to query a period of time
*/
private final Long queryDelay;
/**
* The frequency in seconds with which queries are executed
*/
private final Long frequency;
/**
* These values apply to the FILE data source
*/
private final String filePath;
private final Boolean tailFile;
/**
* Used for data sources that require credentials. May be null in the case
* where credentials are sometimes needed and sometimes not (e.g.
* Elasticsearch).
*/
private final String username;
private final String password;
private final String encryptedPassword;
/**
* These values apply to the ELASTICSEARCH data source
*/
private final String baseUrl;
private final List<String> indexes;
private final List<String> types;
// NORELEASE: These 4 fields can be reduced to a single
// SearchSourceBuilder field holding the entire source:
private final Map<String, Object> query;
private final Map<String, Object> aggregations;
private final Map<String, Object> aggs;
private final Map<String, Object> scriptFields;
private final Boolean retrieveWholeSource;
private final Integer scrollSize;
private SchedulerConfig(DataSource dataSource, Long queryDelay, Long frequency, String filePath, Boolean tailFile, String username,
String password, String encryptedPassword, String baseUrl, List<String> indexes, List<String> types, Map<String, Object> query,
Map<String, Object> aggregations, Map<String, Object> aggs, Map<String, Object> scriptFields, Boolean retrieveWholeSource,
Integer scrollSize) {
this.dataSource = dataSource;
this.queryDelay = queryDelay;
this.frequency = frequency;
this.filePath = filePath;
this.tailFile = tailFile;
this.username = username;
this.password = password;
this.encryptedPassword = encryptedPassword;
this.baseUrl = baseUrl;
this.indexes = indexes;
this.types = types;
this.query = query;
this.aggregations = aggregations;
this.aggs = aggs;
this.scriptFields = scriptFields;
this.retrieveWholeSource = retrieveWholeSource;
this.scrollSize = scrollSize;
}
public SchedulerConfig(StreamInput in) throws IOException {
this.dataSource = DataSource.readFromStream(in);
this.queryDelay = in.readOptionalLong();
this.frequency = in.readOptionalLong();
this.filePath = in.readOptionalString();
this.tailFile = in.readOptionalBoolean();
this.username = in.readOptionalString();
this.password = in.readOptionalString();
this.encryptedPassword = in.readOptionalString();
this.baseUrl = in.readOptionalString();
if (in.readBoolean()) {
this.indexes = in.readList(StreamInput::readString);
} else {
this.indexes = null;
}
if (in.readBoolean()) {
this.types = in.readList(StreamInput::readString);
} else {
this.types = null;
}
if (in.readBoolean()) {
this.query = in.readMap();
} else {
this.query = null;
}
if (in.readBoolean()) {
this.aggregations = in.readMap();
} else {
this.aggregations = null;
}
if (in.readBoolean()) {
this.aggs = in.readMap();
} else {
this.aggs = null;
}
if (in.readBoolean()) {
this.scriptFields = in.readMap();
} else {
this.scriptFields = null;
}
this.retrieveWholeSource = in.readOptionalBoolean();
this.scrollSize = in.readOptionalVInt();
}
/**
* The data source that the scheduler is to pull data from.
*
* @return The data source.
*/
public DataSource getDataSource() {
return this.dataSource;
}
public Long getQueryDelay() {
return this.queryDelay;
}
public Long getFrequency() {
return this.frequency;
}
/**
* For the FILE data source only, the path to the file.
*
* @return The path to the file, or <code>null</code> if not set.
*/
public String getFilePath() {
return this.filePath;
}
/**
* For the FILE data source only, should the file be tailed? If not, it will
* just be read once.
*
* @return Should the file be tailed? (<code>null</code> if not set.)
*/
public Boolean getTailFile() {
return this.tailFile;
}
/**
* For the ELASTICSEARCH data source only, the base URL to connect to
* Elasticsearch on.
*
* @return The URL, or <code>null</code> if not set.
*/
public String getBaseUrl() {
return this.baseUrl;
}
/**
* The username to use to connect to the data source (if any).
*
* @return The username, or <code>null</code> if not set.
*/
public String getUsername() {
return this.username;
}
/**
* For the ELASTICSEARCH data source only, one or more indexes to search for
* input data.
*
* @return The indexes to search, or <code>null</code> if not set.
*/
public List<String> getIndexes() {
return this.indexes;
}
/**
* For the ELASTICSEARCH data source only, one or more types to search for
* input data.
*
* @return The types to search, or <code>null</code> if not set.
*/
public List<String> getTypes() {
return this.types;
}
/**
* For the ELASTICSEARCH data source only, the Elasticsearch query DSL
* representing the query to submit to Elasticsearch to get the input data.
* This should not include time bounds, as these are added separately. This
* class does not attempt to interpret the query. The map will be converted
* back to an arbitrary JSON object.
*
* @return The search query, or <code>null</code> if not set.
*/
public Map<String, Object> getQuery() {
return this.query;
}
/**
* For the ELASTICSEARCH data source only, should the whole _source document
* be retrieved for analysis, or just the analysis fields?
*
* @return Should the whole of _source be retrieved? (<code>null</code> if
* not set.)
*/
public Boolean getRetrieveWholeSource() {
return this.retrieveWholeSource;
}
/**
* For the ELASTICSEARCH data source only, get the number of documents to be
* retrieved from each shard per scroll request.
*
* @return The number of documents to be retrieved from each shard per
* scroll request.
*/
public Integer getScrollSize() {
return this.scrollSize;
}
/**
* The encrypted password to use to connect to the data source (if any). A
* class outside this package is responsible for encrypting and decrypting
* the password.
*
* @return The password, or <code>null</code> if not set.
*/
public String getEncryptedPassword() {
return encryptedPassword;
}
/**
* The plain text password to use to connect to the data source (if any).
* This is likely to return <code>null</code> most of the time, as the
* intention is that it is only present in initial configurations, and gets
* replaced with an encrypted password as soon as possible after receipt.
*
* @return The password, or <code>null</code> if not set.
*/
public String getPassword() {
return password;
}
/**
* For the ELASTICSEARCH data source only, optional Elasticsearch
* script_fields to add to the search to be submitted to Elasticsearch to
* get the input data. This class does not attempt to interpret the script
* fields. The map will be converted back to an arbitrary JSON object.
*
* @return The script fields, or <code>null</code> if not set.
*/
public Map<String, Object> getScriptFields() {
return this.scriptFields;
}
/**
* For the ELASTICSEARCH data source only, optional Elasticsearch
* aggregations to apply to the search to be submitted to Elasticsearch to
* get the input data. This class does not attempt to interpret the
* aggregations. The map will be converted back to an arbitrary JSON object.
* Synonym for {@link #getAggs()} (like Elasticsearch).
*
* @return The aggregations, or <code>null</code> if not set.
*/
public Map<String, Object> getAggregations() {
return this.aggregations;
}
/**
* For the ELASTICSEARCH data source only, optional Elasticsearch
* aggregations to apply to the search to be submitted to Elasticsearch to
* get the input data. This class does not attempt to interpret the
* aggregations. The map will be converted back to an arbitrary JSON object.
* Synonym for {@link #getAggregations()} (like Elasticsearch).
*
* @return The aggregations, or <code>null</code> if not set.
*/
public Map<String, Object> getAggs() {
return this.aggs;
}
/**
* Convenience method to get either aggregations or aggs.
*
* @return The aggregations (whether initially specified in aggregations or
* aggs), or <code>null</code> if neither are set.
*/
public Map<String, Object> getAggregationsOrAggs() {
return (this.aggregations != null) ? this.aggregations : this.aggs;
}
/**
* Build the list of fields expected in the output from aggregations
* submitted to Elasticsearch.
*
* @return The list of fields, or empty list if there are no aggregations.
*/
public List<String> buildAggregatedFieldList() {
Map<String, Object> aggs = getAggregationsOrAggs();
if (aggs == null) {
return Collections.emptyList();
}
SortedMap<Integer, String> orderedFields = new TreeMap<>();
scanSubLevel(aggs, 0, orderedFields);
return new ArrayList<>(orderedFields.values());
}
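// For example, given aggregations of the form
//   {"buckets": {"histogram": {"field": "time"}, "aggs": {"bytes": {"avg": {"field": "bytes"}}}}}
// scanSubLevel() records "time" at depth 2 and "bytes" at depth 4, so this method
// returns ["time", "bytes"], ordered from the outermost to the innermost field.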
@SuppressWarnings("unchecked")
private void scanSubLevel(Map<String, Object> subLevel, int depth, SortedMap<Integer, String> orderedFields) {
for (Map.Entry<String, Object> entry : subLevel.entrySet()) {
Object value = entry.getValue();
if (value instanceof Map<?, ?>) {
scanSubLevel((Map<String, Object>) value, depth + 1, orderedFields);
} else if (value instanceof String && FIELD.equals(entry.getKey())) {
orderedFields.put(depth, (String) value);
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
dataSource.writeTo(out);
out.writeOptionalLong(queryDelay);
out.writeOptionalLong(frequency);
out.writeOptionalString(filePath);
out.writeOptionalBoolean(tailFile);
out.writeOptionalString(username);
out.writeOptionalString(password);
out.writeOptionalString(encryptedPassword);
out.writeOptionalString(baseUrl);
if (indexes != null) {
out.writeBoolean(true);
out.writeStringList(indexes);
} else {
out.writeBoolean(false);
}
if (types != null) {
out.writeBoolean(true);
out.writeStringList(types);
} else {
out.writeBoolean(false);
}
if (query != null) {
out.writeBoolean(true);
out.writeMap(query);
} else {
out.writeBoolean(false);
}
if (aggregations != null) {
out.writeBoolean(true);
out.writeMap(aggregations);
} else {
out.writeBoolean(false);
}
if (aggs != null) {
out.writeBoolean(true);
out.writeMap(aggs);
} else {
out.writeBoolean(false);
}
if (scriptFields != null) {
out.writeBoolean(true);
out.writeMap(scriptFields);
} else {
out.writeBoolean(false);
}
out.writeOptionalBoolean(retrieveWholeSource);
out.writeOptionalVInt(scrollSize);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(DATA_SOURCE.getPreferredName(), dataSource.name().toUpperCase(Locale.ROOT));
if (queryDelay != null) {
builder.field(QUERY_DELAY.getPreferredName(), queryDelay);
}
if (frequency != null) {
builder.field(FREQUENCY.getPreferredName(), frequency);
}
if (filePath != null) {
builder.field(FILE_PATH.getPreferredName(), filePath);
}
if (tailFile != null) {
builder.field(TAIL_FILE.getPreferredName(), tailFile);
}
if (username != null) {
builder.field(USERNAME.getPreferredName(), username);
}
if (password != null) {
builder.field(PASSWORD.getPreferredName(), password);
}
if (encryptedPassword != null) {
builder.field(ENCRYPTED_PASSWORD.getPreferredName(), encryptedPassword);
}
if (baseUrl != null) {
builder.field(BASE_URL.getPreferredName(), baseUrl);
}
if (indexes != null) {
builder.field(INDEXES.getPreferredName(), indexes);
}
if (types != null) {
builder.field(TYPES.getPreferredName(), types);
}
if (query != null) {
builder.field(QUERY.getPreferredName(), query);
}
if (aggregations != null) {
builder.field(AGGREGATIONS.getPreferredName(), aggregations);
}
if (aggs != null) {
builder.field(AGGS.getPreferredName(), aggs);
}
if (scriptFields != null) {
builder.field(SCRIPT_FIELDS.getPreferredName(), scriptFields);
}
if (retrieveWholeSource != null) {
builder.field(RETRIEVE_WHOLE_SOURCE.getPreferredName(), retrieveWholeSource);
}
if (scrollSize != null) {
builder.field(SCROLL_SIZE.getPreferredName(), scrollSize);
}
builder.endObject();
return builder;
}
/**
* The lists of indexes and types are compared for equality, but they are not
* sorted first, so this equality test could fail simply because the indexes
* and types lists are in different orders.
*/
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof SchedulerConfig == false) {
return false;
}
SchedulerConfig that = (SchedulerConfig) other;
return Objects.equals(this.dataSource, that.dataSource) && Objects.equals(this.frequency, that.frequency)
&& Objects.equals(this.queryDelay, that.queryDelay) && Objects.equals(this.filePath, that.filePath)
&& Objects.equals(this.tailFile, that.tailFile) && Objects.equals(this.baseUrl, that.baseUrl)
&& Objects.equals(this.username, that.username) && Objects.equals(this.password, that.password)
&& Objects.equals(this.encryptedPassword, that.encryptedPassword) && Objects.equals(this.indexes, that.indexes)
&& Objects.equals(this.types, that.types) && Objects.equals(this.query, that.query)
&& Objects.equals(this.retrieveWholeSource, that.retrieveWholeSource) && Objects.equals(this.scrollSize, that.scrollSize)
&& Objects.equals(this.getAggregationsOrAggs(), that.getAggregationsOrAggs())
&& Objects.equals(this.scriptFields, that.scriptFields);
}
@Override
public int hashCode() {
return Objects.hash(this.dataSource, frequency, queryDelay, this.filePath, tailFile, baseUrl, username, password, encryptedPassword,
this.indexes, types, query, retrieveWholeSource, scrollSize, getAggregationsOrAggs(), this.scriptFields);
}
/**
* Enum of the acceptable data sources.
*/
public enum DataSource implements Writeable {
FILE, ELASTICSEARCH;
/**
* Case-insensitive from string method. Works with ELASTICSEARCH,
* Elasticsearch, ElasticSearch, etc.
*
* @param value
* String representation
* @return The data source
*/
public static DataSource readFromString(String value) {
String valueUpperCase = value.toUpperCase(Locale.ROOT);
return DataSource.valueOf(valueUpperCase);
}
public static DataSource readFromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown Operator ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
}
public static class Builder {
private static final int DEFAULT_SCROLL_SIZE = 1000;
private static final long DEFAULT_ELASTICSEARCH_QUERY_DELAY = 60L;
/**
* The default query for Elasticsearch searches
*/
private static final String MATCH_ALL_ES_QUERY = "match_all";
private final DataSource dataSource;
private Long queryDelay;
private Long frequency;
private String filePath;
private Boolean tailFile;
private String username;
private String password;
private String encryptedPassword;
private String baseUrl;
// NORELEASE: use Collections.emptyList() instead of null as initial
// value:
private List<String> indexes = null;
private List<String> types = null;
// NORELEASE: use Collections.emptyMap() instead of null as initial
// value:
// NORELEASE: Use SearchSourceBuilder
private Map<String, Object> query = null;
private Map<String, Object> aggregations = null;
private Map<String, Object> aggs = null;
private Map<String, Object> scriptFields = null;
private Boolean retrieveWholeSource;
private Integer scrollSize;
// NORELEASE: figure out what the required fields are and make them part of
// the only public constructor
public Builder(DataSource dataSource) {
this.dataSource = Objects.requireNonNull(dataSource);
switch (dataSource) {
case FILE:
setTailFile(false);
break;
case ELASTICSEARCH:
Map<String, Object> query = new HashMap<>();
query.put(MATCH_ALL_ES_QUERY, new HashMap<String, Object>());
setQuery(query);
setQueryDelay(DEFAULT_ELASTICSEARCH_QUERY_DELAY);
setRetrieveWholeSource(false);
setScrollSize(DEFAULT_SCROLL_SIZE);
break;
default:
throw new UnsupportedOperationException("unsupported datasource " + dataSource);
}
}
public Builder(SchedulerConfig config) {
this.dataSource = config.dataSource;
this.queryDelay = config.queryDelay;
this.frequency = config.frequency;
this.filePath = config.filePath;
this.tailFile = config.tailFile;
this.username = config.username;
this.password = config.password;
this.encryptedPassword = config.encryptedPassword;
this.baseUrl = config.baseUrl;
this.indexes = config.indexes;
this.types = config.types;
this.query = config.query;
this.aggregations = config.aggregations;
this.aggs = config.aggs;
this.scriptFields = config.scriptFields;
this.retrieveWholeSource = config.retrieveWholeSource;
this.scrollSize = config.scrollSize;
}
public void setQueryDelay(long queryDelay) {
if (queryDelay < 0) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_SCHEDULER_INVALID_OPTION_VALUE,
SchedulerConfig.QUERY_DELAY.getPreferredName(), queryDelay);
throw new IllegalArgumentException(msg);
}
this.queryDelay = queryDelay;
}
public void setFrequency(long frequency) {
if (frequency <= 0) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_SCHEDULER_INVALID_OPTION_VALUE,
SchedulerConfig.FREQUENCY.getPreferredName(), frequency);
throw new IllegalArgumentException(msg);
}
this.frequency = frequency;
}
public void setFilePath(String filePath) {
this.filePath = Objects.requireNonNull(filePath);
}
public void setTailFile(boolean tailFile) {
this.tailFile = tailFile;
}
public void setUsername(String username) {
this.username = Objects.requireNonNull(username);
}
public void setPassword(String password) {
this.password = Objects.requireNonNull(password);
}
public void setEncryptedPassword(String encryptedPassword) {
this.encryptedPassword = Objects.requireNonNull(encryptedPassword);
}
public void setBaseUrl(String baseUrl) {
this.baseUrl = Objects.requireNonNull(baseUrl);
}
public void setIndexes(List<String> indexes) {
// NORELEASE: make use of Collections.unmodifiableList(...)
this.indexes = Objects.requireNonNull(indexes);
}
public void setTypes(List<String> types) {
// NORELEASE: make use of Collections.unmodifiableList(...)
this.types = Objects.requireNonNull(types);
}
public void setQuery(Map<String, Object> query) {
// NORELEASE: make use of Collections.unmodifiableMap(...)
this.query = Objects.requireNonNull(query);
}
public void setAggregations(Map<String, Object> aggregations) {
// NORELEASE: make use of Collections.unmodifiableMap(...)
this.aggregations = Objects.requireNonNull(aggregations);
}
public void setAggs(Map<String, Object> aggs) {
// NORELEASE: make use of Collections.unmodifiableMap(...)
this.aggs = Objects.requireNonNull(aggs);
}
public void setScriptFields(Map<String, Object> scriptFields) {
// NORELEASE: make use of Collections.unmodifiableMap(...)
this.scriptFields = Objects.requireNonNull(scriptFields);
}
public void setRetrieveWholeSource(boolean retrieveWholeSource) {
this.retrieveWholeSource = retrieveWholeSource;
}
public void setScrollSize(int scrollSize) {
if (scrollSize < 0) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_SCHEDULER_INVALID_OPTION_VALUE,
SchedulerConfig.SCROLL_SIZE.getPreferredName(), scrollSize);
throw new IllegalArgumentException(msg);
}
this.scrollSize = scrollSize;
}
public DataSource getDataSource() {
return dataSource;
}
public Long getQueryDelay() {
return queryDelay;
}
public Long getFrequency() {
return frequency;
}
public String getFilePath() {
return filePath;
}
public Boolean getTailFile() {
return tailFile;
}
public String getUsername() {
return username;
}
public String getPassword() {
return password;
}
public String getEncryptedPassword() {
return encryptedPassword;
}
public String getBaseUrl() {
return baseUrl;
}
public List<String> getIndexes() {
return indexes;
}
public List<String> getTypes() {
return types;
}
public Map<String, Object> getQuery() {
return query;
}
public Map<String, Object> getAggregations() {
return aggregations;
}
public Map<String, Object> getAggs() {
return aggs;
}
/**
* Convenience method to get either aggregations or aggs.
*
* @return The aggregations (whether initially specified in aggregations
* or aggs), or <code>null</code> if neither are set.
*/
public Map<String, Object> getAggregationsOrAggs() {
return (this.aggregations != null) ? this.aggregations : this.aggs;
}
public Map<String, Object> getScriptFields() {
return scriptFields;
}
public Boolean getRetrieveWholeSource() {
return retrieveWholeSource;
}
public Integer getScrollSize() {
return scrollSize;
}
public SchedulerConfig build() {
switch (dataSource) {
case FILE:
if (Strings.hasLength(filePath) == false) {
throw invalidOptionValue(FILE_PATH.getPreferredName(), filePath);
}
if (baseUrl != null) {
throw notSupportedValue(BASE_URL, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
if (username != null) {
throw notSupportedValue(USERNAME, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
if (password != null) {
throw notSupportedValue(PASSWORD, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
if (encryptedPassword != null) {
throw notSupportedValue(ENCRYPTED_PASSWORD, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
if (indexes != null) {
throw notSupportedValue(INDEXES, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
if (types != null) {
throw notSupportedValue(TYPES, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
if (retrieveWholeSource != null) {
throw notSupportedValue(RETRIEVE_WHOLE_SOURCE, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
if (aggregations != null) {
throw notSupportedValue(AGGREGATIONS, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
if (query != null) {
throw notSupportedValue(QUERY, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
if (scriptFields != null) {
throw notSupportedValue(SCRIPT_FIELDS, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
if (scrollSize != null) {
throw notSupportedValue(SCROLL_SIZE, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
break;
case ELASTICSEARCH:
try {
new URL(baseUrl);
} catch (MalformedURLException e) {
throw invalidOptionValue(BASE_URL.getPreferredName(), baseUrl);
}
boolean isNoPasswordSet = password == null && encryptedPassword == null;
boolean isMultiplePasswordSet = password != null && encryptedPassword != null;
if ((username != null && isNoPasswordSet) || (isNoPasswordSet == false && username == null)) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_SCHEDULER_INCOMPLETE_CREDENTIALS);
throw new IllegalArgumentException(msg);
}
if (isMultiplePasswordSet) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_SCHEDULER_MULTIPLE_PASSWORDS);
throw new IllegalArgumentException(msg);
}
if (indexes == null || indexes.isEmpty() || indexes.contains(null) || indexes.contains("")) {
throw invalidOptionValue(INDEXES.getPreferredName(), indexes);
}
if (types == null || types.isEmpty() || types.contains(null) || types.contains("")) {
throw invalidOptionValue(TYPES.getPreferredName(), types);
}
if (aggregations != null && aggs != null) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_SCHEDULER_MULTIPLE_AGGREGATIONS);
throw new IllegalArgumentException(msg);
}
if (Boolean.TRUE.equals(retrieveWholeSource)) {
if (scriptFields != null) {
throw notSupportedValue(SCRIPT_FIELDS, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
}
if (filePath != null) {
throw notSupportedValue(FILE_PATH, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
if (tailFile != null) {
throw notSupportedValue(TAIL_FILE, dataSource, Messages.JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED);
}
break;
default:
throw new IllegalStateException("Unexpected datasource [" + dataSource + "]");
}
return new SchedulerConfig(dataSource, queryDelay, frequency, filePath, tailFile, username, password, encryptedPassword,
baseUrl, indexes, types, query, aggregations, aggs, scriptFields, retrieveWholeSource, scrollSize);
}
private static ElasticsearchException invalidOptionValue(String fieldName, Object value) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_SCHEDULER_INVALID_OPTION_VALUE, fieldName, value);
throw new IllegalArgumentException(msg);
}
private static ElasticsearchException notSupportedValue(ParseField field, DataSource dataSource, String key) {
String msg = Messages.getMessage(key, field.getPreferredName(), dataSource.toString());
throw new IllegalArgumentException(msg);
}
}
}
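
A minimal sketch of building an ELASTICSEARCH configuration (the URL, index and type names are invented; java.util.Arrays is assumed to be imported):

SchedulerConfig.Builder builder = new SchedulerConfig.Builder(SchedulerConfig.DataSource.ELASTICSEARCH);
builder.setBaseUrl("http://localhost:9200");
builder.setIndexes(Arrays.asList("metricbeat-*"));
builder.setTypes(Arrays.asList("metricsets"));
builder.setFrequency(60L);                 // query every 60 seconds
SchedulerConfig config = builder.build();  // build() validates the fields allowed for this data source

The ELASTICSEARCH constructor pre-populates a match_all query, a 60 second query delay, retrieveWholeSource=false and a scroll size of 1000, so only the connection details and any overrides need to be supplied before build().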

View File

@ -0,0 +1,116 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
public class SchedulerState extends ToXContentToBytes implements Writeable {
// NORELEASE: no camel casing:
public static final ParseField TYPE_FIELD = new ParseField("schedulerState");
public static final ParseField STATUS = new ParseField("status");
public static final ParseField START_TIME_MILLIS = new ParseField("start");
public static final ParseField END_TIME_MILLIS = new ParseField("end");
public static final ConstructingObjectParser<SchedulerState, ParseFieldMatcherSupplier> PARSER = new ConstructingObjectParser<>(
TYPE_FIELD.getPreferredName(), a -> new SchedulerState((JobSchedulerStatus) a[0], (long) a[1], (Long) a[2]));
static {
PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> JobSchedulerStatus.fromString(p.text()), STATUS,
ValueType.STRING);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), START_TIME_MILLIS);
PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), END_TIME_MILLIS);
}
private JobSchedulerStatus status;
private long startTimeMillis;
@Nullable
private Long endTimeMillis;
public SchedulerState(JobSchedulerStatus status, long startTimeMillis, Long endTimeMillis) {
this.status = status;
this.startTimeMillis = startTimeMillis;
this.endTimeMillis = endTimeMillis;
}
public SchedulerState(StreamInput in) throws IOException {
status = JobSchedulerStatus.fromStream(in);
startTimeMillis = in.readLong();
endTimeMillis = in.readOptionalLong();
}
public JobSchedulerStatus getStatus() {
return status;
}
public long getStartTimeMillis() {
return startTimeMillis;
}
/**
* The end time as epoch milliseconds. An {@code null} end time indicates
* real-time mode.
*
* @return The optional end time as epoch milliseconds.
*/
@Nullable
public Long getEndTimeMillis() {
return endTimeMillis;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof SchedulerState == false) {
return false;
}
SchedulerState that = (SchedulerState) other;
return Objects.equals(this.status, that.status) && Objects.equals(this.startTimeMillis, that.startTimeMillis)
&& Objects.equals(this.endTimeMillis, that.endTimeMillis);
}
@Override
public int hashCode() {
return Objects.hash(status, startTimeMillis, endTimeMillis);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
status.writeTo(out);
out.writeLong(startTimeMillis);
out.writeOptionalLong(endTimeMillis);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(STATUS.getPreferredName(), status.name().toUpperCase(Locale.ROOT));
builder.field(START_TIME_MILLIS.getPreferredName(), startTimeMillis);
if (endTimeMillis != null) {
builder.field(END_TIME_MILLIS.getPreferredName(), endTimeMillis);
}
builder.endObject();
return builder;
}
}
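
A sketch of a real-time scheduler state; JobSchedulerStatus is defined elsewhere in this commit, and STARTED is assumed here to be one of its constants.

SchedulerState realTime = new SchedulerState(JobSchedulerStatus.STARTED, System.currentTimeMillis(), null);
// The null end time means the scheduler keeps querying newly arriving data indefinitely.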

View File

@ -0,0 +1,182 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.audit;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.xpack.prelert.utils.time.TimeUtils;
import java.io.IOException;
import java.util.Date;
import java.util.Objects;
public class AuditActivity extends ToXContentToBytes implements Writeable
{
public static final ParseField TYPE = new ParseField("auditActivity");
public static final ParseField TOTAL_JOBS = new ParseField("totalJobs");
public static final ParseField TOTAL_DETECTORS = new ParseField("totalDetectors");
public static final ParseField RUNNING_JOBS = new ParseField("runningJobs");
public static final ParseField RUNNING_DETECTORS = new ParseField("runningDetectors");
public static final ParseField TIMESTAMP = new ParseField("timestamp");
public static final ObjectParser<AuditActivity, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>(TYPE.getPreferredName(),
AuditActivity::new);
static {
PARSER.declareInt(AuditActivity::setTotalJobs, TOTAL_JOBS);
PARSER.declareInt(AuditActivity::setTotalDetectors, TOTAL_DETECTORS);
PARSER.declareInt(AuditActivity::setRunningJobs, RUNNING_JOBS);
PARSER.declareInt(AuditActivity::setRunningDetectors, RUNNING_DETECTORS);
PARSER.declareField(AuditActivity::setTimestamp, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" + TIMESTAMP.getPreferredName() + "]");
}, TIMESTAMP, ValueType.VALUE);
}
private int totalJobs;
private int totalDetectors;
private int runningJobs;
private int runningDetectors;
private Date timestamp;
public AuditActivity() {
}
private AuditActivity(int totalJobs, int totalDetectors, int runningJobs, int runningDetectors)
{
this.totalJobs = totalJobs;
this.totalDetectors = totalDetectors;
this.runningJobs = runningJobs;
this.runningDetectors = runningDetectors;
timestamp = new Date();
}
public AuditActivity(StreamInput in) throws IOException {
totalJobs = in.readInt();
totalDetectors = in.readInt();
runningJobs = in.readInt();
runningDetectors = in.readInt();
if (in.readBoolean()) {
timestamp = new Date(in.readLong());
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(totalJobs);
out.writeInt(totalDetectors);
out.writeInt(runningJobs);
out.writeInt(runningDetectors);
boolean hasTimestamp = timestamp != null;
out.writeBoolean(hasTimestamp);
if (hasTimestamp) {
out.writeLong(timestamp.getTime());
}
}
public int getTotalJobs()
{
return totalJobs;
}
public void setTotalJobs(int totalJobs)
{
this.totalJobs = totalJobs;
}
public int getTotalDetectors()
{
return totalDetectors;
}
public void setTotalDetectors(int totalDetectors)
{
this.totalDetectors = totalDetectors;
}
public int getRunningJobs()
{
return runningJobs;
}
public void setRunningJobs(int runningJobs)
{
this.runningJobs = runningJobs;
}
public int getRunningDetectors()
{
return runningDetectors;
}
public void setRunningDetectors(int runningDetectors)
{
this.runningDetectors = runningDetectors;
}
public Date getTimestamp()
{
return timestamp;
}
public void setTimestamp(Date timestamp)
{
this.timestamp = timestamp;
}
public static AuditActivity newActivity(int totalJobs, int totalDetectors, int runningJobs, int runningDetectors)
{
return new AuditActivity(totalJobs, totalDetectors, runningJobs, runningDetectors);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(TOTAL_JOBS.getPreferredName(), totalJobs);
builder.field(TOTAL_DETECTORS.getPreferredName(), totalDetectors);
builder.field(RUNNING_JOBS.getPreferredName(), runningJobs);
builder.field(RUNNING_DETECTORS.getPreferredName(), runningDetectors);
if (timestamp != null) {
builder.field(TIMESTAMP.getPreferredName(), timestamp.getTime());
}
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(totalDetectors, totalJobs, runningDetectors, runningJobs, timestamp);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
AuditActivity other = (AuditActivity) obj;
return Objects.equals(totalDetectors, other.totalDetectors) &&
Objects.equals(totalJobs, other.totalJobs) &&
Objects.equals(runningDetectors, other.runningDetectors) &&
Objects.equals(runningJobs, other.runningJobs) &&
Objects.equals(timestamp, other.timestamp);
}
}

View File

@ -0,0 +1,199 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.audit;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.utils.time.TimeUtils;
import java.io.IOException;
import java.util.Date;
import java.util.Objects;
public class AuditMessage extends ToXContentToBytes implements Writeable
{
public static final ParseField TYPE = new ParseField("auditMessage");
public static final ParseField MESSAGE = new ParseField("message");
public static final ParseField LEVEL = new ParseField("level");
public static final ParseField TIMESTAMP = new ParseField("timestamp");
public static final ObjectParser<AuditMessage, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>(TYPE.getPreferredName(),
AuditMessage::new);
static {
PARSER.declareString(AuditMessage::setJobId, Job.ID);
PARSER.declareString(AuditMessage::setMessage, MESSAGE);
PARSER.declareField(AuditMessage::setLevel, p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return Level.forString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, LEVEL, ValueType.STRING);
PARSER.declareField(AuditMessage::setTimestamp, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" + TIMESTAMP.getPreferredName() + "]");
}, TIMESTAMP, ValueType.VALUE);
}
private String jobId;
private String message;
private Level level;
private Date timestamp;
public AuditMessage()
{
// Default constructor
}
private AuditMessage(String jobId, String message, Level severity)
{
this.jobId = jobId;
this.message = message;
level = severity;
timestamp = new Date();
}
public AuditMessage(StreamInput in) throws IOException {
jobId = in.readOptionalString();
message = in.readOptionalString();
if (in.readBoolean()) {
level = Level.readFromStream(in);
}
if (in.readBoolean()) {
timestamp = new Date(in.readLong());
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(jobId);
out.writeOptionalString(message);
boolean hasLevel = level != null;
out.writeBoolean(hasLevel);
if (hasLevel) {
level.writeTo(out);
}
boolean hasTimestamp = timestamp != null;
out.writeBoolean(hasTimestamp);
if (hasTimestamp) {
out.writeLong(timestamp.getTime());
}
}
public String getJobId()
{
return jobId;
}
public void setJobId(String jobId)
{
this.jobId = jobId;
}
public String getMessage()
{
return message;
}
public void setMessage(String message)
{
this.message = message;
}
public Level getLevel()
{
return level;
}
public void setLevel(Level level)
{
this.level = level;
}
public Date getTimestamp()
{
return timestamp;
}
public void setTimestamp(Date timestamp)
{
this.timestamp = timestamp;
}
public static AuditMessage newInfo(String jobId, String message)
{
return new AuditMessage(jobId, message, Level.INFO);
}
public static AuditMessage newWarning(String jobId, String message)
{
return new AuditMessage(jobId, message, Level.WARNING);
}
public static AuditMessage newActivity(String jobId, String message)
{
return new AuditMessage(jobId, message, Level.ACTIVITY);
}
public static AuditMessage newError(String jobId, String message)
{
return new AuditMessage(jobId, message, Level.ERROR);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (jobId != null) {
builder.field(Job.ID.getPreferredName(), jobId);
}
if (message != null) {
builder.field(MESSAGE.getPreferredName(), message);
}
if (level != null) {
builder.field(LEVEL.getPreferredName(), level);
}
if (timestamp != null) {
builder.field(TIMESTAMP.getPreferredName(), timestamp.getTime());
}
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(jobId, message, level, timestamp);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
AuditMessage other = (AuditMessage) obj;
return Objects.equals(jobId, other.jobId) &&
Objects.equals(message, other.message) &&
Objects.equals(level, other.level) &&
Objects.equals(timestamp, other.timestamp);
}
}
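
A minimal usage sketch for the factory methods above (the job id and message text are made up; the private constructor stamps the current time automatically):

    AuditMessage warning = AuditMessage.newWarning("farequote", "Job is running low on disk space");
    // warning.getLevel() == Level.WARNING, warning.getTimestamp() is the creation time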

View File

@ -0,0 +1,15 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.audit;
public interface Auditor
{
void info(String message);
void warning(String message);
void error(String message);
void activity(String message);
void activity(int totalJobs, int totalDetectors, int runningJobs, int runningDetectors);
}

View File

@ -0,0 +1,51 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.audit;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
import java.util.Locale;
public enum Level implements Writeable {
INFO("info"), ACTIVITY("activity"), WARNING("warning"), ERROR("error");
private String name;
private Level(String name) {
this.name = name;
}
public String getName() {
return name;
}
/**
* Case-insensitive from string method.
*
* @param value
* String representation
* @return The level
*/
public static Level forString(String value) {
return Level.valueOf(value.toUpperCase(Locale.ROOT));
}
public static Level readFromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown Level ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
}
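
As a quick illustration of the case-insensitive parsing above (the value is arbitrary):

    Level level = Level.forString("warning");   // -> Level.WARNING
    String name = level.getName();              // "warning"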

View File

@ -0,0 +1,132 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.condition;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import java.io.IOException;
import java.util.Objects;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
/**
* A class that describes a condition.
* The {@linkplain Operator} enum defines the available
* comparisons a condition can use.
*/
public class Condition extends ToXContentToBytes implements Writeable {
public static final ParseField CONDITION_FIELD = new ParseField("condition");
public static final ParseField FILTER_VALUE_FIELD = new ParseField("value");
public static final ConstructingObjectParser<Condition, ParseFieldMatcherSupplier> PARSER = new ConstructingObjectParser<>(
CONDITION_FIELD.getPreferredName(), a -> new Condition((Operator) a[0], (String) a[1]));
static {
PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return Operator.fromString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, Operator.OPERATOR_FIELD, ValueType.STRING);
PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return p.text();
}
if (p.currentToken() == XContentParser.Token.VALUE_NULL) {
return null;
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, FILTER_VALUE_FIELD, ValueType.STRING_OR_NULL);
}
private final Operator op;
private final String filterValue;
public Condition(StreamInput in) throws IOException {
op = Operator.readFromStream(in);
filterValue = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
op.writeTo(out);
out.writeOptionalString(filterValue);
}
public Condition(Operator op, String filterValue) {
if (filterValue == null) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_CONDITION_INVALID_VALUE_NULL));
}
if (op.expectsANumericArgument()) {
try {
Double.parseDouble(filterValue);
} catch (NumberFormatException nfe) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_CONDITION_INVALID_VALUE_NUMBER, filterValue);
throw new IllegalArgumentException(msg);
}
} else {
try {
Pattern.compile(filterValue);
} catch (PatternSyntaxException e) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_CONDITION_INVALID_VALUE_REGEX, filterValue);
throw new IllegalArgumentException(msg);
}
}
this.op = op;
this.filterValue = filterValue;
}
public Operator getOperator() {
return op;
}
public String getValue() {
return filterValue;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Operator.OPERATOR_FIELD.getPreferredName(), op.getName());
builder.field(FILTER_VALUE_FIELD.getPreferredName(), filterValue);
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(op, filterValue);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Condition other = (Condition) obj;
return Objects.equals(this.op, other.op) &&
Objects.equals(this.filterValue, other.filterValue);
}
}
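
A small sketch of how the constructor validation above behaves (sample values are invented): numeric operators require a value that parses as a double, while MATCH requires a value that compiles as a regular expression.

    Condition numeric = new Condition(Operator.GT, "100.0");        // ok: parses as a double
    Condition regex = new Condition(Operator.MATCH, "host-\\d+");   // ok: compiles as a pattern
    // new Condition(Operator.GT, "not-a-number") would throw IllegalArgumentException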

View File

@ -0,0 +1,115 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.condition;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Locale;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Enum representing logical comparisons on doubles
*/
public enum Operator implements Writeable {
EQ("eq") {
@Override
public boolean test(double lhs, double rhs) {
return Double.compare(lhs, rhs) == 0;
}
},
GT("gt") {
@Override
public boolean test(double lhs, double rhs) {
return Double.compare(lhs, rhs) > 0;
}
},
GTE("gte") {
@Override
public boolean test(double lhs, double rhs) {
return Double.compare(lhs, rhs) >= 0;
}
},
LT("lt") {
@Override
public boolean test(double lhs, double rhs) {
return Double.compare(lhs, rhs) < 0;
}
},
LTE("lte") {
@Override
public boolean test(double lhs, double rhs) {
return Double.compare(lhs, rhs) <= 0;
}
},
MATCH("match") {
@Override
public boolean match(Pattern pattern, String field) {
Matcher match = pattern.matcher(field);
return match.matches();
}
@Override
public boolean expectsANumericArgument() {
return false;
}
};
public static final ParseField OPERATOR_FIELD = new ParseField("operator");
private final String name;
private Operator(String name) {
this.name = name;
}
public String getName() {
return name;
}
public boolean test(double lhs, double rhs) {
return false;
}
public boolean match(Pattern pattern, String field) {
return false;
}
public boolean expectsANumericArgument() {
return true;
}
public static Operator fromString(String name) {
Set<Operator> all = EnumSet.allOf(Operator.class);
String ucName = name.toUpperCase(Locale.ROOT);
for (Operator type : all) {
if (type.toString().equals(ucName)) {
return type;
}
}
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_CONDITION_UNKNOWN_OPERATOR, name));
}
public static Operator readFromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown Operator ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
}
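
For illustration, the comparisons above behave as follows (arbitrary sample values):

    boolean gte = Operator.GTE.test(5.0, 5.0);                                   // true
    boolean match = Operator.MATCH.match(Pattern.compile("aws_.*"), "aws_api");  // true
    Operator parsed = Operator.fromString("lte");                                // Operator.LTE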

View File

@ -0,0 +1,83 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.config;
import org.elasticsearch.common.Strings;
import org.elasticsearch.xpack.prelert.job.Detector;
import org.elasticsearch.xpack.prelert.utils.PrelertStrings;
public final class DefaultDetectorDescription {
private static final String BY_TOKEN = " by ";
private static final String OVER_TOKEN = " over ";
private static final String USE_NULL_OPTION = " usenull=";
private static final String PARTITION_FIELD_OPTION = " partitionfield=";
private static final String EXCLUDE_FREQUENT_OPTION = " excludefrequent=";
private DefaultDetectorDescription() {
// do nothing
}
/**
* Returns the default description for the given {@code detector}
*
* @param detector the {@code Detector} for which a default description is requested
* @return the default description
*/
public static String of(Detector detector) {
StringBuilder sb = new StringBuilder();
appendOn(detector, sb);
return sb.toString();
}
/**
* Appends to the given {@code StringBuilder} the default description
* for the given {@code detector}
*
* @param detector the {@code Detector} for which a default description is requested
* @param sb the {@code StringBuilder} to append to
*/
public static void appendOn(Detector detector, StringBuilder sb) {
if (isNotNullOrEmpty(detector.getFunction())) {
sb.append(detector.getFunction());
if (isNotNullOrEmpty(detector.getFieldName())) {
sb.append('(').append(quoteField(detector.getFieldName()))
.append(')');
}
} else if (isNotNullOrEmpty(detector.getFieldName())) {
sb.append(quoteField(detector.getFieldName()));
}
if (isNotNullOrEmpty(detector.getByFieldName())) {
sb.append(BY_TOKEN).append(quoteField(detector.getByFieldName()));
}
if (isNotNullOrEmpty(detector.getOverFieldName())) {
sb.append(OVER_TOKEN).append(quoteField(detector.getOverFieldName()));
}
if (detector.isUseNull()) {
sb.append(USE_NULL_OPTION).append(detector.isUseNull());
}
if (isNotNullOrEmpty(detector.getPartitionFieldName())) {
sb.append(PARTITION_FIELD_OPTION).append(quoteField(detector.getPartitionFieldName()));
}
if (detector.getExcludeFrequent() != null) {
sb.append(EXCLUDE_FREQUENT_OPTION).append(detector.getExcludeFrequent().getToken());
}
}
private static String quoteField(String field) {
return PrelertStrings.doubleQuoteIfNotAlphaNumeric(field);
}
private static boolean isNotNullOrEmpty(String arg) {
return !Strings.isNullOrEmpty(arg);
}
}

View File

@ -0,0 +1,62 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.config;
import java.time.Duration;
/**
* Factory methods for a sensible default for the scheduler frequency
*/
public final class DefaultFrequency
{
private static final int SECONDS_IN_MINUTE = 60;
private static final int TWO_MINS_SECONDS = 2 * SECONDS_IN_MINUTE;
private static final int TWENTY_MINS_SECONDS = 20 * SECONDS_IN_MINUTE;
private static final int HALF_DAY_SECONDS = 12 * 60 * SECONDS_IN_MINUTE;
private static final Duration TEN_MINUTES = Duration.ofMinutes(10);
private static final Duration ONE_HOUR = Duration.ofHours(1);
private DefaultFrequency()
{
// Do nothing
}
/**
* Creates a sensible default frequency for a given bucket span.
*
* The default depends on the bucket span:
* <ul>
* <li> &lt;= 2 mins -&gt; 1 min</li>
* <li> &lt;= 20 mins -&gt; bucket span / 2</li>
* <li> &lt;= 12 hours -&gt; 10 mins</li>
* <li> &gt; 12 hours -&gt; 1 hour</li>
* </ul>
*
* @param bucketSpanSeconds the bucket span in seconds
* @return the default frequency
*/
public static Duration ofBucketSpan(long bucketSpanSeconds)
{
if (bucketSpanSeconds <= 0)
{
throw new IllegalArgumentException("Bucket span has to be > 0");
}
if (bucketSpanSeconds <= TWO_MINS_SECONDS)
{
return Duration.ofSeconds(SECONDS_IN_MINUTE);
}
if (bucketSpanSeconds <= TWENTY_MINS_SECONDS)
{
return Duration.ofSeconds(bucketSpanSeconds / 2);
}
if (bucketSpanSeconds <= HALF_DAY_SECONDS)
{
return TEN_MINUTES;
}
return ONE_HOUR;
}
}
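
A few sample inputs, purely to illustrate the thresholds documented above:

    DefaultFrequency.ofBucketSpan(60);     // <= 2 mins   -> 1 minute
    DefaultFrequency.ofBucketSpan(600);    // <= 20 mins  -> 5 minutes (bucket span / 2)
    DefaultFrequency.ofBucketSpan(3600);   // <= 12 hours -> 10 minutes
    DefaultFrequency.ofBucketSpan(86400);  // > 12 hours  -> 1 hour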

View File

@ -0,0 +1,53 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.data;
import org.elasticsearch.xpack.prelert.job.DataCounts;
import org.elasticsearch.xpack.prelert.job.process.autodetect.params.DataLoadParams;
import org.elasticsearch.xpack.prelert.job.process.autodetect.params.InterimResultsParams;
import java.io.InputStream;
public interface DataProcessor {
/**
* Passes data to the native process.
* This is a blocking call that won't return until all the data has been
* written to the process.
*
* An ElasticsearchStatusException will be thrown if any of these error conditions occur:
* <ol>
* <li>If a configured field is missing from the CSV header</li>
* <li>If JSON data is malformed and we cannot recover parsing</li>
* <li>If a high proportion of the records have a timestamp field that cannot be parsed</li>
* <li>If a high proportion of the records are chronologically out of order</li>
* </ol>
*
* @param jobId the jobId
* @param input Data input stream
* @param params Data processing parameters
* @return Count of records, fields, bytes, etc written
*/
DataCounts processData(String jobId, InputStream input, DataLoadParams params);
/**
* Flush the running job, ensuring that the native process has had the
* opportunity to process all data previously sent to it with none left
* sitting in buffers.
*
* @param jobId The job to flush
* @param interimResultsParams Parameters about whether interim results calculation
* should occur and for which period of time
*/
void flushJob(String jobId, InterimResultsParams interimResultsParams);
/**
* Stop the running job and mark it as finished.<br>
*
* @param jobId The job to stop
*/
void closeJob(String jobId);
}
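
A minimal sketch (a hypothetical helper, not part of the plugin) of the call order implied by the contract above: write all the data, flush so nothing is left buffered, then close the job. The parameter objects are assumed to be constructed elsewhere; only types imported in the interface above are used.

    static DataCounts runOnce(DataProcessor processor, String jobId, InputStream input,
                              DataLoadParams loadParams, InterimResultsParams flushParams) {
        DataCounts counts = processor.processData(jobId, input, loadParams); // blocks until all data is written
        processor.flushJob(jobId, flushParams);                              // drain anything still buffered
        processor.closeJob(jobId);                                           // stop the process, mark the job finished
        return counts;
    }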

View File

@ -0,0 +1,66 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.data;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.xpack.prelert.job.DataCounts;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import org.elasticsearch.xpack.prelert.job.process.autodetect.params.DataLoadParams;
import java.io.IOException;
import java.io.InputStream;
import java.util.Objects;
import java.util.zip.GZIPInputStream;
import java.util.zip.ZipException;
public class DataStreamer {
private static final Logger LOGGER = Loggers.getLogger(DataStreamer.class);
private final DataProcessor dataProcessor;
public DataStreamer(DataProcessor dataProcessor) {
this.dataProcessor = Objects.requireNonNull(dataProcessor);
}
/**
* Stream the data to the native process.
*
* @return Count of records, fields, bytes, etc written
*/
public DataCounts streamData(String contentEncoding, String jobId, InputStream input, DataLoadParams params) throws IOException {
LOGGER.trace("Handle Post data to job {} ", jobId);
input = tryDecompressingInputStream(contentEncoding, jobId, input);
DataCounts stats = handleStream(jobId, input, params);
LOGGER.debug("Data uploaded to job {}", jobId);
return stats;
}
private InputStream tryDecompressingInputStream(String contentEncoding, String jobId, InputStream input) throws IOException {
if ("gzip".equals(contentEncoding)) {
LOGGER.debug("Decompressing post data in job {}", jobId);
try {
return new GZIPInputStream(input);
} catch (ZipException ze) {
LOGGER.error("Failed to decompress data file", ze);
throw new IllegalArgumentException(Messages.getMessage(Messages.REST_GZIP_ERROR), ze);
}
}
return input;
}
/**
* Pass the data stream to the native process.
*
* @return Count of records, fields, bytes, etc written
*/
private DataCounts handleStream(String jobId, InputStream input, DataLoadParams params) {
return dataProcessor.processData(jobId, input, params);
}
}
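
A hypothetical helper showing how DataStreamer would typically be driven; passing "gzip" as the content encoding triggers the transparent decompression above, any other encoding is passed through untouched.

    static DataCounts uploadGzip(DataProcessor dataProcessor, String jobId,
                                 InputStream compressedInput, DataLoadParams params) throws IOException {
        DataStreamer streamer = new DataStreamer(dataProcessor);
        return streamer.streamData("gzip", jobId, compressedInput, params);
    }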

View File

@ -0,0 +1,87 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.data;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.xpack.prelert.job.DataCounts;
import org.elasticsearch.xpack.prelert.job.process.autodetect.params.DataLoadParams;
import java.io.IOException;
import java.io.InputStream;
import java.util.Optional;
// NORELEASE - Use ES ThreadPool
public final class DataStreamerThread extends Thread {
private static final Logger LOGGER = Loggers.getLogger(DataStreamerThread.class);
private DataCounts stats;
private final String jobId;
private final String contentEncoding;
private final DataLoadParams params;
private final InputStream input;
private final DataStreamer dataStreamer;
private ElasticsearchException jobException;
private IOException iOException;
public DataStreamerThread(DataStreamer dataStreamer, String jobId, String contentEncoding,
DataLoadParams params, InputStream input) {
super("DataStreamer-" + jobId);
this.dataStreamer = dataStreamer;
this.jobId = jobId;
this.contentEncoding = contentEncoding;
this.params = params;
this.input = input;
}
@Override
public void run() {
try {
stats = dataStreamer.streamData(contentEncoding, jobId, input, params);
} catch (ElasticsearchException e) {
jobException = e;
} catch (IOException e) {
iOException = e;
} finally {
try {
input.close();
} catch (IOException e) {
LOGGER.warn("Exception closing the data input stream", e);
}
}
}
/**
* This method should only be called <b>after</b> the thread
* has joined, otherwise the result could be <code>null</code>
* (best case) or undefined.
*/
public DataCounts getDataCounts() {
return stats;
}
/**
* If a Job exception was thrown during the run of this thread it
* is accessed here. Only call this method after the thread has joined.
*/
public Optional<ElasticsearchException> getJobException() {
return Optional.ofNullable(jobException);
}
/**
* If an IOException was thrown during the run of this thread it
* is accessed here. Only call this method after the thread has joined.
*/
public Optional<IOException> getIOException() {
return Optional.ofNullable(iOException);
}
public String getJobId() {
return jobId;
}
}
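
A sketch (a hypothetical helper) of the join-then-read contract documented above: the counts and exceptions are only defined once join() has returned.

    static DataCounts streamInBackground(DataStreamer dataStreamer, String jobId, String contentEncoding,
                                         DataLoadParams params, InputStream input)
            throws InterruptedException, IOException {
        DataStreamerThread thread = new DataStreamerThread(dataStreamer, jobId, contentEncoding, params, input);
        thread.start();
        thread.join();                                  // results below are undefined until the thread has joined
        if (thread.getJobException().isPresent()) {
            throw thread.getJobException().get();       // ElasticsearchException is unchecked
        }
        if (thread.getIOException().isPresent()) {
            throw thread.getIOException().get();
        }
        return thread.getDataCounts();
    }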

View File

@ -0,0 +1,52 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.detectionrules;
import java.io.IOException;
import java.util.Locale;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
public enum Connective implements Writeable {
OR("or"),
AND("and");
private String name;
private Connective(String name) {
this.name = name;
}
public String getName() {
return name;
}
/**
* Case-insensitive from string method.
*
* @param value
* String representation
* @return The connective type
*/
public static Connective fromString(String value) {
return Connective.valueOf(value.toUpperCase(Locale.ROOT));
}
public static Connective readFromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown Connective ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
}

View File

@ -0,0 +1,169 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.detectionrules;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
public class DetectionRule extends ToXContentToBytes implements Writeable {
public static final ParseField DETECTION_RULE_FIELD = new ParseField("detection_rule");
public static final ParseField RULE_ACTION_FIELD = new ParseField("rule_action");
public static final ParseField TARGET_FIELD_NAME_FIELD = new ParseField("target_field_name");
public static final ParseField TARGET_FIELD_VALUE_FIELD = new ParseField("target_field_value");
public static final ParseField CONDITIONS_CONNECTIVE_FIELD = new ParseField("conditions_connective");
public static final ParseField RULE_CONDITIONS_FIELD = new ParseField("rule_conditions");
public static final ConstructingObjectParser<DetectionRule, ParseFieldMatcherSupplier> PARSER = new ConstructingObjectParser<>(
DETECTION_RULE_FIELD.getPreferredName(),
arr -> {
@SuppressWarnings("unchecked")
List<RuleCondition> rules = (List<RuleCondition>) arr[3];
return new DetectionRule((String) arr[0], (String) arr[1], (Connective) arr[2], rules);
}
);
static {
PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), TARGET_FIELD_NAME_FIELD);
PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), TARGET_FIELD_VALUE_FIELD);
PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return Connective.fromString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, CONDITIONS_CONNECTIVE_FIELD, ValueType.STRING);
PARSER.declareObjectArray(ConstructingObjectParser.optionalConstructorArg(),
(parser, parseFieldMatcher) -> RuleCondition.PARSER.apply(parser, parseFieldMatcher), RULE_CONDITIONS_FIELD);
}
private final RuleAction ruleAction = RuleAction.FILTER_RESULTS;
private final String targetFieldName;
private final String targetFieldValue;
private final Connective conditionsConnective;
private final List<RuleCondition> ruleConditions;
public DetectionRule(StreamInput in) throws IOException {
conditionsConnective = Connective.readFromStream(in);
int size = in.readVInt();
ruleConditions = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
ruleConditions.add(new RuleCondition(in));
}
targetFieldName = in.readOptionalString();
targetFieldValue = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
conditionsConnective.writeTo(out);
out.writeVInt(ruleConditions.size());
for (RuleCondition condition : ruleConditions) {
condition.writeTo(out);
}
out.writeOptionalString(targetFieldName);
out.writeOptionalString(targetFieldValue);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(CONDITIONS_CONNECTIVE_FIELD.getPreferredName(), conditionsConnective.getName());
builder.field(RULE_CONDITIONS_FIELD.getPreferredName(), ruleConditions);
if (targetFieldName != null) {
builder.field(TARGET_FIELD_NAME_FIELD.getPreferredName(), targetFieldName);
}
if (targetFieldValue != null) {
builder.field(TARGET_FIELD_VALUE_FIELD.getPreferredName(), targetFieldValue);
}
builder.endObject();
return builder;
}
public DetectionRule(String targetFieldName, String targetFieldValue, Connective conditionsConnective,
List<RuleCondition> ruleConditions) {
if (targetFieldValue != null && targetFieldName == null) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_MISSING_TARGET_FIELD_NAME, targetFieldValue);
throw new IllegalArgumentException(msg);
}
if (ruleConditions == null || ruleConditions.isEmpty()) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_REQUIRES_AT_LEAST_ONE_CONDITION);
throw new IllegalArgumentException(msg);
}
for (RuleCondition condition : ruleConditions) {
if (condition.getConditionType() == RuleConditionType.CATEGORICAL && targetFieldName != null) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_CATEGORICAL_INVALID_OPTION,
DetectionRule.TARGET_FIELD_NAME_FIELD.getPreferredName());
throw new IllegalArgumentException(msg);
}
}
this.targetFieldName = targetFieldName;
this.targetFieldValue = targetFieldValue;
this.conditionsConnective = conditionsConnective != null ? conditionsConnective : Connective.OR;
this.ruleConditions = Collections.unmodifiableList(ruleConditions);
}
public RuleAction getRuleAction() {
return ruleAction;
}
public String getTargetFieldName() {
return targetFieldName;
}
public String getTargetFieldValue() {
return targetFieldValue;
}
public Connective getConditionsConnective() {
return conditionsConnective;
}
public List<RuleCondition> getRuleConditions() {
return ruleConditions;
}
public Set<String> extractReferencedLists() {
return ruleConditions.stream().map(RuleCondition::getValueList).filter(Objects::nonNull).collect(Collectors.toSet());
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof DetectionRule == false) {
return false;
}
DetectionRule other = (DetectionRule) obj;
return Objects.equals(ruleAction, other.ruleAction) && Objects.equals(targetFieldName, other.targetFieldName)
&& Objects.equals(targetFieldValue, other.targetFieldValue)
&& Objects.equals(conditionsConnective, other.conditionsConnective) && Objects.equals(ruleConditions, other.ruleConditions);
}
@Override
public int hashCode() {
return Objects.hash(ruleAction, targetFieldName, targetFieldValue, conditionsConnective, ruleConditions);
}
}
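
A small construction sketch using the validation rules above (field and list names are invented): a categorical condition needs a value list and must not be combined with a target field name, and the connective defaults to OR when null is passed.

    RuleCondition condition = RuleCondition.createCategorical("service", "excluded_services");
    DetectionRule rule = new DetectionRule(null, null, Connective.OR,
            Collections.singletonList(condition));
    Set<String> referencedLists = rule.extractReferencedLists();   // contains "excluded_services"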

View File

@ -0,0 +1,24 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.detectionrules;
import java.util.Locale;
public enum RuleAction
{
FILTER_RESULTS;
/**
* Case-insensitive from string method.
*
* @param value String representation
* @return The rule action
*/
public static RuleAction forString(String value)
{
return RuleAction.valueOf(value.toUpperCase(Locale.ROOT));
}
}

View File

@ -0,0 +1,248 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.detectionrules;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.prelert.job.condition.Condition;
import org.elasticsearch.xpack.prelert.job.condition.Operator;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Objects;
public class RuleCondition extends ToXContentToBytes implements Writeable {
public static final ParseField CONDITION_TYPE_FIELD = new ParseField("conditionType");
public static final ParseField RULE_CONDITION_FIELD = new ParseField("ruleCondition");
public static final ParseField FIELD_NAME_FIELD = new ParseField("fieldName");
public static final ParseField FIELD_VALUE_FIELD = new ParseField("fieldValue");
public static final ParseField VALUE_LIST_FIELD = new ParseField("valueList");
public static final ConstructingObjectParser<RuleCondition, ParseFieldMatcherSupplier> PARSER =
new ConstructingObjectParser<>(RULE_CONDITION_FIELD.getPreferredName(),
a -> new RuleCondition((RuleConditionType) a[0], (String) a[1], (String) a[2], (Condition) a[3], (String) a[4]));
static {
PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return RuleConditionType.forString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, CONDITION_TYPE_FIELD, ValueType.STRING);
PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), FIELD_NAME_FIELD);
PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), FIELD_VALUE_FIELD);
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Condition.PARSER, Condition.CONDITION_FIELD);
PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), VALUE_LIST_FIELD);
}
private final RuleConditionType conditionType;
private final String fieldName;
private final String fieldValue;
private final Condition condition;
private final String valueList;
public RuleCondition(StreamInput in) throws IOException {
conditionType = RuleConditionType.readFromStream(in);
condition = in.readOptionalWriteable(Condition::new);
fieldName = in.readOptionalString();
fieldValue = in.readOptionalString();
valueList = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
conditionType.writeTo(out);
out.writeOptionalWriteable(condition);
out.writeOptionalString(fieldName);
out.writeOptionalString(fieldValue);
out.writeOptionalString(valueList);
}
public RuleCondition(RuleConditionType conditionType, String fieldName, String fieldValue, Condition condition, String valueList) {
this.conditionType = conditionType;
this.fieldName = fieldName;
this.fieldValue = fieldValue;
this.condition = condition;
this.valueList = valueList;
verifyFieldsBoundToType(this);
verifyFieldValueRequiresFieldName(this);
}
public RuleCondition(RuleCondition ruleCondition) {
this.conditionType = ruleCondition.conditionType;
this.fieldName = ruleCondition.fieldName;
this.fieldValue = ruleCondition.fieldValue;
this.condition = ruleCondition.condition;
this.valueList = ruleCondition.valueList;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(CONDITION_TYPE_FIELD.getPreferredName(), conditionType);
if (condition != null) {
builder.field(Condition.CONDITION_FIELD.getPreferredName(), condition);
}
if (fieldName != null) {
builder.field(FIELD_NAME_FIELD.getPreferredName(), fieldName);
}
if (fieldValue != null) {
builder.field(FIELD_VALUE_FIELD.getPreferredName(), fieldValue);
}
if (valueList != null) {
builder.field(VALUE_LIST_FIELD.getPreferredName(), valueList);
}
builder.endObject();
return builder;
}
public RuleConditionType getConditionType() {
return conditionType;
}
/**
* The field name to which the rule applies. Can be null, meaning the rule
* applies to all results.
*/
public String getFieldName() {
return fieldName;
}
/**
* The value of the field for which the rule applies. When set, the
* rule applies only to the results that have the fieldName/fieldValue pair.
* When null, the rule applies to all values of the specified field
* name. Only applicable when fieldName is not null.
*/
public String getFieldValue() {
return fieldValue;
}
public Condition getCondition() {
return condition;
}
/**
* The unique identifier of a list. Required when the rule type is
* categorical. Should be null for all other types.
*/
public String getValueList() {
return valueList;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof RuleCondition == false) {
return false;
}
RuleCondition other = (RuleCondition) obj;
return Objects.equals(conditionType, other.conditionType) && Objects.equals(fieldName, other.fieldName)
&& Objects.equals(fieldValue, other.fieldValue) && Objects.equals(condition, other.condition)
&& Objects.equals(valueList, other.valueList);
}
@Override
public int hashCode() {
return Objects.hash(conditionType, fieldName, fieldValue, condition, valueList);
}
public static RuleCondition createCategorical(String fieldName, String valueList) {
return new RuleCondition(RuleConditionType.CATEGORICAL, fieldName, null, null, valueList);
}
private static void verifyFieldsBoundToType(RuleCondition ruleCondition) throws ElasticsearchParseException {
switch (ruleCondition.getConditionType()) {
case CATEGORICAL:
verifyCategorical(ruleCondition);
break;
case NUMERICAL_ACTUAL:
case NUMERICAL_TYPICAL:
case NUMERICAL_DIFF_ABS:
verifyNumerical(ruleCondition);
break;
default:
throw new IllegalStateException();
}
}
private static void verifyCategorical(RuleCondition ruleCondition) throws ElasticsearchParseException {
checkCategoricalHasNoField(Condition.CONDITION_FIELD.getPreferredName(), ruleCondition.getCondition());
checkCategoricalHasNoField(RuleCondition.FIELD_VALUE_FIELD.getPreferredName(), ruleCondition.getFieldValue());
checkCategoricalHasField(RuleCondition.VALUE_LIST_FIELD.getPreferredName(), ruleCondition.getValueList());
}
private static void checkCategoricalHasNoField(String fieldName, Object fieldValue) throws ElasticsearchParseException {
if (fieldValue != null) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_CATEGORICAL_INVALID_OPTION, fieldName);
throw new IllegalArgumentException(msg);
}
}
private static void checkCategoricalHasField(String fieldName, Object fieldValue) throws ElasticsearchParseException {
if (fieldValue == null) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_CATEGORICAL_MISSING_OPTION, fieldName);
throw new IllegalArgumentException(msg);
}
}
private static void verifyNumerical(RuleCondition ruleCondition) throws ElasticsearchParseException {
checkNumericalHasNoField(RuleCondition.VALUE_LIST_FIELD.getPreferredName(), ruleCondition.getValueList());
checkNumericalHasField(Condition.CONDITION_FIELD.getPreferredName(), ruleCondition.getCondition());
if (ruleCondition.getFieldName() != null && ruleCondition.getFieldValue() == null) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_WITH_FIELD_NAME_REQUIRES_FIELD_VALUE);
throw new IllegalArgumentException(msg);
}
checkNumericalConditionOperatorsAreValid(ruleCondition);
}
private static void checkNumericalHasNoField(String fieldName, Object fieldValue) throws ElasticsearchParseException {
if (fieldValue != null) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_INVALID_OPTION, fieldName);
throw new IllegalArgumentException(msg);
}
}
private static void checkNumericalHasField(String fieldName, Object fieldValue) throws ElasticsearchParseException {
if (fieldValue == null) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_MISSING_OPTION, fieldName);
throw new IllegalArgumentException(msg);
}
}
private static void verifyFieldValueRequiresFieldName(RuleCondition ruleCondition) throws ElasticsearchParseException {
if (ruleCondition.getFieldValue() != null && ruleCondition.getFieldName() == null) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_MISSING_FIELD_NAME,
ruleCondition.getFieldValue());
throw new IllegalArgumentException(msg);
}
}
static EnumSet<Operator> VALID_CONDITION_OPERATORS = EnumSet.of(Operator.LT, Operator.LTE, Operator.GT, Operator.GTE);
private static void checkNumericalConditionOperatorsAreValid(RuleCondition ruleCondition) throws ElasticsearchParseException {
Operator operator = ruleCondition.getCondition().getOperator();
if (!VALID_CONDITION_OPERATORS.contains(operator)) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_INVALID_OPERATOR, operator);
throw new IllegalArgumentException(msg);
}
}
}

View File

@ -0,0 +1,55 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.detectionrules;
import java.io.IOException;
import java.util.Locale;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
public enum RuleConditionType implements Writeable {
CATEGORICAL("categorical"),
NUMERICAL_ACTUAL("numerical_actual"),
NUMERICAL_TYPICAL("numerical_typical"),
NUMERICAL_DIFF_ABS("numerical_diff_abs");
private String name;
private RuleConditionType(String name) {
this.name = name;
}
public String getName() {
return name;
}
/**
* Case-insensitive from string method.
*
* @param value
* String representation
* @return The condition type
*/
public static RuleConditionType forString(String value) {
return RuleConditionType.valueOf(value.toUpperCase(Locale.ROOT));
}
public static RuleConditionType readFromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown RuleConditionType ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
}

View File

@ -0,0 +1,47 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.extraction;
import org.apache.logging.log4j.Logger;
import java.io.IOException;
import java.io.InputStream;
import java.util.Optional;
public interface DataExtractor
{
/**
* Set up the extractor for a new search
*
* @param start start time
* @param end end time
* @param logger logger
*/
void newSearch(long start, long end, Logger logger) throws IOException;
/**
* Cleans up after a search.
*/
void clear();
/**
* @return {@code true} if the search has not finished yet, or {@code false} otherwise
*/
boolean hasNext();
/**
* Returns the next available extracted data. Note that the last time this
* method is called the extracted data may be empty.
* @return an optional input stream with the next available extracted data
* @throws IOException if an error occurs while extracting the data
*/
Optional<InputStream> next() throws IOException;
/**
* Cancel the current search.
*/
void cancel();
}
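
A hypothetical consumption loop for the contract above: set up a search, drain it chunk by chunk, and always clean up. Only types already imported in the interface above are used.

    static void drain(DataExtractor extractor, long start, long end, Logger logger) throws IOException {
        extractor.newSearch(start, end, logger);
        try {
            while (extractor.hasNext()) {
                Optional<InputStream> chunk = extractor.next();
                if (chunk.isPresent()) {
                    try (InputStream data = chunk.get()) {
                        // hand the extracted data to the processing layer here
                    }
                }
            }
        } finally {
            extractor.clear();
        }
    }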

View File

@ -0,0 +1,13 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.extraction;
import org.elasticsearch.xpack.prelert.job.Job;
public interface DataExtractorFactory
{
DataExtractor newExtractor(Job job);
}

View File

@ -0,0 +1,232 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.logging;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import java.io.IOException;
import java.util.Date;
import java.util.Objects;
/**
* Provide access to the C++ log messages that arrive via a named pipe in JSON format.
*/
public class CppLogMessage extends ToXContentToBytes implements Writeable {
/**
* Field Names (these are defined by log4cxx; we have no control over them)
*/
public static final ParseField LOGGER_FIELD = new ParseField("logger");
public static final ParseField TIMESTAMP_FIELD = new ParseField("timestamp");
public static final ParseField LEVEL_FIELD = new ParseField("level");
public static final ParseField PID_FIELD = new ParseField("pid");
public static final ParseField THREAD_FIELD = new ParseField("thread");
public static final ParseField MESSAGE_FIELD = new ParseField("message");
public static final ParseField CLASS_FIELD = new ParseField("class");
public static final ParseField METHOD_FIELD = new ParseField("method");
public static final ParseField FILE_FIELD = new ParseField("file");
public static final ParseField LINE_FIELD = new ParseField("line");
public static final ObjectParser<CppLogMessage, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>(
LOGGER_FIELD.getPreferredName(), CppLogMessage::new);
static {
PARSER.declareString(CppLogMessage::setLogger, LOGGER_FIELD);
PARSER.declareField(CppLogMessage::setTimestamp, p -> new Date(p.longValue()), TIMESTAMP_FIELD, ValueType.LONG);
PARSER.declareString(CppLogMessage::setLevel, LEVEL_FIELD);
PARSER.declareLong(CppLogMessage::setPid, PID_FIELD);
PARSER.declareString(CppLogMessage::setThread, THREAD_FIELD);
PARSER.declareString(CppLogMessage::setMessage, MESSAGE_FIELD);
PARSER.declareString(CppLogMessage::setClazz, CLASS_FIELD);
PARSER.declareString(CppLogMessage::setMethod, METHOD_FIELD);
PARSER.declareString(CppLogMessage::setFile, FILE_FIELD);
PARSER.declareLong(CppLogMessage::setLine, LINE_FIELD);
}
/**
* Elasticsearch type
*/
public static final ParseField TYPE = new ParseField("cppLogMessage");
private String logger = "";
private Date timestamp;
private String level = "";
private long pid = 0;
private String thread = "";
private String message = "";
private String clazz = "";
private String method = "";
private String file = "";
private long line = 0;
public CppLogMessage() {
timestamp = new Date();
}
public CppLogMessage(StreamInput in) throws IOException {
logger = in.readString();
timestamp = new Date(in.readVLong());
level = in.readString();
pid = in.readVLong();
thread = in.readString();
message = in.readString();
clazz = in.readString();
method = in.readString();
file = in.readString();
line = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(logger);
out.writeVLong(timestamp.getTime());
out.writeString(level);
out.writeVLong(pid);
out.writeString(thread);
out.writeString(message);
out.writeString(clazz);
out.writeString(method);
out.writeString(file);
out.writeVLong(line);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(LOGGER_FIELD.getPreferredName(), logger);
builder.field(TIMESTAMP_FIELD.getPreferredName(), timestamp.getTime());
builder.field(LEVEL_FIELD.getPreferredName(), level);
builder.field(PID_FIELD.getPreferredName(), pid);
builder.field(THREAD_FIELD.getPreferredName(), thread);
builder.field(MESSAGE_FIELD.getPreferredName(), message);
builder.field(CLASS_FIELD.getPreferredName(), clazz);
builder.field(METHOD_FIELD.getPreferredName(), method);
builder.field(FILE_FIELD.getPreferredName(), file);
builder.field(LINE_FIELD.getPreferredName(), line);
builder.endObject();
return builder;
}
public String getLogger() {
return logger;
}
public void setLogger(String logger) {
this.logger = logger;
}
public Date getTimestamp() {
return this.timestamp;
}
public void setTimestamp(Date d) {
this.timestamp = d;
}
public String getLevel() {
return level;
}
public void setLevel(String level) {
this.level = level;
}
public long getPid() {
return pid;
}
public void setPid(long pid) {
this.pid = pid;
}
public String getThread() {
return thread;
}
public void setThread(String thread) {
this.thread = thread;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
/**
* This is unreliable for some C++ compilers - probably best not to display it prominently
*/
public String getClazz() {
return clazz;
}
public void setClazz(String clazz) {
this.clazz = clazz;
}
/**
* This is unreliable for some C++ compilers - probably best not to display it prominently
*/
public String getMethod() {
return method;
}
public void setMethod(String method) {
this.method = method;
}
public String getFile() {
return file;
}
public void setFile(String file) {
this.file = file;
}
public long getLine() {
return line;
}
public void setLine(long line) {
this.line = line;
}
@Override
public int hashCode() {
return Objects.hash(logger, timestamp, level, pid, thread, message, clazz, method, file, line);
}
/**
* Compare all the fields.
*/
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (!(other instanceof CppLogMessage)) {
return false;
}
CppLogMessage that = (CppLogMessage)other;
return Objects.equals(this.logger, that.logger) && Objects.equals(this.timestamp, that.timestamp)
&& Objects.equals(this.level, that.level) && this.pid == that.pid
&& Objects.equals(this.thread, that.thread) && Objects.equals(this.message, that.message)
&& Objects.equals(this.clazz, that.clazz) && Objects.equals(this.method, that.method)
&& Objects.equals(this.file, that.file) && this.line == that.line;
}
}

View File

@ -0,0 +1,172 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.logging;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.CompositeBytesReference;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.util.Deque;
import java.util.Objects;
/**
* Handle a stream of C++ log messages that arrive via a named pipe in JSON format.
* Retains the last few error messages so that they can be passed back in a REST response
* if the C++ process dies.
*/
public class CppLogMessageHandler implements Closeable {
private static final int DEFAULT_READBUF_SIZE = 1024;
private static final int DEFAULT_ERROR_STORE_SIZE = 5;
private final Logger logger;
private final InputStream inputStream;
private final int readBufSize;
private final int errorStoreSize;
private final Deque<String> errorStore;
private volatile boolean hasLogStreamEnded;
private volatile boolean seenFatalError;
/**
* @param jobId May be null or empty if the logs are from a process not associated with a job.
* @param inputStream May not be null.
*/
public CppLogMessageHandler(String jobId, InputStream inputStream) {
this(inputStream, Strings.isNullOrEmpty(jobId) ? Loggers.getLogger(CppLogMessageHandler.class) : Loggers.getLogger(jobId),
DEFAULT_READBUF_SIZE, DEFAULT_ERROR_STORE_SIZE);
}
/**
* For testing - allows meddling with the logger, read buffer size and error store size.
*/
CppLogMessageHandler(InputStream inputStream, Logger logger, int readBufSize, int errorStoreSize) {
this.logger = Objects.requireNonNull(logger);
this.inputStream = Objects.requireNonNull(inputStream);
this.readBufSize = readBufSize;
this.errorStoreSize = errorStoreSize;
this.errorStore = ConcurrentCollections.newDeque();
hasLogStreamEnded = false;
}
@Override
public void close() throws IOException {
inputStream.close();
}
/**
* Tail the InputStream provided to the constructor, handling each complete log document as it arrives.
* This method will not return until either end-of-file is detected on the InputStream or the
* InputStream throws an exception.
*/
public void tailStream() throws IOException {
try {
XContent xContent = XContentFactory.xContent(XContentType.JSON);
BytesReference bytesRef = null;
byte[] readBuf = new byte[readBufSize];
for (int bytesRead = inputStream.read(readBuf); bytesRead != -1; bytesRead = inputStream.read(readBuf)) {
if (bytesRef == null) {
bytesRef = new BytesArray(readBuf, 0, bytesRead);
} else {
bytesRef = new CompositeBytesReference(bytesRef, new BytesArray(readBuf, 0, bytesRead));
}
bytesRef = parseMessages(xContent, bytesRef);
readBuf = new byte[readBufSize];
}
} finally {
hasLogStreamEnded = true;
}
}
public boolean hasLogStreamEnded() {
return hasLogStreamEnded;
}
public boolean seenFatalError() {
return seenFatalError;
}
/**
* Expected to be called very infrequently.
*/
public String getErrors() {
String[] errorSnapshot = errorStore.toArray(new String[0]);
StringBuilder errors = new StringBuilder();
for (String error : errorSnapshot) {
errors.append(error).append('\n');
}
return errors.toString();
}
private BytesReference parseMessages(XContent xContent, BytesReference bytesRef) {
byte marker = xContent.streamSeparator();
int from = 0;
while (true) {
int nextMarker = findNextMarker(marker, bytesRef, from);
if (nextMarker == -1) {
// No more markers in this block
break;
}
parseMessage(xContent, bytesRef.slice(from, nextMarker - from));
from = nextMarker + 1;
}
return bytesRef.slice(from, bytesRef.length() - from);
}
private void parseMessage(XContent xContent, BytesReference bytesRef) {
try {
XContentParser parser = xContent.createParser(bytesRef);
CppLogMessage msg = CppLogMessage.PARSER.apply(parser, () -> ParseFieldMatcher.STRICT);
Level level = Level.getLevel(msg.getLevel());
if (level == null) {
// This isn't expected to ever happen
level = Level.WARN;
} else if (level.isMoreSpecificThan(Level.ERROR)) {
// Keep the last few error messages to report if the process dies
storeError(msg.getMessage());
if (level.isMoreSpecificThan(Level.FATAL)) {
seenFatalError = true;
}
}
// TODO: Is there a way to preserve the original timestamp when re-logging?
logger.log(level, "{}/{} {}@{} {}", msg.getLogger(), msg.getPid(), msg.getFile(), msg.getLine(), msg.getMessage());
// TODO: Could send the message for indexing instead of or as well as logging it
} catch (IOException e) {
logger.warn("Failed to parse C++ log message: " + bytesRef.utf8ToString(), e);
}
}
private void storeError(String error) {
if (Strings.isNullOrEmpty(error) || errorStoreSize <= 0) {
return;
}
if (errorStore.size() >= errorStoreSize) {
errorStore.removeFirst();
}
errorStore.offerLast(error);
}
private static int findNextMarker(byte marker, BytesReference bytesRef, int from) {
for (int i = from; i < bytesRef.length(); ++i) {
if (bytesRef.get(i) == marker) {
return i;
}
}
return -1;
}
}
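
A minimal usage sketch (not part of this change) of how a caller might drive the handler above: it assumes the C++ process's log named pipe is already open as an InputStream, and the ExecutorService is illustrative.

import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.ExecutorService;
import org.elasticsearch.xpack.prelert.job.logging.CppLogMessageHandler;

class CppLogTailSketch {
    // Tails the C++ log pipe on a worker thread and later surfaces retained errors.
    String tail(String jobId, InputStream logPipe, ExecutorService executor) {
        CppLogMessageHandler handler = new CppLogMessageHandler(jobId, logPipe);
        executor.execute(() -> {
            try {
                handler.tailStream();             // blocks until EOF on the pipe
            } catch (IOException e) {
                // hasLogStreamEnded() is still set by tailStream()'s finally block
            }
        });
        // After the C++ process exits, the last few ERROR/FATAL messages can be reported
        return handler.seenFatalError() ? handler.getErrors() : "";
    }
}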

View File

@ -0,0 +1,122 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.logs;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.xpack.prelert.PrelertPlugin;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Locale;
/**
* Manage job logs
*/
public class JobLogs {
private static final Logger LOGGER = Loggers.getLogger(JobLogs.class);
/**
* If this setting is enabled the log files aren't deleted when the job
* is deleted.
*/
public static final Setting<Boolean> DONT_DELETE_LOGS_SETTING = Setting.boolSetting("preserve.logs", false, Property.NodeScope);
private boolean m_DontDelete;
public JobLogs(Settings settings)
{
m_DontDelete = DONT_DELETE_LOGS_SETTING.get(settings);
}
/**
* Delete all the log files and log directory associated with a job.
*
* @param jobId
* the jobId
* @return true if success.
*/
public boolean deleteLogs(Environment env, String jobId) {
return deleteLogs(env.logsFile().resolve(PrelertPlugin.NAME), jobId);
}
/**
* Delete all the files in the directory
*
* <pre>
* logDir / jobId
* </pre>
*
* @param logDir
* The base directory of the log files
* @param jobId
* the jobId
*/
public boolean deleteLogs(Path logDir, String jobId) {
if (m_DontDelete) {
return true;
}
Path logPath = sanitizePath(logDir.resolve(jobId), logDir);
LOGGER.info(String.format(Locale.ROOT, "Deleting log files %s/%s", logDir, jobId));
try (DirectoryStream<Path> directoryStream = Files.newDirectoryStream(logPath)) {
for (Path logFile : directoryStream) {
try {
Files.delete(logFile);
} catch (IOException e) {
String msg = "Cannot delete log file " + logDir + ". ";
msg += (e.getCause() != null) ? e.getCause().getMessage() : e.getMessage();
LOGGER.warn(msg);
}
}
} catch (IOException e) {
String msg = "Cannot open the log directory " + logDir + ". ";
msg += (e.getCause() != null) ? e.getCause().getMessage() : e.getMessage();
LOGGER.warn(msg);
}
// delete the directory
try {
Files.delete(logPath);
} catch (IOException e) {
String msg = "Cannot delete log directory " + logDir + ". ";
msg += (e.getCause() != null) ? e.getCause().getMessage() : e.getMessage();
LOGGER.warn(msg);
return false;
}
return true;
}
/**
* Normalize a file path resolving .. and . directories and check the
* resulting path is below the rootDir directory.
*
* Throws an exception if the path is outside the logs directory, e.g.
* logs/../lic/license resolves to lic/license and would throw.
*/
public Path sanitizePath(Path filePath, Path rootDir) {
Path normalizedPath = filePath.normalize();
Path rootPath = rootDir.normalize();
if (normalizedPath.startsWith(rootPath) == false) {
String msg = Messages.getMessage(Messages.LOGFILE_INVALID_PATH, filePath);
LOGGER.warn(msg);
throw new IllegalArgumentException(msg);
}
return normalizedPath;
}
}
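
A quick illustration (not part of the change) of how JobLogs might be used to remove a job's log directory; the logs/prelert base path is an assumption standing in for the environment's logs directory.

import java.nio.file.Path;
import java.nio.file.Paths;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.prelert.job.logs.JobLogs;

class JobLogsSketch {
    // Deletes <logDir>/<jobId> unless the preserve.logs setting is enabled.
    boolean cleanUp(String jobId) {
        JobLogs jobLogs = new JobLogs(Settings.EMPTY);   // preserve.logs defaults to false
        Path logDir = Paths.get("logs", "prelert");      // illustrative base directory
        return jobLogs.deleteLogs(logDir, jobId);        // false only if the directory can't be removed
    }
}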

View File

@ -0,0 +1,207 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.manager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.prelert.job.DataCounts;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.JobStatus;
import org.elasticsearch.xpack.prelert.job.ModelSizeStats;
import org.elasticsearch.xpack.prelert.job.data.DataProcessor;
import org.elasticsearch.xpack.prelert.job.metadata.Allocation;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchJobDataCountsPersister;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchPersister;
import org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchUsagePersister;
import org.elasticsearch.xpack.prelert.job.persistence.JobDataCountsPersister;
import org.elasticsearch.xpack.prelert.job.persistence.JobResultsPersister;
import org.elasticsearch.xpack.prelert.job.process.autodetect.AutodetectCommunicator;
import org.elasticsearch.xpack.prelert.job.process.autodetect.AutodetectProcess;
import org.elasticsearch.xpack.prelert.job.process.autodetect.AutodetectProcessFactory;
import org.elasticsearch.xpack.prelert.job.process.autodetect.output.parsing.AutoDetectResultProcessor;
import org.elasticsearch.xpack.prelert.job.process.autodetect.output.parsing.AutodetectResultsParser;
import org.elasticsearch.xpack.prelert.job.process.autodetect.params.DataLoadParams;
import org.elasticsearch.xpack.prelert.job.process.autodetect.params.InterimResultsParams;
import org.elasticsearch.xpack.prelert.job.process.normalizer.noop.NoOpRenormaliser;
import org.elasticsearch.xpack.prelert.job.status.StatusReporter;
import org.elasticsearch.xpack.prelert.job.usage.UsageReporter;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.time.ZonedDateTime;
import java.util.Locale;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeoutException;
public class AutodetectProcessManager extends AbstractComponent implements DataProcessor {
private final Client client;
private final Environment env;
private final ThreadPool threadPool;
private final JobManager jobManager;
private final AutodetectResultsParser parser;
private final AutodetectProcessFactory autodetectProcessFactory;
private final ConcurrentMap<String, AutodetectCommunicator> autoDetectCommunicatorByJob;
public AutodetectProcessManager(Settings settings, Client client, Environment env, ThreadPool threadPool, JobManager jobManager,
AutodetectResultsParser parser, AutodetectProcessFactory autodetectProcessFactory) {
super(settings);
this.client = client;
this.env = env;
this.threadPool = threadPool;
this.parser = parser;
this.autodetectProcessFactory = autodetectProcessFactory;
this.jobManager = jobManager;
this.autoDetectCommunicatorByJob = new ConcurrentHashMap<>();
}
@Override
public DataCounts processData(String jobId, InputStream input, DataLoadParams params) {
Allocation allocation = jobManager.getJobAllocation(jobId);
if (allocation.getStatus().isAnyOf(JobStatus.PAUSING, JobStatus.PAUSED)) {
return new DataCounts(jobId);
}
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.get(jobId);
if (communicator == null) {
communicator = create(jobId, params.isIgnoreDowntime());
autoDetectCommunicatorByJob.put(jobId, communicator);
}
try {
if (params.isResettingBuckets()) {
communicator.writeResetBucketsControlMessage(params);
}
return communicator.writeToJob(input);
// TODO check for errors from autodetect
} catch (IOException e) {
String msg = String.format(Locale.ROOT, "Exception writing to process for job %s", jobId);
if (e.getCause() instanceof TimeoutException) {
logger.warn("Connection to process was dropped due to a timeout - if you are feeding this job from a connector it " +
"may be that your connector stalled for too long", e.getCause());
}
throw ExceptionsHelper.serverError(msg);
}
}
// TODO (norelease): here we must validate whether we have enough threads in the TP in order to start the analytical process.
// Otherwise we are not able to communicate via all the named pipes and we can run into a deadlock
AutodetectCommunicator create(String jobId, boolean ignoreDowntime) {
Job job = jobManager.getJobOrThrowIfUnknown(jobId);
Logger jobLogger = Loggers.getLogger(job.getJobId());
ElasticsearchUsagePersister usagePersister = new ElasticsearchUsagePersister(client, jobLogger);
UsageReporter usageReporter = new UsageReporter(settings, job.getJobId(), usagePersister, jobLogger);
JobDataCountsPersister jobDataCountsPersister = new ElasticsearchJobDataCountsPersister(client, jobLogger);
StatusReporter statusReporter = new StatusReporter(env, settings, job.getJobId(), job.getCounts(), usageReporter,
jobDataCountsPersister, jobLogger, job.getAnalysisConfig().getBucketSpanOrDefault());
AutodetectProcess process = autodetectProcessFactory.createAutodetectProcess(job, ignoreDowntime);
JobResultsPersister persister = new ElasticsearchPersister(jobId, client);
// TODO Port the normalizer from the old project
AutoDetectResultProcessor processor = new AutoDetectResultProcessor(new NoOpRenormaliser(), persister, parser);
return new AutodetectCommunicator(threadPool, job, process, jobLogger, persister, statusReporter, processor);
}
@Override
public void flushJob(String jobId, InterimResultsParams params) {
logger.debug("Flushing job {}", jobId);
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.get(jobId);
if (communicator == null) {
logger.debug("Cannot flush: no active autodetect process for job {}", jobId);
return;
}
try {
communicator.flushJob(params);
// TODO check for errors from autodetect
} catch (IOException ioe) {
String msg = String.format(Locale.ROOT, "Exception flushing process for job %s", jobId);
logger.warn(msg);
throw ExceptionsHelper.serverError(msg, ioe);
}
}
public void writeUpdateConfigMessage(String jobId, String config) throws IOException {
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.get(jobId);
if (communicator == null) {
logger.debug("Cannot update config: no active autodetect process for job {}", jobId);
return;
}
communicator.writeUpdateConfigMessage(config);
// TODO check for errors from autodetect
}
@Override
public void closeJob(String jobId) {
logger.debug("Closing job {}", jobId);
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.get(jobId);
if (communicator == null) {
logger.debug("Cannot close: no active autodetect process for job {}", jobId);
return;
}
try {
communicator.close();
// TODO check for errors from autodetect
// TODO delete associated files (model config etc)
} catch (IOException e) {
logger.info("Exception closing stopped process input stream", e);
} finally {
autoDetectCommunicatorByJob.remove(jobId);
setJobFinishedTimeAndStatus(jobId, JobStatus.CLOSED);
}
}
public int numberOfRunningJobs() {
return autoDetectCommunicatorByJob.size();
}
public boolean jobHasActiveAutodetectProcess(String jobId) {
return autoDetectCommunicatorByJob.get(jobId) != null;
}
public Duration jobUpTime(String jobId) {
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.get(jobId);
if (communicator == null) {
return Duration.ZERO;
}
return Duration.between(communicator.getProcessStartTime(), ZonedDateTime.now());
}
private void setJobFinishedTimeAndStatus(String jobId, JobStatus status) {
// NORELEASE Implement this.
// Perhaps move the JobStatus and finish time to a separate document stored outside the cluster state
logger.error("Cannot set finished job status and time- Not Implemented");
}
public Optional<ModelSizeStats> getModelSizeStats(String jobId) {
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.get(jobId);
if (communicator == null) {
return Optional.empty();
}
return communicator.getModelSizeStats();
}
public Optional<DataCounts> getDataCounts(String jobId) {
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.get(jobId);
if (communicator == null) {
return Optional.empty();
}
return communicator.getDataCounts();
}
}
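
A hedged sketch (not part of the change) of the call sequence a data upload goes through against AutodetectProcessManager; the parameter objects are passed in rather than built here, since their construction lives elsewhere in the plugin.

import java.io.InputStream;
import org.elasticsearch.xpack.prelert.job.DataCounts;
import org.elasticsearch.xpack.prelert.job.manager.AutodetectProcessManager;
import org.elasticsearch.xpack.prelert.job.process.autodetect.params.DataLoadParams;
import org.elasticsearch.xpack.prelert.job.process.autodetect.params.InterimResultsParams;

class AutodetectFlowSketch {
    // Feeds one batch of records to a job, flushes interim results, then closes it.
    DataCounts run(AutodetectProcessManager manager, String jobId, InputStream data,
                   DataLoadParams loadParams, InterimResultsParams flushParams) {
        DataCounts counts = manager.processData(jobId, data, loadParams); // lazily creates the process
        manager.flushJob(jobId, flushParams);                             // no-op if no active process
        manager.closeJob(jobId);                                          // removes the communicator
        return counts;
    }
}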

View File

@ -0,0 +1,559 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.manager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.xpack.prelert.action.DeleteJobAction;
import org.elasticsearch.xpack.prelert.action.PauseJobAction;
import org.elasticsearch.xpack.prelert.action.PutJobAction;
import org.elasticsearch.xpack.prelert.action.ResumeJobAction;
import org.elasticsearch.xpack.prelert.action.RevertModelSnapshotAction;
import org.elasticsearch.xpack.prelert.action.StartJobSchedulerAction;
import org.elasticsearch.xpack.prelert.action.StopJobSchedulerAction;
import org.elasticsearch.xpack.prelert.job.DataCounts;
import org.elasticsearch.xpack.prelert.job.IgnoreDowntime;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.JobSchedulerStatus;
import org.elasticsearch.xpack.prelert.job.JobStatus;
import org.elasticsearch.xpack.prelert.job.ModelSnapshot;
import org.elasticsearch.xpack.prelert.job.SchedulerState;
import org.elasticsearch.xpack.prelert.job.audit.Auditor;
import org.elasticsearch.xpack.prelert.job.logs.JobLogs;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import org.elasticsearch.xpack.prelert.job.metadata.Allocation;
import org.elasticsearch.xpack.prelert.job.metadata.PrelertMetadata;
import org.elasticsearch.xpack.prelert.job.persistence.JobProvider;
import org.elasticsearch.xpack.prelert.job.persistence.QueryPage;
import org.elasticsearch.xpack.prelert.job.results.AnomalyRecord;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* Allows interactions with jobs. The managed interactions include:
* <ul>
* <li>creation</li>
* <li>deletion</li>
* <li>flushing</li>
* <li>updating</li>
* <li>sending of data</li>
* <li>fetching jobs and results</li>
* <li>starting/stopping of scheduled jobs</li>
* </ul>
*/
public class JobManager {
private static final Logger LOGGER = Loggers.getLogger(JobManager.class);
/**
* Field name in which to store the API version in the usage info
*/
public static final String APP_VER_FIELDNAME = "appVer";
public static final String DEFAULT_RECORD_SORT_FIELD = AnomalyRecord.PROBABILITY.getPreferredName();
private final JobProvider jobProvider;
private final ClusterService clusterService;
private final Environment env;
private final Settings settings;
/**
* Create a JobManager
*/
public JobManager(Environment env, Settings settings, JobProvider jobProvider, ClusterService clusterService) {
this.env = env;
this.settings = settings;
this.jobProvider = Objects.requireNonNull(jobProvider);
this.clusterService = clusterService;
}
/**
* Get the details of the specified job wrapped in an <code>Optional</code>
*
* @param jobId
* the jobId
* @return An {@code Optional} containing the {@code Job} if a job
* with the given {@code jobId} exists, or an empty {@code Optional}
* otherwise
*/
public Optional<Job> getJob(String jobId, ClusterState clusterState) {
PrelertMetadata prelertMetadata = clusterState.getMetaData().custom(PrelertMetadata.TYPE);
Job job = prelertMetadata.getJobs().get(jobId);
if (job == null) {
return Optional.empty();
}
return Optional.of(job);
}
/**
* Get details of all Jobs.
*
* @param from
*            Skip the first N Jobs. This parameter is for paging results;
*            if not required, set to 0.
* @param size
* Take only this number of Jobs
* @return A query page object with hitCount set to the total number of jobs,
*         not only the number returned here as determined by the
*         <code>size</code> parameter.
*/
public QueryPage<Job> getJobs(int from, int size, ClusterState clusterState) {
PrelertMetadata prelertMetadata = clusterState.getMetaData().custom(PrelertMetadata.TYPE);
List<Job> jobs = prelertMetadata.getJobs().entrySet().stream()
.skip(from)
.limit(size)
.map(Map.Entry::getValue)
.collect(Collectors.toList());
return new QueryPage<>(jobs, prelertMetadata.getJobs().size());
}
/**
* Returns the non-null {@code Job} object for the given
* {@code jobId} or throws
* {@link org.elasticsearch.ResourceNotFoundException}
*
* @param jobId
* the jobId
* @return the {@code Job} if a job with the given {@code jobId}
* exists
* @throws org.elasticsearch.ResourceNotFoundException
*             if there is no job matching the given {@code jobId}
*/
public Job getJobOrThrowIfUnknown(String jobId) {
return getJobOrThrowIfUnknown(clusterService.state(), jobId);
}
public Allocation getJobAllocation(String jobId) {
return getAllocation(clusterService.state(), jobId);
}
/**
* Returns the non-null {@code Job} object for the given
* {@code jobId} or throws
* {@link org.elasticsearch.ResourceNotFoundException}
*
* @param jobId
* the jobId
* @return the {@code Job} if a job with the given {@code jobId}
* exists
* @throws org.elasticsearch.ResourceNotFoundException
*             if there is no job matching the given {@code jobId}
*/
public Job getJobOrThrowIfUnknown(ClusterState clusterState, String jobId) {
PrelertMetadata prelertMetadata = clusterState.metaData().custom(PrelertMetadata.TYPE);
Job job = prelertMetadata.getJobs().get(jobId);
if (job == null) {
throw ExceptionsHelper.missingJobException(jobId);
}
return job;
}
/**
* Stores a job in the cluster state
*/
public void putJob(PutJobAction.Request request, ActionListener<PutJobAction.Response> actionListener) {
Job job = request.getJob();
ActionListener<Boolean> delegateListener = new ActionListener<Boolean>() {
@Override
public void onResponse(Boolean jobSaved) {
jobProvider.createJobRelatedIndices(job, new ActionListener<Boolean>() {
@Override
public void onResponse(Boolean indicesCreated) {
// NORELEASE: make auditing async too (we can't do
// blocking stuff here):
// audit(jobDetails.getId()).info(Messages.getMessage(Messages.JOB_AUDIT_CREATED));
// Also I wonder if we need audit logging infrastructure
// in prelert, as when we merge into xpack
// we can use its audit trail. See:
// https://github.com/elastic/prelert-legacy/issues/48
actionListener.onResponse(new PutJobAction.Response(jobSaved && indicesCreated, job));
}
@Override
public void onFailure(Exception e) {
actionListener.onFailure(e);
}
});
}
@Override
public void onFailure(Exception e) {
actionListener.onFailure(e);
}
};
clusterService.submitStateUpdateTask("put-job-" + job.getId(),
new AckedClusterStateUpdateTask<Boolean>(request, delegateListener) {
@Override
protected Boolean newResponse(boolean acknowledged) {
return acknowledged;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
return innerPutJob(job, request.isOverwrite(), currentState);
}
});
}
ClusterState innerPutJob(Job job, boolean overwrite, ClusterState currentState) {
PrelertMetadata currentPrelertMetadata = currentState.metaData().custom(PrelertMetadata.TYPE);
PrelertMetadata.Builder builder = new PrelertMetadata.Builder(currentPrelertMetadata);
builder.putJob(job, overwrite);
ClusterState.Builder newState = ClusterState.builder(currentState);
newState.metaData(MetaData.builder(currentState.getMetaData()).putCustom(PrelertMetadata.TYPE, builder.build()).build());
return newState.build();
}
/**
* Deletes a job.
*
* The clean-up involves:
* <ul>
* <li>Deleting the index containing job results</li>
* <li>Deleting the job logs</li>
* <li>Removing the job from the cluster state</li>
* </ul>
*
* @param request
* the delete job request
* @param actionListener
* the action listener
*/
public void deleteJob(DeleteJobAction.Request request, ActionListener<DeleteJobAction.Response> actionListener) {
String jobId = request.getJobId();
LOGGER.debug("Deleting job '" + jobId + "'");
// NORELEASE: Should also delete the running process
ActionListener<Boolean> delegateListener = new ActionListener<Boolean>() {
@Override
public void onResponse(Boolean jobDeleted) {
jobProvider.deleteJobRelatedIndices(request.getJobId(), new ActionListener<Boolean>() {
@Override
public void onResponse(Boolean indicesDeleted) {
new JobLogs(settings).deleteLogs(env, jobId);
// NORELEASE: This is not the place to audit log
// (it indexes a document), because this method is
// executed on the cluster state update task thread
// and any action performed on that thread should be
// quick.
// audit(jobId).info(Messages.getMessage(Messages.JOB_AUDIT_DELETED));
// Also I wonder if we need audit logging
// infrastructure in prelert, as when we merge into
// xpack we can use its audit trail. See:
// https://github.com/elastic/prelert-legacy/issues/48
actionListener.onResponse(new DeleteJobAction.Response(jobDeleted && indicesDeleted));
}
@Override
public void onFailure(Exception e) {
actionListener.onFailure(e);
}
});
}
@Override
public void onFailure(Exception e) {
actionListener.onFailure(e);
}
};
clusterService.submitStateUpdateTask("delete-job-" + jobId,
new AckedClusterStateUpdateTask<Boolean>(request, delegateListener) {
@Override
protected Boolean newResponse(boolean acknowledged) {
return acknowledged;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
return removeJobFromClusterState(jobId, currentState);
}
});
}
ClusterState removeJobFromClusterState(String jobId, ClusterState currentState) {
PrelertMetadata currentPrelertMetadata = currentState.metaData().custom(PrelertMetadata.TYPE);
PrelertMetadata.Builder builder = new PrelertMetadata.Builder(currentPrelertMetadata);
builder.removeJob(jobId);
Allocation allocation = currentPrelertMetadata.getAllocations().get(jobId);
if (allocation != null) {
SchedulerState schedulerState = allocation.getSchedulerState();
if (schedulerState != null && schedulerState.getStatus() != JobSchedulerStatus.STOPPED) {
throw ExceptionsHelper.conflictStatusException(Messages.getMessage(Messages.JOB_CANNOT_DELETE_WHILE_SCHEDULER_RUNS, jobId));
}
}
ClusterState.Builder newState = ClusterState.builder(currentState);
newState.metaData(MetaData.builder(currentState.getMetaData()).putCustom(PrelertMetadata.TYPE, builder.build()).build());
return newState.build();
}
public void startJobScheduler(StartJobSchedulerAction.Request request,
ActionListener<StartJobSchedulerAction.Response> actionListener) {
clusterService.submitStateUpdateTask("start-scheduler-job-" + request.getJobId(),
new AckedClusterStateUpdateTask<StartJobSchedulerAction.Response>(request, actionListener) {
@Override
protected StartJobSchedulerAction.Response newResponse(boolean acknowledged) {
return new StartJobSchedulerAction.Response(acknowledged);
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
long startTime = request.getSchedulerState().getStartTimeMillis();
Long endTime = request.getSchedulerState().getEndTimeMillis();
return innerUpdateSchedulerState(currentState, request.getJobId(), JobSchedulerStatus.STARTING, startTime, endTime);
}
});
}
public void stopJobScheduler(StopJobSchedulerAction.Request request, ActionListener<StopJobSchedulerAction.Response> actionListener) {
clusterService.submitStateUpdateTask("stop-scheduler-job-" + request.getJobId(),
new AckedClusterStateUpdateTask<StopJobSchedulerAction.Response>(request, actionListener) {
@Override
protected StopJobSchedulerAction.Response newResponse(boolean acknowledged) {
return new StopJobSchedulerAction.Response(acknowledged);
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
return innerUpdateSchedulerState(currentState, request.getJobId(), JobSchedulerStatus.STOPPING, null, null);
}
});
}
private void checkJobIsScheduled(Job job) {
if (job.getSchedulerConfig() == null) {
throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_SCHEDULER_NO_SUCH_SCHEDULED_JOB, job.getId()));
}
}
public void updateSchedulerStatus(String jobId, JobSchedulerStatus newStatus) {
clusterService.submitStateUpdateTask("update-scheduler-status-job-" + jobId, new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
return innerUpdateSchedulerState(currentState, jobId, newStatus, null, null);
}
@Override
public void onFailure(String source, Exception e) {
LOGGER.error("Error updating scheduler status: source=[" + source + "], status=[" + newStatus + "]", e);
}
});
}
private ClusterState innerUpdateSchedulerState(ClusterState currentState, String jobId, JobSchedulerStatus status,
Long startTime, Long endTime) {
Job job = getJobOrThrowIfUnknown(currentState, jobId);
checkJobIsScheduled(job);
Allocation allocation = getAllocation(currentState, jobId);
if (allocation.getSchedulerState() == null && status != JobSchedulerStatus.STARTING) {
throw new IllegalArgumentException("Can't change status to [" + status + "], because job's [" + jobId +
"] scheduler never started");
}
SchedulerState existingState = allocation.getSchedulerState();
if (existingState != null) {
if (startTime == null) {
startTime = existingState.getStartTimeMillis();
}
if (endTime == null) {
endTime = existingState.getEndTimeMillis();
}
}
existingState = new SchedulerState(status, startTime, endTime);
Allocation.Builder builder = new Allocation.Builder(allocation);
builder.setSchedulerState(existingState);
return innerUpdateAllocation(builder.build(), currentState);
}
private Allocation getAllocation(ClusterState state, String jobId) {
PrelertMetadata prelertMetadata = state.metaData().custom(PrelertMetadata.TYPE);
Allocation allocation = prelertMetadata.getAllocations().get(jobId);
if (allocation == null) {
throw new ResourceNotFoundException("No allocation found for job with id [" + jobId + "]");
}
return allocation;
}
private ClusterState innerUpdateAllocation(Allocation newAllocation, ClusterState currentState) {
PrelertMetadata currentPrelertMetadata = currentState.metaData().custom(PrelertMetadata.TYPE);
PrelertMetadata.Builder builder = new PrelertMetadata.Builder(currentPrelertMetadata);
builder.updateAllocation(newAllocation.getJobId(), newAllocation);
ClusterState.Builder newState = ClusterState.builder(currentState);
newState.metaData(MetaData.builder(currentState.getMetaData()).putCustom(PrelertMetadata.TYPE, builder.build()).build());
return newState.build();
}
public Auditor audit(String jobId) {
return jobProvider.audit(jobId);
}
public Auditor systemAudit() {
return jobProvider.audit("");
}
public void revertSnapshot(RevertModelSnapshotAction.Request request, ActionListener<RevertModelSnapshotAction.Response> actionListener,
ModelSnapshot modelSnapshot) {
clusterService.submitStateUpdateTask("revert-snapshot-" + request.getJobId(),
new AckedClusterStateUpdateTask<RevertModelSnapshotAction.Response>(request, actionListener) {
@Override
protected RevertModelSnapshotAction.Response newResponse(boolean acknowledged) {
RevertModelSnapshotAction.Response response;
if (acknowledged) {
response = new RevertModelSnapshotAction.Response(modelSnapshot);
// NORELEASE: This is not the place to audit log
// (indexes a document), because this method is
// executed on the cluster state update task thread
// and any action performed on that thread should be
// quick. (so no indexing documents)
// audit(jobId).info(Messages.getMessage(Messages.JOB_AUDIT_REVERTED,
// modelSnapshot.getDescription()));
} else {
response = new RevertModelSnapshotAction.Response();
}
return response;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
Job job = getJobOrThrowIfUnknown(currentState, request.getJobId());
Job.Builder builder = new Job.Builder(job);
builder.setModelSnapshotId(modelSnapshot.getSnapshotId());
if (request.getDeleteInterveningResults()) {
builder.setIgnoreDowntime(IgnoreDowntime.NEVER);
Date latestRecordTime = modelSnapshot.getLatestResultTimeStamp();
LOGGER.info("Resetting latest record time to '" + latestRecordTime + "'");
builder.setLastDataTime(latestRecordTime);
DataCounts counts = job.getCounts();
counts.setLatestRecordTimeStamp(latestRecordTime);
builder.setCounts(counts);
} else {
builder.setIgnoreDowntime(IgnoreDowntime.ONCE);
}
return innerPutJob(builder.build(), true, currentState);
}
});
}
public void pauseJob(PauseJobAction.Request request, ActionListener<PauseJobAction.Response> actionListener) {
clusterService.submitStateUpdateTask("pause-job-" + request.getJobId(),
new AckedClusterStateUpdateTask<PauseJobAction.Response>(request, actionListener) {
@Override
protected PauseJobAction.Response newResponse(boolean acknowledged) {
return new PauseJobAction.Response(acknowledged);
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
Job job = getJobOrThrowIfUnknown(currentState, request.getJobId());
Allocation allocation = getAllocation(currentState, job.getId());
checkJobIsNotScheduled(job);
if (!allocation.getStatus().isAnyOf(JobStatus.RUNNING, JobStatus.CLOSED)) {
throw ExceptionsHelper.conflictStatusException(
Messages.getMessage(Messages.JOB_CANNOT_PAUSE, job.getId(), allocation.getStatus()));
}
ClusterState newState = innerSetJobStatus(job.getId(), JobStatus.PAUSING, currentState);
Job.Builder jobBuilder = new Job.Builder(job);
jobBuilder.setIgnoreDowntime(IgnoreDowntime.ONCE);
return innerPutJob(jobBuilder.build(), true, newState);
}
});
}
public void resumeJob(ResumeJobAction.Request request, ActionListener<ResumeJobAction.Response> actionListener) {
clusterService.submitStateUpdateTask("resume-job-" + request.getJobId(),
new AckedClusterStateUpdateTask<ResumeJobAction.Response>(request, actionListener) {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
getJobOrThrowIfUnknown(request.getJobId());
Allocation allocation = getJobAllocation(request.getJobId());
if (allocation.getStatus() != JobStatus.PAUSED) {
throw ExceptionsHelper.conflictStatusException(
Messages.getMessage(Messages.JOB_CANNOT_RESUME, request.getJobId(), allocation.getStatus()));
}
Allocation.Builder builder = new Allocation.Builder(allocation);
builder.setStatus(JobStatus.CLOSED);
return innerUpdateAllocation(builder.build(), currentState);
}
@Override
protected ResumeJobAction.Response newResponse(boolean acknowledged) {
return new ResumeJobAction.Response(acknowledged);
}
});
}
public void setJobStatus(String jobId, JobStatus newStatus) {
clusterService.submitStateUpdateTask("set-paused-status-job-" + jobId, new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
return innerSetJobStatus(jobId, newStatus, currentState);
}
@Override
public void onFailure(String source, Exception e) {
LOGGER.error("Error updating job status: source=[" + source + "], new status [" + newStatus + "]", e);
}
});
}
private ClusterState innerSetJobStatus(String jobId, JobStatus newStatus, ClusterState currentState) {
Allocation allocation = getJobAllocation(jobId);
Allocation.Builder builder = new Allocation.Builder(allocation);
builder.setStatus(newStatus);
return innerUpdateAllocation(builder.build(), currentState);
}
private void checkJobIsNotScheduled(Job job) {
if (job.getSchedulerConfig() != null) {
throw ExceptionsHelper.conflictStatusException(Messages.getMessage(Messages.REST_ACTION_NOT_ALLOWED_FOR_SCHEDULED_JOB));
}
}
}
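
The read-only lookups above can be exercised as in the sketch below (not part of the change); the paging values are illustrative.

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.manager.JobManager;
import org.elasticsearch.xpack.prelert.job.persistence.QueryPage;

class JobLookupSketch {
    boolean isKnown(JobManager jobManager, ClusterState state, String jobId) {
        // Paging values are illustrative; hitCount on the returned page is the total job count
        QueryPage<Job> firstHundred = jobManager.getJobs(0, 100, state);
        return jobManager.getJob(jobId, state).isPresent();
    }
}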

View File

@ -0,0 +1,304 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.messages;
import java.text.MessageFormat;
import java.util.Locale;
import java.util.ResourceBundle;
/**
* Defines the keys for all the message strings
*/
public final class Messages
{
/**
* The base name of the bundle without the .properties extension
* or locale
*/
private static final String BUNDLE_NAME = "org.elasticsearch.xpack.prelert.job.messages.prelert_messages";
public static final String AUTODETECT_FLUSH_UNEXPTECTED_DEATH = "autodetect.flush.failed.unexpected.death";
public static final String AUTODETECT_FLUSH_TIMEOUT = "autodetect.flush.timeout";
public static final String CPU_LIMIT_JOB = "cpu.limit.jobs";
public static final String DATASTORE_ERROR_DELETING = "datastore.error.deleting";
public static final String DATASTORE_ERROR_DELETING_MISSING_INDEX = "datastore.error.deleting.missing.index";
public static final String DATASTORE_ERROR_EXECUTING_SCRIPT = "datastore.error.executing.script";
public static final String LICENSE_LIMIT_DETECTORS = "license.limit.detectors";
public static final String LICENSE_LIMIT_JOBS = "license.limit.jobs";
public static final String LICENSE_LIMIT_DETECTORS_REACTIVATE = "license.limit.detectors.reactivate";
public static final String LICENSE_LIMIT_JOBS_REACTIVATE = "license.limit.jobs.reactivate";
public static final String LICENSE_LIMIT_PARTITIONS = "license.limit.partitions";
public static final String LOGFILE_INVALID_CHARS_IN_PATH = "logfile.invalid.chars.path";
public static final String LOGFILE_INVALID_PATH = "logfile.invalid.path";
public static final String LOGFILE_MISSING = "logfile.missing";
public static final String LOGFILE_MISSING_DIRECTORY = "logfile.missing.directory";
public static final String JOB_AUDIT_CREATED = "job.audit.created";
public static final String JOB_AUDIT_DELETED = "job.audit.deleted";
public static final String JOB_AUDIT_PAUSED = "job.audit.paused";
public static final String JOB_AUDIT_RESUMED = "job.audit.resumed";
public static final String JOB_AUDIT_UPDATED = "job.audit.updated";
public static final String JOB_AUDIT_REVERTED = "job.audit.reverted";
public static final String JOB_AUDIT_OLD_RESULTS_DELETED = "job.audit.old.results.deleted";
public static final String JOB_AUDIT_SNAPSHOT_DELETED = "job.audit.snapshot.deleted";
public static final String JOB_AUDIT_SCHEDULER_STARTED_FROM_TO = "job.audit.scheduler.started.from.to";
public static final String JOB_AUDIT_SCHEDULER_CONTINUED_REALTIME = "job.audit.scheduler.continued.realtime";
public static final String JOB_AUDIT_SCHEDULER_STARTED_REALTIME = "job.audit.scheduler.started.realtime";
public static final String JOB_AUDIT_SCHEDULER_LOOKBACK_COMPLETED = "job.audit.scheduler.lookback.completed";
public static final String JOB_AUDIT_SCHEDULER_STOPPED = "job.audit.scheduler.stopped";
public static final String JOB_AUDIT_SCHEDULER_NO_DATA = "job.audit.scheduler.no.data";
public static final String JOB_AUDIR_SCHEDULER_DATA_SEEN_AGAIN = "job.audit.scheduler.data.seen.again";
public static final String JOB_AUDIT_SCHEDULER_DATA_ANALYSIS_ERROR = "job.audit.scheduler.data.analysis.error";
public static final String JOB_AUDIT_SCHEDULER_DATA_EXTRACTION_ERROR = "job.audit.scheduler.data.extraction.error";
public static final String JOB_AUDIT_SCHEDULER_RECOVERED = "job.audit.scheduler.recovered";
public static final String SYSTEM_AUDIT_STARTED = "system.audit.started";
public static final String SYSTEM_AUDIT_SHUTDOWN = "system.audit.shutdown";
public static final String JOB_CANNOT_DELETE_WHILE_SCHEDULER_RUNS = "job.cannot.delete.while.scheduler.runs";
public static final String JOB_CANNOT_PAUSE = "job.cannot.pause";
public static final String JOB_CANNOT_RESUME = "job.cannot.resume";
public static final String JOB_CONFIG_BYFIELD_INCOMPATIBLE_FUNCTION = "job.config.byField.incompatible.function";
public static final String JOB_CONFIG_BYFIELD_NEEDS_ANOTHER = "job.config.byField.needs.another";
public static final String JOB_CONFIG_CANNOT_ENCRYPT_PASSWORD = "job.config.cannot.encrypt.password";
public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_REQUIRE_CATEGORIZATION_FIELD_NAME = "job.config.categorization.filters."
+ "require.categorization.field.name";
public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_DUPLICATES = "job.config.categorization.filters.contains"
+ ".duplicates";
public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_EMPTY = "job.config.categorization.filter.contains.empty";
public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_INVALID_REGEX = "job.config.categorization.filter.contains."
+ "invalid.regex";
public static final String JOB_CONFIG_CONDITION_INVALID_OPERATOR = "job.config.condition.invalid.operator";
public static final String JOB_CONFIG_CONDITION_INVALID_VALUE_NULL = "job.config.condition.invalid.value.null";
public static final String JOB_CONFIG_CONDITION_INVALID_VALUE_NUMBER = "job.config.condition.invalid.value.numeric";
public static final String JOB_CONFIG_CONDITION_INVALID_VALUE_REGEX = "job.config.condition.invalid.value.regex";
public static final String JOB_CONFIG_CONDITION_UNKNOWN_OPERATOR = "job.config.condition.unknown.operator";
public static final String JOB_CONFIG_DATAFORMAT_REQUIRES_TRANSFORM = "job.config.dataformat.requires.transform";
public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_CATEGORICAL_INVALID_OPTION = "job.config.detectionrule.condition."
+ "categorical.invalid.option";
public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_CATEGORICAL_MISSING_OPTION = "job.config.detectionrule.condition."
+ "categorical.missing.option";
public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_INVALID_FIELD_NAME = "job.config.detectionrule.condition.invalid."
+ "fieldname";
public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_MISSING_FIELD_NAME = "job.config.detectionrule.condition.missing."
+ "fieldname";
public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_INVALID_OPERATOR = "job.config.detectionrule.condition."
+ "numerical.invalid.operator";
public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_INVALID_OPTION = "job.config.detectionrule.condition."
+ "numerical.invalid.option";
public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_MISSING_OPTION = "job.config.detectionrule.condition."
+ "numerical.missing.option";
public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_WITH_FIELD_NAME_REQUIRES_FIELD_VALUE = "job.config."
+ "detectionrule.condition.numerical.with.fieldname.requires.fieldvalue";
public static final String JOB_CONFIG_DETECTION_RULE_INVALID_TARGET_FIELD_NAME = "job.config.detectionrule.invalid.targetfieldname";
public static final String JOB_CONFIG_DETECTION_RULE_MISSING_TARGET_FIELD_NAME = "job.config.detectionrule.missing.targetfieldname";
public static final String JOB_CONFIG_DETECTION_RULE_NOT_SUPPORTED_BY_FUNCTION = "job.config.detectionrule.not.supported.by.function";
public static final String JOB_CONFIG_DETECTION_RULE_REQUIRES_AT_LEAST_ONE_CONDITION = "job.config.detectionrule.requires.at."
+ "least.one.condition";
public static final String JOB_CONFIG_FIELDNAME_INCOMPATIBLE_FUNCTION = "job.config.fieldname.incompatible.function";
public static final String JOB_CONFIG_FUNCTION_REQUIRES_BYFIELD = "job.config.function.requires.byfield";
public static final String JOB_CONFIG_FUNCTION_REQUIRES_FIELDNAME = "job.config.function.requires.fieldname";
public static final String JOB_CONFIG_FUNCTION_REQUIRES_OVERFIELD = "job.config.function.requires.overfield";
public static final String JOB_CONFIG_ID_TOO_LONG = "job.config.id.too.long";
public static final String JOB_CONFIG_ID_ALREADY_TAKEN = "job.config.id.already.taken";
public static final String JOB_CONFIG_INVALID_FIELDNAME_CHARS = "job.config.invalid.fieldname.chars";
public static final String JOB_CONFIG_INVALID_JOBID_CHARS = "job.config.invalid.jobid.chars";
public static final String JOB_CONFIG_INVALID_TIMEFORMAT = "job.config.invalid.timeformat";
public static final String JOB_CONFIG_FUNCTION_INCOMPATIBLE_PRESUMMARIZED = "job.config.function.incompatible.presummarized";
public static final String JOB_CONFIG_MISSING_ANALYSISCONFIG = "job.config.missing.analysisconfig";
public static final String JOB_CONFIG_MODEL_DEBUG_CONFIG_INVALID_BOUNDS_PERCENTILE = "job.config.model.debug.config.invalid.bounds."
+ "percentile";
public static final String JOB_CONFIG_FIELD_VALUE_TOO_LOW = "job.config.field.value.too.low";
public static final String JOB_CONFIG_NO_ANALYSIS_FIELD = "job.config.no.analysis.field";
public static final String JOB_CONFIG_NO_ANALYSIS_FIELD_NOT_COUNT = "job.config.no.analysis.field.not.count";
public static final String JOB_CONFIG_NO_DETECTORS = "job.config.no.detectors";
public static final String JOB_CONFIG_OVERFIELD_INCOMPATIBLE_FUNCTION = "job.config.overField.incompatible.function";
public static final String JOB_CONFIG_OVERLAPPING_BUCKETS_INCOMPATIBLE_FUNCTION = "job.config.overlapping.buckets.incompatible."
+ "function";
public static final String JOB_CONFIG_OVERFIELD_NEEDS_ANOTHER = "job.config.overField.needs.another";
public static final String JOB_CONFIG_MULTIPLE_BUCKETSPANS_REQUIRE_BUCKETSPAN = "job.config.multiple.bucketspans.require.bucketspan";
public static final String JOB_CONFIG_MULTIPLE_BUCKETSPANS_MUST_BE_MULTIPLE = "job.config.multiple.bucketspans.must.be.multiple";
public static final String JOB_CONFIG_PER_PARTITION_NORMALIZATION_REQUIRES_PARTITION_FIELD = "job.config.per.partition.normalisation."
+ "requires.partition.field";
public static final String JOB_CONFIG_PER_PARTITION_NORMALIZATION_CANNOT_USE_INFLUENCERS = "job.config.per.partition.normalisation."
+ "cannot.use.influencers";
public static final String JOB_CONFIG_UPDATE_ANALYSIS_LIMITS_PARSE_ERROR = "job.config.update.analysis.limits.parse.error";
public static final String JOB_CONFIG_UPDATE_ANALYSIS_LIMITS_CANNOT_BE_NULL = "job.config.update.analysis.limits.cannot.be.null";
public static final String JOB_CONFIG_UPDATE_ANALYSIS_LIMITS_MODEL_MEMORY_LIMIT_CANNOT_BE_DECREASED = "job.config.update.analysis."
+ "limits.model.memory.limit.cannot.be.decreased";
public static final String JOB_CONFIG_UPDATE_CATEGORIZATION_FILTERS_INVALID = "job.config.update.categorization.filters.invalid";
public static final String JOB_CONFIG_UPDATE_CUSTOM_SETTINGS_INVALID = "job.config.update.custom.settings.invalid";
public static final String JOB_CONFIG_UPDATE_DESCRIPTION_INVALID = "job.config.update.description.invalid";
public static final String JOB_CONFIG_UPDATE_DETECTORS_INVALID = "job.config.update.detectors.invalid";
public static final String JOB_CONFIG_UPDATE_DETECTORS_INVALID_DETECTOR_INDEX = "job.config.update.detectors.invalid.detector.index";
public static final String JOB_CONFIG_UPDATE_DETECTORS_DETECTOR_INDEX_SHOULD_BE_INTEGER = "job.config.update.detectors.detector.index."
+ "should.be.integer";
public static final String JOB_CONFIG_UPDATE_DETECTORS_MISSING_PARAMS = "job.config.update.detectors.missing.params";
public static final String JOB_CONFIG_UPDATE_DETECTORS_DESCRIPTION_SHOULD_BE_STRING = "job.config.update.detectors.description.should"
+ ".be.string";
public static final String JOB_CONFIG_UPDATE_DETECTOR_RULES_PARSE_ERROR = "job.config.update.detectors.rules.parse.error";
public static final String JOB_CONFIG_UPDATE_FAILED = "job.config.update.failed";
public static final String JOB_CONFIG_UPDATE_INVALID_KEY = "job.config.update.invalid.key";
public static final String JOB_CONFIG_UPDATE_IGNORE_DOWNTIME_PARSE_ERROR = "job.config.update.ignore.downtime.parse.error";
public static final String JOB_CONFIG_UPDATE_JOB_IS_NOT_CLOSED = "job.config.update.job.is.not.closed";
public static final String JOB_CONFIG_UPDATE_MODEL_DEBUG_CONFIG_PARSE_ERROR = "job.config.update.model.debug.config.parse.error";
public static final String JOB_CONFIG_UPDATE_REQUIRES_NON_EMPTY_OBJECT = "job.config.update.requires.non.empty.object";
public static final String JOB_CONFIG_UPDATE_PARSE_ERROR = "job.config.update.parse.error";
public static final String JOB_CONFIG_UPDATE_BACKGROUND_PERSIST_INTERVAL_INVALID = "job.config.update.background.persist.interval."
+ "invalid";
public static final String JOB_CONFIG_UPDATE_RENORMALIZATION_WINDOW_DAYS_INVALID = "job.config.update.renormalization.window.days."
+ "invalid";
public static final String JOB_CONFIG_UPDATE_MODEL_SNAPSHOT_RETENTION_DAYS_INVALID = "job.config.update.model.snapshot.retention.days."
+ "invalid";
public static final String JOB_CONFIG_UPDATE_RESULTS_RETENTION_DAYS_INVALID = "job.config.update.results.retention.days.invalid";
public static final String JOB_CONFIG_UPDATE_SCHEDULE_CONFIG_PARSE_ERROR = "job.config.update.scheduler.config.parse.error";
public static final String JOB_CONFIG_UPDATE_SCHEDULE_CONFIG_CANNOT_BE_NULL = "job.config.update.scheduler.config.cannot.be.null";
public static final String JOB_CONFIG_UPDATE_SCHEDULE_CONFIG_DATA_SOURCE_INVALID = "job.config.update.scheduler.config.data.source."
+ "invalid";
public static final String JOB_CONFIG_TRANSFORM_CIRCULAR_DEPENDENCY = "job.config.transform.circular.dependency";
public static final String JOB_CONFIG_TRANSFORM_CONDITION_REQUIRED = "job.config.transform.condition.required";
public static final String JOB_CONFIG_TRANSFORM_DUPLICATED_OUTPUT_NAME = "job.config.transform.duplicated.output.name";
public static final String JOB_CONFIG_TRANSFORM_EXTRACT_GROUPS_SHOULD_MATCH_OUTPUT_COUNT = "job.config.transform.extract.groups.should."
+ "match.output.count";
public static final String JOB_CONFIG_TRANSFORM_INPUTS_CONTAIN_EMPTY_STRING = "job.config.transform.inputs.contain.empty.string";
public static final String JOB_CONFIG_TRANSFORM_INVALID_ARGUMENT = "job.config.transform.invalid.argument";
public static final String JOB_CONFIG_TRANSFORM_INVALID_ARGUMENT_COUNT = "job.config.transform.invalid.argument.count";
public static final String JOB_CONFIG_TRANSFORM_INVALID_INPUT_COUNT = "job.config.transform.invalid.input.count";
public static final String JOB_CONFIG_TRANSFORM_INVALID_OUTPUT_COUNT = "job.config.transform.invalid.output.count";
public static final String JOB_CONFIG_TRANSFORM_OUTPUTS_CONTAIN_EMPTY_STRING = "job.config.transform.outputs.contain.empty.string";
public static final String JOB_CONFIG_TRANSFORM_OUTPUTS_UNUSED = "job.config.transform.outputs.unused";
public static final String JOB_CONFIG_TRANSFORM_OUTPUT_NAME_USED_MORE_THAN_ONCE = "job.config.transform.output.name.used.more.than"
+ ".once";
public static final String JOB_CONFIG_TRANSFORM_UNKNOWN_TYPE = "job.config.transform.unknown.type";
public static final String JOB_CONFIG_UNKNOWN_FUNCTION = "job.config.unknown.function";
public static final String JOB_CONFIG_SCHEDULER_UNKNOWN_DATASOURCE = "job.config.scheduler.unknown.datasource";
public static final String JOB_CONFIG_SCHEDULER_FIELD_NOT_SUPPORTED = "job.config.scheduler.field.not.supported";
public static final String JOB_CONFIG_SCHEDULER_INVALID_OPTION_VALUE = "job.config.scheduler.invalid.option.value";
public static final String JOB_CONFIG_SCHEDULER_REQUIRES_BUCKET_SPAN = "job.config.scheduler.requires.bucket.span";
public static final String JOB_CONFIG_SCHEDULER_ELASTICSEARCH_DOES_NOT_SUPPORT_LATENCY = "job.config.scheduler.elasticsearch.does.not."
+ "support.latency";
public static final String JOB_CONFIG_SCHEDULER_AGGREGATIONS_REQUIRES_SUMMARY_COUNT_FIELD = "job.config.scheduler.aggregations."
+ "requires.summary.count.field";
public static final String JOB_CONFIG_SCHEDULER_ELASTICSEARCH_REQUIRES_DATAFORMAT_ELASTICSEARCH = "job.config.scheduler.elasticsearch."
+ "requires.dataformat.elasticsearch";
public static final String JOB_CONFIG_SCHEDULER_INCOMPLETE_CREDENTIALS = "job.config.scheduler.incomplete.credentials";
public static final String JOB_CONFIG_SCHEDULER_MULTIPLE_PASSWORDS = "job.config.scheduler.multiple.passwords";
public static final String JOB_CONFIG_SCHEDULER_MULTIPLE_AGGREGATIONS = "job.config.scheduler.multiple.aggregations";
public static final String JOB_DATA_CONCURRENT_USE_CLOSE = "job.data.concurrent.use.close";
public static final String JOB_DATA_CONCURRENT_USE_DELETE = "job.data.concurrent.use.delete";
public static final String JOB_DATA_CONCURRENT_USE_FLUSH = "job.data.concurrent.use.flush";
public static final String JOB_DATA_CONCURRENT_USE_PAUSE = "job.data.concurrent.use.pause";
public static final String JOB_DATA_CONCURRENT_USE_RESUME = "job.data.concurrent.use.resume";
public static final String JOB_DATA_CONCURRENT_USE_REVERT = "job.data.concurrent.use.revert";
public static final String JOB_DATA_CONCURRENT_USE_UPDATE = "job.data.concurrent.use.update";
public static final String JOB_DATA_CONCURRENT_USE_UPLOAD = "job.data.concurrent.use.upload";
public static final String JOB_SCHEDULER_CANNOT_START = "job.scheduler.cannot.start";
public static final String JOB_SCHEDULER_CANNOT_STOP_IN_CURRENT_STATE = "job.scheduler.cannot.stop.in.current.state";
public static final String JOB_SCHEDULER_CANNOT_UPDATE_IN_CURRENT_STATE = "job.scheduler.cannot.update.in.current.state";
public static final String JOB_SCHEDULER_FAILED_TO_STOP = "job.scheduler.failed.to.stop";
public static final String JOB_SCHEDULER_NO_SUCH_SCHEDULED_JOB = "job.scheduler.no.such.scheduled.job";
public static final String JOB_SCHEDULER_STATUS_STARTED = "job.scheduler.status.started";
public static final String JOB_SCHEDULER_STATUS_STOPPING = "job.scheduler.status.stopping";
public static final String JOB_SCHEDULER_STATUS_STOPPED = "job.scheduler.status.stopped";
public static final String JOB_SCHEDULER_STATUS_UPDATING = "job.scheduler.status.updating";
public static final String JOB_SCHEDULER_STATUS_DELETING = "job.scheduler.status.deleting";
public static final String JOB_MISSING_QUANTILES = "job.missing.quantiles";
public static final String JOB_UNKNOWN_ID = "job.unknown.id";
public static final String JSON_JOB_CONFIG_MAPPING = "json.job.config.mapping.error";
public static final String JSON_JOB_CONFIG_PARSE = "json.job.config.parse.error";
public static final String JSON_DETECTOR_CONFIG_MAPPING = "json.detector.config.mapping.error";
public static final String JSON_DETECTOR_CONFIG_PARSE = "json.detector.config.parse.error";
public static final String JSON_LIST_DOCUMENT_MAPPING_ERROR = "json.list.document.mapping.error";
public static final String JSON_LIST_DOCUMENT_PARSE_ERROR = "json.list.document.parse.error";
public static final String JSON_TRANSFORM_CONFIG_MAPPING = "json.transform.config.mapping.error";
public static final String JSON_TRANSFORM_CONFIG_PARSE = "json.transform.config.parse.error";
public static final String ON_HOST = "on.host";
public static final String REST_ACTION_NOT_ALLOWED_FOR_SCHEDULED_JOB = "rest.action.not.allowed.for.scheduled.job";
public static final String REST_INVALID_DATETIME_PARAMS = "rest.invalid.datetime.params";
public static final String REST_INVALID_FLUSH_PARAMS_MISSING = "rest.invalid.flush.params.missing.argument";
public static final String REST_INVALID_FLUSH_PARAMS_UNEXPECTED = "rest.invalid.flush.params.unexpected";
public static final String REST_INVALID_RESET_PARAMS = "rest.invalid.reset.params";
public static final String REST_INVALID_FROM = "rest.invalid.from";
public static final String REST_INVALID_SIZE = "rest.invalid.size";
public static final String REST_INVALID_FROM_SIZE_SUM = "rest.invalid.from.size.sum";
public static final String REST_GZIP_ERROR = "rest.gzip.error";
public static final String REST_START_AFTER_END = "rest.start.after.end";
public static final String REST_RESET_BUCKET_NO_LATENCY = "rest.reset.bucket.no.latency";
public static final String REST_INVALID_REVERT_PARAMS = "rest.invalid.revert.params";
public static final String REST_JOB_NOT_CLOSED_REVERT = "rest.job.not.closed.revert";
public static final String REST_NO_SUCH_MODEL_SNAPSHOT = "rest.no.such.model.snapshot";
public static final String REST_INVALID_DESCRIPTION_PARAMS = "rest.invalid.description.params";
public static final String REST_DESCRIPTION_ALREADY_USED = "rest.description.already.used";
public static final String REST_CANNOT_DELETE_HIGHEST_PRIORITY = "rest.cannot.delete.highest.priority";
public static final String REST_ALERT_MISSING_ARGUMENT = "rest.alert.missing.argument";
public static final String REST_ALERT_INVALID_TIMEOUT = "rest.alert.invalid.timeout";
public static final String REST_ALERT_INVALID_THRESHOLD = "rest.alert.invalid.threshold";
public static final String REST_ALERT_CANT_USE_PROB = "rest.alert.cant.use.prob";
public static final String REST_ALERT_INVALID_TYPE = "rest.alert.invalid.type";
public static final String PROCESS_ACTION_SLEEPING_JOB = "process.action.sleeping.job";
public static final String PROCESS_ACTION_CLOSED_JOB = "process.action.closed.job";
public static final String PROCESS_ACTION_CLOSING_JOB = "process.action.closing.job";
public static final String PROCESS_ACTION_DELETING_JOB = "process.action.deleting.job";
public static final String PROCESS_ACTION_FLUSHING_JOB = "process.action.flushing.job";
public static final String PROCESS_ACTION_PAUSING_JOB = "process.action.pausing.job";
public static final String PROCESS_ACTION_RESUMING_JOB = "process.action.resuming.job";
public static final String PROCESS_ACTION_REVERTING_JOB = "process.action.reverting.job";
public static final String PROCESS_ACTION_UPDATING_JOB = "process.action.updating.job";
public static final String PROCESS_ACTION_WRITING_JOB = "process.action.writing.job";
public static final String SUPPORT_BUNDLE_SCRIPT_ERROR = "support.bundle.script.error";
private Messages()
{
}
public static ResourceBundle load()
{
return ResourceBundle.getBundle(Messages.BUNDLE_NAME, Locale.getDefault());
}
/**
* Look up the message string from the resource bundle.
*
* @param key Must be one of the statics defined in this file
*/
public static String getMessage(String key)
{
return load().getString(key);
}
/**
* Look up the message string from the resource bundle and format with
* the supplied arguments
* @param key the key for the message
* @param args MessageFormat arguments. See {@linkplain MessageFormat#format(Object)}
*/
public static String getMessage(String key, Object...args)
{
return new MessageFormat(load().getString(key), Locale.ROOT).format(args);
}
}
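
For completeness, a tiny sketch (not part of the change) of how a message key is resolved and formatted; JOB_UNKNOWN_ID is one of the keys defined above.

import org.elasticsearch.xpack.prelert.job.messages.Messages;

class MessagesSketch {
    // Looks up the bundle string for the key and applies MessageFormat with the job id.
    String jobUnknown(String jobId) {
        return Messages.getMessage(Messages.JOB_UNKNOWN_ID, jobId);
    }
}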

View File

@ -0,0 +1,206 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.metadata;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.xpack.prelert.job.JobSchedulerStatus;
import org.elasticsearch.xpack.prelert.job.JobStatus;
import org.elasticsearch.xpack.prelert.job.SchedulerState;
import org.elasticsearch.xpack.prelert.job.messages.Messages;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Objects;
public class Allocation extends AbstractDiffable<Allocation> implements ToXContent {
private static final ParseField NODE_ID_FIELD = new ParseField("node_id");
private static final ParseField JOB_ID_FIELD = new ParseField("job_id");
public static final ParseField STATUS = new ParseField("status");
public static final ParseField SCHEDULER_STATE = new ParseField("scheduler_state");
static final Allocation PROTO = new Allocation(null, null, null, null);
static final ObjectParser<Builder, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>("allocation", Builder::new);
static {
PARSER.declareString(Builder::setNodeId, NODE_ID_FIELD);
PARSER.declareString(Builder::setJobId, JOB_ID_FIELD);
PARSER.declareField(Builder::setStatus, (p, c) -> JobStatus.fromString(p.text()), STATUS, ObjectParser.ValueType.STRING);
PARSER.declareObject(Builder::setSchedulerState, SchedulerState.PARSER, SCHEDULER_STATE);
}
private final String nodeId;
private final String jobId;
private final JobStatus status;
private final SchedulerState schedulerState;
public Allocation(String nodeId, String jobId, JobStatus status, SchedulerState schedulerState) {
this.nodeId = nodeId;
this.jobId = jobId;
this.status = status;
this.schedulerState = schedulerState;
}
public Allocation(StreamInput in) throws IOException {
this.nodeId = in.readString();
this.jobId = in.readString();
this.status = JobStatus.fromStream(in);
this.schedulerState = in.readOptionalWriteable(SchedulerState::new);
}
public String getNodeId() {
return nodeId;
}
public String getJobId() {
return jobId;
}
public JobStatus getStatus() {
return status;
}
public SchedulerState getSchedulerState() {
return schedulerState;
}
@Override
public Allocation readFrom(StreamInput in) throws IOException {
return new Allocation(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(nodeId);
out.writeString(jobId);
status.writeTo(out);
out.writeOptionalWriteable(schedulerState);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(NODE_ID_FIELD.getPreferredName(), nodeId);
builder.field(JOB_ID_FIELD.getPreferredName(), jobId);
builder.field(STATUS.getPreferredName(), status);
if (schedulerState != null) {
builder.field(SCHEDULER_STATE.getPreferredName(), schedulerState);
}
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Allocation that = (Allocation) o;
return Objects.equals(nodeId, that.nodeId) &&
Objects.equals(jobId, that.jobId) &&
Objects.equals(status, that.status) &&
Objects.equals(schedulerState, that.schedulerState);
}
@Override
public int hashCode() {
return Objects.hash(nodeId, jobId, status, schedulerState);
}
// Class already extends from AbstractDiffable, so copied from ToXContentToBytes#toString()
@SuppressWarnings("deprecation")
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
toXContent(builder, EMPTY_PARAMS);
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere
return "{ \"error\" : \"" + org.elasticsearch.ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
public static class Builder {
private String nodeId;
private String jobId;
private JobStatus status = JobStatus.CLOSED;
private SchedulerState schedulerState;
public Builder() {
}
public Builder(Allocation allocation) {
this.nodeId = allocation.nodeId;
this.jobId = allocation.jobId;
this.status = allocation.status;
this.schedulerState = allocation.schedulerState;
}
public void setNodeId(String nodeId) {
this.nodeId = nodeId;
}
public void setJobId(String jobId) {
this.jobId = jobId;
}
public void setStatus(JobStatus status) {
this.status = status;
}
public void setSchedulerState(SchedulerState schedulerState) {
JobSchedulerStatus currentSchedulerStatus = this.schedulerState == null ?
JobSchedulerStatus.STOPPED : this.schedulerState.getStatus();
JobSchedulerStatus newSchedulerStatus = schedulerState.getStatus();
switch (newSchedulerStatus) {
case STARTING:
if (currentSchedulerStatus != JobSchedulerStatus.STOPPED) {
String msg = Messages.getMessage(Messages.JOB_SCHEDULER_CANNOT_START, jobId, newSchedulerStatus);
throw ExceptionsHelper.conflictStatusException(msg);
}
break;
case STARTED:
if (currentSchedulerStatus != JobSchedulerStatus.STARTING) {
String msg = Messages.getMessage(Messages.JOB_SCHEDULER_CANNOT_START, jobId, newSchedulerStatus);
throw ExceptionsHelper.conflictStatusException(msg);
}
break;
case STOPPING:
if (currentSchedulerStatus != JobSchedulerStatus.STARTED) {
String msg = Messages.getMessage(Messages.JOB_SCHEDULER_CANNOT_STOP_IN_CURRENT_STATE, jobId, newSchedulerStatus);
throw ExceptionsHelper.conflictStatusException(msg);
}
break;
case STOPPED:
if (currentSchedulerStatus != JobSchedulerStatus.STOPPING) {
String msg = Messages.getMessage(Messages.JOB_SCHEDULER_CANNOT_STOP_IN_CURRENT_STATE, jobId, newSchedulerStatus);
throw ExceptionsHelper.conflictStatusException(msg);
}
break;
default:
throw new IllegalArgumentException("Invalid requested job scheduler status: " + newSchedulerStatus);
}
this.schedulerState = schedulerState;
}
public Allocation build() {
return new Allocation(nodeId, jobId, status, schedulerState);
}
}
}
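// A minimal sketch of how the Builder above is used to request a scheduler status change;
// it relies on the transition checks in setSchedulerState (STOPPED -> STARTING -> STARTED ->
// STOPPING -> STOPPED) and throws a conflict exception for any other order. The SchedulerState
// instance is assumed to be supplied by the caller.
class AllocationTransitionExample {
    static Allocation applySchedulerState(Allocation current, SchedulerState requested) {
        Allocation.Builder builder = new Allocation.Builder(current);
        builder.setSchedulerState(requested); // validates the transition against the current state
        return builder.build();
    }
}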

View File

@ -0,0 +1,114 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.metadata;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
/**
* Runs only on the elected master node and decides to what nodes jobs should be allocated
*/
public class JobAllocator extends AbstractComponent implements ClusterStateListener {
private final ThreadPool threadPool;
private final ClusterService clusterService;
public JobAllocator(Settings settings, ClusterService clusterService, ThreadPool threadPool) {
super(settings);
this.threadPool = threadPool;
this.clusterService = clusterService;
clusterService.add(this);
}
ClusterState allocateJobs(ClusterState current) {
if (shouldAllocate(current) == false) {
// returning same instance, so no cluster state update is performed
return current;
}
DiscoveryNodes nodes = current.getNodes();
if (nodes.getSize() != 1) {
throw new IllegalStateException("Current prelert doesn't support multiple nodes");
}
// NORELEASE: Assumes prelert always runs on a single node:
PrelertMetadata prelertMetadata = current.getMetaData().custom(PrelertMetadata.TYPE);
PrelertMetadata.Builder builder = new PrelertMetadata.Builder(prelertMetadata);
DiscoveryNode prelertNode = nodes.getMasterNode(); // prelert is now always master node
for (String jobId : prelertMetadata.getJobs().keySet()) {
if (prelertMetadata.getAllocations().containsKey(jobId) == false) {
builder.putAllocation(prelertNode.getId(), jobId);
}
}
return ClusterState.builder(current)
.metaData(MetaData.builder(current.metaData()).putCustom(PrelertMetadata.TYPE, builder.build()))
.build();
}
boolean shouldAllocate(ClusterState current) {
PrelertMetadata prelertMetadata = current.getMetaData().custom(PrelertMetadata.TYPE);
for (String jobId : prelertMetadata.getJobs().keySet()) {
if (prelertMetadata.getAllocations().containsKey(jobId) == false) {
return true;
}
}
return false;
}
boolean prelertMetaDataMissing(ClusterState clusterState) {
return clusterState.getMetaData().custom(PrelertMetadata.TYPE) == null;
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.localNodeMaster()) {
if (prelertMetaDataMissing(event.state())) {
threadPool.executor(ThreadPool.Names.GENERIC).execute(() -> {
clusterService.submitStateUpdateTask("install-prelert-metadata", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
ClusterState.Builder builder = new ClusterState.Builder(currentState);
MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData());
metadataBuilder.putCustom(PrelertMetadata.TYPE, PrelertMetadata.PROTO);
builder.metaData(metadataBuilder.build());
return builder.build();
}
@Override
public void onFailure(String source, Exception e) {
logger.error("unable to install prelert metadata upon startup", e);
}
});
});
} else if (shouldAllocate(event.state())) {
threadPool.executor(ThreadPool.Names.GENERIC).execute(() -> {
clusterService.submitStateUpdateTask("allocate_jobs", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
return allocateJobs(currentState);
}
@Override
public void onFailure(String source, Exception e) {
logger.error("failed to allocate jobs", e);
}
});
});
}
}
}
}

View File

@ -0,0 +1,174 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.metadata;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.prelert.action.UpdateJobStatusAction;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.JobStatus;
import org.elasticsearch.xpack.prelert.job.SchedulerState;
import org.elasticsearch.xpack.prelert.job.data.DataProcessor;
import org.elasticsearch.xpack.prelert.job.scheduler.ScheduledJobService;
import java.util.Collections;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.Executor;
public class JobLifeCycleService extends AbstractComponent implements ClusterStateListener {
volatile Set<String> localAllocatedJobs = Collections.emptySet();
private final Client client;
private final ScheduledJobService scheduledJobService;
private DataProcessor dataProcessor;
private final Executor executor;
public JobLifeCycleService(Settings settings, Client client, ClusterService clusterService, ScheduledJobService scheduledJobService,
DataProcessor dataProcessor, Executor executor) {
super(settings);
clusterService.add(this);
this.client = Objects.requireNonNull(client);
this.scheduledJobService = Objects.requireNonNull(scheduledJobService);
this.dataProcessor = Objects.requireNonNull(dataProcessor);
this.executor = Objects.requireNonNull(executor);
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
PrelertMetadata prelertMetadata = event.state().getMetaData().custom(PrelertMetadata.TYPE);
if (prelertMetadata == null) {
logger.debug("Prelert metadata not installed");
return;
}
// Single volatile read:
Set<String> localAllocatedJobs = this.localAllocatedJobs;
DiscoveryNode localNode = event.state().nodes().getLocalNode();
for (Allocation allocation : prelertMetadata.getAllocations().values()) {
if (localNode.getId().equals(allocation.getNodeId())) {
handleLocallyAllocatedJob(prelertMetadata, allocation);
}
}
for (String localAllocatedJob : localAllocatedJobs) {
Allocation allocation = prelertMetadata.getAllocations().get(localAllocatedJob);
if (allocation != null) {
if (localNode.getId().equals(allocation.getNodeId()) == false) {
stopJob(localAllocatedJob);
}
} else {
stopJob(localAllocatedJob);
}
}
}
private void handleLocallyAllocatedJob(PrelertMetadata prelertMetadata, Allocation allocation) {
Job job = prelertMetadata.getJobs().get(allocation.getJobId());
if (localAllocatedJobs.contains(allocation.getJobId()) == false) {
startJob(job);
}
handleJobStatusChange(job, allocation.getStatus());
handleSchedulerStatusChange(job, allocation);
}
private void handleJobStatusChange(Job job, JobStatus status) {
switch (status) {
case PAUSING:
executor.execute(() -> pauseJob(job));
break;
case RUNNING:
break;
case CLOSING:
break;
case CLOSED:
break;
case PAUSED:
break;
default:
throw new IllegalStateException("Unknown job status [" + status + "]");
}
}
private void handleSchedulerStatusChange(Job job, Allocation allocation) {
SchedulerState schedulerState = allocation.getSchedulerState();
if (schedulerState != null) {
switch (schedulerState.getStatus()) {
case STARTING:
scheduledJobService.start(job, allocation);
break;
case STARTED:
break;
case STOPPING:
scheduledJobService.stop(allocation);
break;
case STOPPED:
break;
default:
throw new IllegalStateException("Unhandled scheduler state [" + schedulerState.getStatus() + "]");
}
}
}
void startJob(Job job) {
logger.info("Starting job [" + job.getId() + "]");
// noop now, but should delegate to a task / ProcessManager that actually starts the job
// update which jobs are now allocated locally
Set<String> newSet = new HashSet<>(localAllocatedJobs);
newSet.add(job.getId());
localAllocatedJobs = newSet;
}
void stopJob(String jobId) {
logger.info("Stopping job [" + jobId + "]");
// noop now, but should delegate to a task / ProcessManager that actually stops the job
// update which jobs are now allocated locally
Set<String> newSet = new HashSet<>(localAllocatedJobs);
newSet.remove(jobId);
localAllocatedJobs = newSet;
}
private void pauseJob(Job job) {
try {
// NORELEASE Ensure this also removes the job auto-close timeout task
dataProcessor.closeJob(job.getId());
} catch (ElasticsearchException e) {
logger.error("Failed to close job [" + job.getId() + "] while pausing", e);
updateJobStatus(job.getId(), JobStatus.FAILED);
return;
}
updateJobStatus(job.getId(), JobStatus.PAUSED);
}
private void updateJobStatus(String jobId, JobStatus status) {
UpdateJobStatusAction.Request request = new UpdateJobStatusAction.Request(jobId, status);
client.execute(UpdateJobStatusAction.INSTANCE, request, new ActionListener<UpdateJobStatusAction.Response>() {
@Override
public void onResponse(UpdateJobStatusAction.Response response) {
logger.info("Successfully set job status to [{}] for job [{}]", status, jobId);
// NORELEASE Audit job paused
// audit(jobId).info(Messages.getMessage(Messages.JOB_AUDIT_PAUSED));
}
@Override
public void onFailure(Exception e) {
logger.error("Could not set job status to [" + status + "] for job [" + jobId +"]", e);
}
});
}
}

View File

@ -0,0 +1,251 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.metadata;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.Map;
import java.util.Objects;
import java.util.SortedMap;
import java.util.TreeMap;
public class PrelertMetadata implements MetaData.Custom {
private static final ParseField JOBS_FIELD = new ParseField("jobs");
private static final ParseField ALLOCATIONS_FIELD = new ParseField("allocations");
public static final String TYPE = "prelert";
public static final PrelertMetadata PROTO = new PrelertMetadata(Collections.emptySortedMap(), Collections.emptySortedMap());
private static final ObjectParser<Builder, ParseFieldMatcherSupplier> PRELERT_METADATA_PARSER = new ObjectParser<>("prelert_metadata",
Builder::new);
static {
PRELERT_METADATA_PARSER.declareObjectArray(Builder::putJobs, (p, c) -> Job.PARSER.apply(p, c).build(), JOBS_FIELD);
PRELERT_METADATA_PARSER.declareObjectArray(Builder::putAllocations, Allocation.PARSER, ALLOCATIONS_FIELD);
}
// NORELEASE: A few fields of the job details change frequently and need to be stored elsewhere;
// a performance issue will occur if we don't change that
private final SortedMap<String, Job> jobs;
private final SortedMap<String, Allocation> allocations;
private PrelertMetadata(SortedMap<String, Job> jobs, SortedMap<String, Allocation> allocations) {
this.jobs = Collections.unmodifiableSortedMap(jobs);
this.allocations = Collections.unmodifiableSortedMap(allocations);
}
public Map<String, Job> getJobs() {
// NORELEASE jobs should be immutable or a job can be modified in the
// cluster state of a single node without a cluster state update
return jobs;
}
public SortedMap<String, Allocation> getAllocations() {
return allocations;
}
@Override
public String type() {
return TYPE;
}
@Override
public MetaData.Custom fromXContent(XContentParser parser) throws IOException {
return PRELERT_METADATA_PARSER.parse(parser, () -> ParseFieldMatcher.STRICT).build();
}
@Override
public EnumSet<MetaData.XContentContext> context() {
// NORELEASE: Also include SNAPSHOT, but then we need to split the allocations from here and add them
// as ClusterState.Custom metadata, because only the job definitions should be stored in snapshots.
return MetaData.API_AND_GATEWAY;
}
@Override
public Diff<MetaData.Custom> diff(MetaData.Custom previousState) {
return new PrelertMetadataDiff((PrelertMetadata) previousState, this);
}
@Override
public Diff<MetaData.Custom> readDiffFrom(StreamInput in) throws IOException {
return new PrelertMetadataDiff(in);
}
@Override
public MetaData.Custom readFrom(StreamInput in) throws IOException {
int size = in.readVInt();
TreeMap<String, Job> jobs = new TreeMap<>();
for (int i = 0; i < size; i++) {
jobs.put(in.readString(), new Job(in));
}
size = in.readVInt();
TreeMap<String, Allocation> allocations = new TreeMap<>();
for (int i = 0; i < size; i++) {
allocations.put(in.readString(), Allocation.PROTO.readFrom(in));
}
return new PrelertMetadata(jobs, allocations);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(jobs.size());
for (Map.Entry<String, Job> entry : jobs.entrySet()) {
out.writeString(entry.getKey());
entry.getValue().writeTo(out);
}
out.writeVInt(allocations.size());
for (Map.Entry<String, Allocation> entry : allocations.entrySet()) {
out.writeString(entry.getKey());
entry.getValue().writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray(JOBS_FIELD.getPreferredName());
for (Job job : jobs.values()) {
builder.value(job);
}
builder.endArray();
builder.startArray(ALLOCATIONS_FIELD.getPreferredName());
for (Map.Entry<String, Allocation> entry : allocations.entrySet()) {
builder.value(entry.getValue());
}
builder.endArray();
return builder;
}
static class PrelertMetadataDiff implements Diff<MetaData.Custom> {
final Diff<Map<String, Job>> jobs;
final Diff<Map<String, Allocation>> allocations;
PrelertMetadataDiff(PrelertMetadata before, PrelertMetadata after) {
this.jobs = DiffableUtils.diff(before.jobs, after.jobs, DiffableUtils.getStringKeySerializer());
this.allocations = DiffableUtils.diff(before.allocations, after.allocations, DiffableUtils.getStringKeySerializer());
}
PrelertMetadataDiff(StreamInput in) throws IOException {
jobs = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), Job.PROTO);
allocations = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), Allocation.PROTO);
}
@Override
public MetaData.Custom apply(MetaData.Custom part) {
TreeMap<String, Job> newJobs = new TreeMap<>(jobs.apply(((PrelertMetadata) part).jobs));
TreeMap<String, Allocation> newAllocations = new TreeMap<>(allocations.apply(((PrelertMetadata) part).allocations));
return new PrelertMetadata(newJobs, newAllocations);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
jobs.writeTo(out);
allocations.writeTo(out);
}
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
PrelertMetadata that = (PrelertMetadata) o;
return Objects.equals(jobs, that.jobs) && Objects.equals(allocations, that.allocations);
}
@Override
public int hashCode() {
return Objects.hash(jobs, allocations);
}
public static class Builder {
private TreeMap<String, Job> jobs;
private TreeMap<String, Allocation> allocations;
public Builder() {
this.jobs = new TreeMap<>();
this.allocations = new TreeMap<>();
}
public Builder(PrelertMetadata previous) {
jobs = new TreeMap<>(previous.jobs);
allocations = new TreeMap<>(previous.allocations);
}
public Builder putJob(Job job, boolean overwrite) {
if (jobs.containsKey(job.getId()) && overwrite == false) {
throw ExceptionsHelper.jobAlreadyExists(job.getId());
}
this.jobs.put(job.getId(), job);
return this;
}
public Builder removeJob(String jobId) {
if (jobs.remove(jobId) == null) {
throw new ResourceNotFoundException("job [" + jobId + "] does not exist");
}
this.allocations.remove(jobId);
return this;
}
public Builder putAllocation(String nodeId, String jobId) {
Allocation.Builder builder = new Allocation.Builder();
builder.setJobId(jobId);
builder.setNodeId(nodeId);
this.allocations.put(jobId, builder.build());
return this;
}
public Builder updateAllocation(String jobId, Allocation updated) {
Allocation previous = this.allocations.put(jobId, updated);
if (previous == null) {
throw new IllegalStateException("Expected that job [" + jobId + "] was already allocated");
}
return this;
}
// only for parsing
private Builder putAllocations(Collection<Allocation.Builder> allocations) {
for (Allocation.Builder allocationBuilder : allocations) {
Allocation allocation = allocationBuilder.build();
this.allocations.put(allocation.getJobId(), allocation);
}
return this;
}
private Builder putJobs(Collection<Job> jobs) {
for (Job job : jobs) {
putJob(job, true);
}
return this;
}
public PrelertMetadata build() {
return new PrelertMetadata(jobs, allocations);
}
}
}
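// A minimal sketch of registering a job and allocating it to a node with the Builder above;
// the Job instance is assumed to already exist (its own builder lives elsewhere in this commit).
class PrelertMetadataUsageExample {
    static PrelertMetadata registerAndAllocate(PrelertMetadata current, Job job, String nodeId) {
        PrelertMetadata.Builder builder = new PrelertMetadata.Builder(current);
        builder.putJob(job, false); // throws if a job with the same id already exists
        builder.putAllocation(nodeId, job.getId());
        return builder.build();
    }
}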

View File

@ -0,0 +1,53 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import java.util.Deque;
import java.util.NoSuchElementException;
/**
* An iterator useful to fetch a large number of documents of type T
* and iterate through them in batches.
*/
public interface BatchedDocumentsIterator<T>
{
/**
* Query documents whose timestamp is within the given time range
*
* @param startEpochMs the start time as epoch milliseconds (inclusive)
* @param endEpochMs the end time as epoch milliseconds (exclusive)
* @return the iterator itself
*/
BatchedDocumentsIterator<T> timeRange(long startEpochMs, long endEpochMs);
/**
* Include interim documents
*
* @param interimFieldName Name of the include interim field
*/
BatchedDocumentsIterator<T> includeInterim(String interimFieldName);
/**
* The first time next() is called, the search will be performed and the first
* batch will be returned. Any subsequent call will return the following batches.
* <p>
* Note that in some implementations it is possible that when there are no
* results at all, the first time this method is called an empty {@code Deque} is returned.
*
* @return a {@code Deque} with the next batch of documents
* @throws NoSuchElementException if the iteration has no more elements
*/
Deque<T> next();
/**
* Returns {@code true} if the iteration has more elements.
* (In other words, returns {@code true} if {@link #next} would
* return an element rather than throwing an exception.)
*
* @return {@code true} if the iteration has more elements
*/
boolean hasNext();
}
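// A minimal consumption sketch for the interface above: restrict the iterator to a time range
// and drain it batch by batch; the concrete iterator implementation is assumed to be supplied
// by the caller.
class BatchedDocumentsIteratorUsageExample {
    static <T> long countDocuments(BatchedDocumentsIterator<T> iterator, long startEpochMs, long endEpochMs) {
        iterator.timeRange(startEpochMs, endEpochMs);
        long count = 0;
        while (iterator.hasNext()) {
            Deque<T> batch = iterator.next(); // the first call runs the search, later calls page through
            count += batch.size();
        }
        return count;
    }
}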

View File

@ -0,0 +1,112 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.elasticsearch.common.Strings;
import java.util.Objects;
/**
* One time query builder for a single bucket.
* <ul>
* <li>Timestamp (Required) - Timestamp of the bucket</li>
* <li>Expand- Include anomaly records. Default= false</li>
* <li>IncludeInterim- Include interim results. Default = false</li>
* <li>partitionValue Set the bucket's max normalised probabiltiy to this
* partiton field value's max normalised probability. Default = null</li>
* </ul>
*/
public final class BucketQueryBuilder {
public static final int DEFAULT_SIZE = 100;
private BucketQuery bucketQuery;
public BucketQueryBuilder(String timestamp) {
bucketQuery = new BucketQuery(timestamp);
}
public BucketQueryBuilder expand(boolean expand) {
bucketQuery.expand = expand;
return this;
}
public BucketQueryBuilder includeInterim(boolean include) {
bucketQuery.includeInterim = include;
return this;
}
/**
* partitionValue must be non-null and not empty, otherwise it
* is not set
*/
public BucketQueryBuilder partitionValue(String partitionValue) {
if (!Strings.isNullOrEmpty(partitionValue)) {
bucketQuery.partitionValue = partitionValue;
}
return this;
}
public BucketQueryBuilder.BucketQuery build() {
return bucketQuery;
}
public class BucketQuery {
private String timestamp;
private boolean expand = false;
private boolean includeInterim = false;
private String partitionValue = null;
public BucketQuery(String timestamp) {
this.timestamp = timestamp;
}
public String getTimestamp() {
return timestamp;
}
public boolean isIncludeInterim() {
return includeInterim;
}
public boolean isExpand() {
return expand;
}
/**
* @return Null if not set
*/
public String getPartitionValue() {
return partitionValue;
}
@Override
public int hashCode() {
return Objects.hash(timestamp, expand, includeInterim,
partitionValue);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
BucketQuery other = (BucketQuery) obj;
return Objects.equals(timestamp, other.timestamp) &&
Objects.equals(expand, other.expand) &&
Objects.equals(includeInterim, other.includeInterim) &&
Objects.equals(partitionValue, other.partitionValue);
}
}
}
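// A minimal usage sketch: a one-time query for a single bucket timestamp, expanded to include
// anomaly records; the epoch-millisecond timestamp shown is an illustrative value.
class BucketQueryBuilderUsageExample {
    static BucketQueryBuilder.BucketQuery exampleQuery() {
        return new BucketQueryBuilder("1478016000000")
                .expand(true)
                .includeInterim(false)
                .build();
    }
}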

View File

@ -0,0 +1,210 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.elasticsearch.xpack.prelert.job.results.Bucket;
import org.elasticsearch.common.Strings;
import java.util.Objects;
/**
* One time query builder for buckets.
* <ul>
* <li>From- Skip the first N Buckets. This parameter is for paging if not
* required set to 0. Default = 0</li>
* <li>Size- Take only this number of Buckets. Default =
* {@value DEFAULT_SIZE}</li>
* <li>Expand- Include anomaly records. Default= false</li>
* <li>IncludeInterim- Include interim results. Default = false</li>
* <li>anomalyScoreThreshold- Return only buckets with an anomalyScore &gt;=
* this value. Default = 0.0</li>
* <li>normalizedProbabilityThreshold- Return only buckets with a
* maxNormalizedProbability &gt;= this value. Default = 0.0</li>
* <li>epochStart- The start bucket time. A bucket with this timestamp will be
* included in the results. If 0 all buckets up to <code>endEpochMs</code> are
* returned. Default = -1</li>
* <li>epochEnd- The end bucket timestamp buckets up to but NOT including this
* timestamp are returned. If 0 all buckets from <code>startEpochMs</code> are
* returned. Default = -1</li>
* <li>partitionValue Set the bucket's max normalised probability to this
* partition field value's max normalised probability. Default = null</li>
* </ul>
*/
public final class BucketsQueryBuilder {
public static final int DEFAULT_SIZE = 100;
private BucketsQuery bucketsQuery = new BucketsQuery();
public BucketsQueryBuilder from(int from) {
bucketsQuery.from = from;
return this;
}
public BucketsQueryBuilder size(int size) {
bucketsQuery.size = size;
return this;
}
public BucketsQueryBuilder expand(boolean expand) {
bucketsQuery.expand = expand;
return this;
}
public BucketsQueryBuilder includeInterim(boolean include) {
bucketsQuery.includeInterim = include;
return this;
}
public BucketsQueryBuilder anomalyScoreThreshold(Double anomalyScoreFilter) {
bucketsQuery.anomalyScoreFilter = anomalyScoreFilter;
return this;
}
public BucketsQueryBuilder normalizedProbabilityThreshold(Double normalizedProbability) {
bucketsQuery.normalizedProbability = normalizedProbability;
return this;
}
/**
* @param partitionValue Not set if null or empty
*/
public BucketsQueryBuilder partitionValue(String partitionValue) {
if (!Strings.isNullOrEmpty(partitionValue)) {
bucketsQuery.partitionValue = partitionValue;
}
return this;
}
public BucketsQueryBuilder sortField(String sortField) {
bucketsQuery.sortField = sortField;
return this;
}
public BucketsQueryBuilder sortDescending(boolean sortDescending) {
bucketsQuery.sortDescending = sortDescending;
return this;
}
/**
* If startTime &lt;= 0 the parameter is not set
*/
public BucketsQueryBuilder epochStart(String startTime) {
bucketsQuery.epochStart = startTime;
return this;
}
/**
* If endTime &lt;= 0 the parameter is not set
*/
public BucketsQueryBuilder epochEnd(String endTime) {
bucketsQuery.epochEnd = endTime;
return this;
}
public BucketsQueryBuilder.BucketsQuery build() {
return bucketsQuery;
}
public void clear() {
bucketsQuery = new BucketsQueryBuilder.BucketsQuery();
}
public class BucketsQuery {
private int from = 0;
private int size = DEFAULT_SIZE;
private boolean expand = false;
private boolean includeInterim = false;
private double anomalyScoreFilter = 0.0d;
private double normalizedProbability = 0.0d;
private String epochStart;
private String epochEnd;
private String partitionValue = null;
private String sortField = Bucket.TIMESTAMP.getPreferredName();
private boolean sortDescending = false;
public int getFrom() {
return from;
}
public int getSize() {
return size;
}
public boolean isExpand() {
return expand;
}
public boolean isIncludeInterim() {
return includeInterim;
}
public double getAnomalyScoreFilter() {
return anomalyScoreFilter;
}
public double getNormalizedProbability() {
return normalizedProbability;
}
public String getEpochStart() {
return epochStart;
}
public String getEpochEnd() {
return epochEnd;
}
/**
* @return Null if not set
*/
public String getPartitionValue() {
return partitionValue;
}
public String getSortField() {
return sortField;
}
public boolean isSortDescending() {
return sortDescending;
}
@Override
public int hashCode() {
return Objects.hash(from, size, expand, includeInterim, anomalyScoreFilter, normalizedProbability, epochStart, epochEnd,
partitionValue, sortField, sortDescending);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
BucketsQuery other = (BucketsQuery) obj;
return Objects.equals(from, other.from) &&
Objects.equals(size, other.size) &&
Objects.equals(expand, other.expand) &&
Objects.equals(includeInterim, other.includeInterim) &&
Objects.equals(epochStart, other.epochStart) &&
Objects.equals(epochEnd, other.epochEnd) &&
Objects.equals(anomalyScoreFilter, other.anomalyScoreFilter) &&
Objects.equals(normalizedProbability, other.normalizedProbability) &&
Objects.equals(partitionValue, other.partitionValue) &&
Objects.equals(sortField, other.sortField) &&
this.sortDescending == other.sortDescending;
}
}
}
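// A minimal usage sketch: page through buckets 50 at a time, keeping only those with an
// anomaly score of at least 75, sorted by timestamp descending; the page size and threshold
// are illustrative values.
class BucketsQueryBuilderUsageExample {
    static BucketsQueryBuilder.BucketsQuery pagedQuery(int from) {
        return new BucketsQueryBuilder()
                .from(from)
                .size(50)
                .anomalyScoreThreshold(75.0)
                .sortField(Bucket.TIMESTAMP.getPreferredName())
                .sortDescending(true)
                .build();
    }
}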

View File

@ -0,0 +1,105 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.xpack.prelert.job.audit.AuditActivity;
import org.elasticsearch.xpack.prelert.job.audit.AuditMessage;
import org.elasticsearch.xpack.prelert.job.audit.Auditor;
import java.io.IOException;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
public class ElasticsearchAuditor implements Auditor
{
private static final Logger LOGGER = Loggers.getLogger(ElasticsearchAuditor.class);
private final Client client;
private final String index;
private final String jobId;
public ElasticsearchAuditor(Client client, String index, String jobId)
{
this.client = Objects.requireNonNull(client);
this.index = index;
this.jobId = jobId;
}
@Override
public void info(String message)
{
persistAuditMessage(AuditMessage.newInfo(jobId, message));
}
@Override
public void warning(String message)
{
persistAuditMessage(AuditMessage.newWarning(jobId, message));
}
@Override
public void error(String message)
{
persistAuditMessage(AuditMessage.newError(jobId, message));
}
@Override
public void activity(String message)
{
persistAuditMessage(AuditMessage.newActivity(jobId, message));
}
@Override
public void activity(int totalJobs, int totalDetectors, int runningJobs, int runningDetectors)
{
persistAuditActivity(AuditActivity.newActivity(totalJobs, totalDetectors, runningJobs, runningDetectors));
}
private void persistAuditMessage(AuditMessage message)
{
try
{
client.prepareIndex(index, AuditMessage.TYPE.getPreferredName())
.setSource(serialiseMessage(message))
.execute().actionGet();
}
catch (IOException | IndexNotFoundException e)
{
LOGGER.error("Error writing auditMessage", e);
}
}
private void persistAuditActivity(AuditActivity activity)
{
try
{
client.prepareIndex(index, AuditActivity.TYPE.getPreferredName())
.setSource(serialiseActivity(activity))
.execute().actionGet();
}
catch (IOException | IndexNotFoundException e)
{
LOGGER.error("Error writing auditActivity", e);
}
}
private XContentBuilder serialiseMessage(AuditMessage message) throws IOException
{
return message.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS);
}
private XContentBuilder serialiseActivity(AuditActivity activity) throws IOException
{
return activity.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS);
}
}

View File

@ -0,0 +1,47 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchHit;
import java.io.IOException;
import org.elasticsearch.xpack.prelert.job.results.Bucket;
class ElasticsearchBatchedBucketsIterator extends ElasticsearchBatchedDocumentsIterator<Bucket>
{
public ElasticsearchBatchedBucketsIterator(Client client, String jobId, ParseFieldMatcher parserFieldMatcher)
{
super(client, ElasticsearchPersister.getJobIndexName(jobId), parserFieldMatcher);
}
@Override
protected String getType()
{
return Bucket.TYPE.getPreferredName();
}
@Override
protected Bucket map(SearchHit hit)
{
BytesReference source = hit.getSourceRef();
XContentParser parser;
try {
parser = XContentFactory.xContent(source).createParser(source);
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parse bucket", e);
}
Bucket bucket = Bucket.PARSER.apply(parser, () -> parseFieldMatcher);
bucket.setId(hit.getId());
return bucket;
}
}

View File

@ -0,0 +1,115 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.sort.SortBuilders;
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.NoSuchElementException;
import java.util.Objects;
abstract class ElasticsearchBatchedDocumentsIterator<T> implements BatchedDocumentsIterator<T> {
private static final Logger LOGGER = Loggers.getLogger(ElasticsearchBatchedDocumentsIterator.class);
private static final String CONTEXT_ALIVE_DURATION = "5m";
private static final int BATCH_SIZE = 10000;
private final Client client;
private final String index;
private final ResultsFilterBuilder filterBuilder;
private volatile long count;
private volatile long totalHits;
private volatile String scrollId;
private volatile boolean isScrollInitialised;
protected ParseFieldMatcher parseFieldMatcher;
public ElasticsearchBatchedDocumentsIterator(Client client, String index, ParseFieldMatcher parseFieldMatcher) {
this.client = Objects.requireNonNull(client);
this.index = Objects.requireNonNull(index);
this.parseFieldMatcher = Objects.requireNonNull(parseFieldMatcher);
totalHits = 0;
count = 0;
filterBuilder = new ResultsFilterBuilder();
isScrollInitialised = false;
}
@Override
public BatchedDocumentsIterator<T> timeRange(long startEpochMs, long endEpochMs) {
filterBuilder.timeRange(ElasticsearchMappings.ES_TIMESTAMP, startEpochMs, endEpochMs);
return this;
}
@Override
public BatchedDocumentsIterator<T> includeInterim(String interimFieldName) {
filterBuilder.interim(interimFieldName, true);
return this;
}
@Override
public boolean hasNext() {
return !isScrollInitialised || count != totalHits;
}
@Override
public Deque<T> next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
SearchResponse searchResponse = (scrollId == null) ? initScroll()
: client.prepareSearchScroll(scrollId).setScroll(CONTEXT_ALIVE_DURATION).get();
scrollId = searchResponse.getScrollId();
return mapHits(searchResponse);
}
private SearchResponse initScroll() {
LOGGER.trace("ES API CALL: search all of type " + getType() + " from index " + index);
isScrollInitialised = true;
SearchResponse searchResponse = client.prepareSearch(index).setScroll(CONTEXT_ALIVE_DURATION).setSize(BATCH_SIZE)
.setTypes(getType()).setQuery(filterBuilder.build()).addSort(SortBuilders.fieldSort(ElasticsearchMappings.ES_DOC)).get();
totalHits = searchResponse.getHits().getTotalHits();
scrollId = searchResponse.getScrollId();
return searchResponse;
}
private Deque<T> mapHits(SearchResponse searchResponse) {
Deque<T> results = new ArrayDeque<>();
SearchHit[] hits = searchResponse.getHits().getHits();
for (SearchHit hit : hits) {
T mapped = map(hit);
if (mapped != null) {
results.add(mapped);
}
}
count += hits.length;
if (!hasNext() && scrollId != null) {
client.prepareClearScroll().setScrollIds(Arrays.asList(scrollId)).get();
}
return results;
}
protected abstract String getType();
/**
* Maps the search hit to the document type
* @param hit
* the search hit
* @return The mapped document or {@code null} if the mapping failed
*/
protected abstract T map(SearchHit hit);
}

View File

@ -0,0 +1,49 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchHit;
import java.io.IOException;
import org.elasticsearch.xpack.prelert.job.results.Influencer;
class ElasticsearchBatchedInfluencersIterator extends ElasticsearchBatchedDocumentsIterator<Influencer>
{
public ElasticsearchBatchedInfluencersIterator(Client client, String jobId,
ParseFieldMatcher parserFieldMatcher)
{
super(client, ElasticsearchPersister.getJobIndexName(jobId), parserFieldMatcher);
}
@Override
protected String getType()
{
return Influencer.TYPE.getPreferredName();
}
@Override
protected Influencer map(SearchHit hit)
{
BytesReference source = hit.getSourceRef();
XContentParser parser;
try {
parser = XContentFactory.xContent(source).createParser(source);
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parser influencer", e);
}
Influencer influencer = Influencer.PARSER.apply(parser, () -> parseFieldMatcher);
influencer.setId(hit.getId());
return influencer;
}
}

View File

@ -0,0 +1,47 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchHit;
import java.io.IOException;
import org.elasticsearch.xpack.prelert.job.results.ModelDebugOutput;
class ElasticsearchBatchedModelDebugOutputIterator extends ElasticsearchBatchedDocumentsIterator<ModelDebugOutput>
{
public ElasticsearchBatchedModelDebugOutputIterator(Client client, String jobId, ParseFieldMatcher parserFieldMatcher)
{
super(client, ElasticsearchPersister.getJobIndexName(jobId), parserFieldMatcher);
}
@Override
protected String getType()
{
return ModelDebugOutput.TYPE.getPreferredName();
}
@Override
protected ModelDebugOutput map(SearchHit hit)
{
BytesReference source = hit.getSourceRef();
XContentParser parser;
try {
parser = XContentFactory.xContent(source).createParser(source);
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parser model debug output", e);
}
ModelDebugOutput result = ModelDebugOutput.PARSER.apply(parser, () -> parseFieldMatcher);
result.setId(hit.getId());
return result;
}
}

View File

@ -0,0 +1,48 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchHit;
import java.io.IOException;
import org.elasticsearch.xpack.prelert.job.ModelSizeStats;
public class ElasticsearchBatchedModelSizeStatsIterator extends ElasticsearchBatchedDocumentsIterator<ModelSizeStats>
{
public ElasticsearchBatchedModelSizeStatsIterator(Client client, String jobId, ParseFieldMatcher parserFieldMatcher)
{
super(client, ElasticsearchPersister.getJobIndexName(jobId), parserFieldMatcher);
}
@Override
protected String getType()
{
return ModelSizeStats.TYPE.getPreferredName();
}
@Override
protected ModelSizeStats map(SearchHit hit)
{
BytesReference source = hit.getSourceRef();
XContentParser parser;
try {
parser = XContentFactory.xContent(source).createParser(source);
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parser model size stats", e);
}
ModelSizeStats.Builder result = ModelSizeStats.PARSER.apply(parser, () -> parseFieldMatcher);
result.setId(hit.getId());
return result.build();
}
}

View File

@ -0,0 +1,46 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchHit;
import java.io.IOException;
import org.elasticsearch.xpack.prelert.job.ModelSnapshot;
class ElasticsearchBatchedModelSnapshotIterator extends ElasticsearchBatchedDocumentsIterator<ModelSnapshot>
{
public ElasticsearchBatchedModelSnapshotIterator(Client client, String jobId, ParseFieldMatcher parserFieldMatcher)
{
super(client, ElasticsearchPersister.getJobIndexName(jobId), parserFieldMatcher);
}
@Override
protected String getType()
{
return ModelSnapshot.TYPE.getPreferredName();
}
@Override
protected ModelSnapshot map(SearchHit hit)
{
BytesReference source = hit.getSourceRef();
XContentParser parser;
try {
parser = XContentFactory.xContent(source).createParser(source);
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parser model snapshot", e);
}
return ModelSnapshot.PARSER.apply(parser, () -> parseFieldMatcher);
}
}

View File

@ -0,0 +1,255 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteAction;
import org.elasticsearch.action.delete.DeleteRequestBuilder;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.xpack.prelert.job.ModelSizeStats;
import org.elasticsearch.xpack.prelert.job.ModelSnapshot;
import org.elasticsearch.xpack.prelert.job.ModelState;
import org.elasticsearch.xpack.prelert.job.results.AnomalyRecord;
import org.elasticsearch.xpack.prelert.job.results.Bucket;
import org.elasticsearch.xpack.prelert.job.results.BucketInfluencer;
import org.elasticsearch.xpack.prelert.job.results.Influencer;
import org.elasticsearch.xpack.prelert.job.results.ModelDebugOutput;
import java.util.Objects;
import java.util.function.LongSupplier;
public class ElasticsearchBulkDeleter implements JobDataDeleter {
private static final Logger LOGGER = Loggers.getLogger(ElasticsearchBulkDeleter.class);
private static final int SCROLL_SIZE = 1000;
private static final String SCROLL_CONTEXT_DURATION = "5m";
private final Client client;
private final String jobId;
private final BulkRequestBuilder bulkRequestBuilder;
private long deletedBucketCount;
private long deletedRecordCount;
private long deletedBucketInfluencerCount;
private long deletedInfluencerCount;
private long deletedModelSnapshotCount;
private long deletedModelStateCount;
private boolean quiet;
public ElasticsearchBulkDeleter(Client client, String jobId, boolean quiet) {
this.client = Objects.requireNonNull(client);
this.jobId = Objects.requireNonNull(jobId);
bulkRequestBuilder = client.prepareBulk();
deletedBucketCount = 0;
deletedRecordCount = 0;
deletedBucketInfluencerCount = 0;
deletedInfluencerCount = 0;
deletedModelSnapshotCount = 0;
deletedModelStateCount = 0;
this.quiet = quiet;
}
public ElasticsearchBulkDeleter(Client client, String jobId) {
this(client, jobId, false);
}
@Override
public void deleteBucket(Bucket bucket) {
deleteRecords(bucket);
deleteBucketInfluencers(bucket);
bulkRequestBuilder.add(
client.prepareDelete(ElasticsearchPersister.getJobIndexName(jobId), Bucket.TYPE.getPreferredName(), bucket.getId()));
++deletedBucketCount;
}
@Override
public void deleteRecords(Bucket bucket) {
// Find the records using the time stamp rather than a parent-child
// relationship. The parent-child filter involves two queries behind
// the scenes, and Elasticsearch documentation claims it's significantly
// slower. Here we rely on the record timestamps being identical to the
// bucket timestamp.
deleteTypeByBucket(bucket, AnomalyRecord.TYPE.getPreferredName(), () -> ++deletedRecordCount);
}
private void deleteTypeByBucket(Bucket bucket, String type, LongSupplier deleteCounter) {
QueryBuilder query = QueryBuilders.termQuery(ElasticsearchMappings.ES_TIMESTAMP,
bucket.getTimestamp().getTime());
int done = 0;
boolean finished = false;
while (finished == false) {
SearchResponse searchResponse = SearchAction.INSTANCE.newRequestBuilder(client)
.setIndices(ElasticsearchPersister.getJobIndexName(jobId))
.setTypes(type)
.setQuery(query)
.addSort(SortBuilders.fieldSort(ElasticsearchMappings.ES_DOC))
.setSize(SCROLL_SIZE)
.setFrom(done)
.execute().actionGet();
for (SearchHit hit : searchResponse.getHits()) {
++done;
addDeleteRequest(hit);
deleteCounter.getAsLong();
}
if (searchResponse.getHits().getTotalHits() == done) {
finished = true;
}
}
}
private void addDeleteRequest(SearchHit hit) {
DeleteRequestBuilder deleteRequest = DeleteAction.INSTANCE.newRequestBuilder(client)
.setIndex(ElasticsearchPersister.getJobIndexName(jobId))
.setType(hit.getType())
.setId(hit.getId());
SearchHitField parentField = hit.field(ElasticsearchMappings.PARENT);
if (parentField != null) {
deleteRequest.setParent(parentField.getValue().toString());
}
bulkRequestBuilder.add(deleteRequest);
}
public void deleteBucketInfluencers(Bucket bucket) {
// Find the bucket influencers using the time stamp, relying on the
// bucket influencer timestamps being identical to the bucket timestamp.
deleteTypeByBucket(bucket, BucketInfluencer.TYPE.getPreferredName(), () -> ++deletedBucketInfluencerCount);
}
public void deleteInfluencers(Bucket bucket) {
// Find the influencers using the time stamp, relying on the influencer
// timestamps being identical to the bucket timestamp.
deleteTypeByBucket(bucket, Influencer.TYPE.getPreferredName(), () -> ++deletedInfluencerCount);
}
public void deleteBucketByTime(Bucket bucket) {
deleteTypeByBucket(bucket, Bucket.TYPE.getPreferredName(), () -> ++deletedBucketCount);
}
@Override
public void deleteInfluencer(Influencer influencer) {
String id = influencer.getId();
if (id == null) {
LOGGER.error("Cannot delete specific influencer without an ID",
// This means we get a stack trace to show where the request came from
new NullPointerException());
return;
}
bulkRequestBuilder.add(
client.prepareDelete(ElasticsearchPersister.getJobIndexName(jobId), Influencer.TYPE.getPreferredName(), id));
++deletedInfluencerCount;
}
@Override
public void deleteModelSnapshot(ModelSnapshot modelSnapshot) {
String snapshotId = modelSnapshot.getSnapshotId();
int docCount = modelSnapshot.getSnapshotDocCount();
String indexName = ElasticsearchPersister.getJobIndexName(jobId);
// Deduce the document IDs of the state documents from the information
// in the snapshot document - we cannot query the state itself as it's
// too big and has no mappings
for (int i = 0; i < docCount; ++i) {
String stateId = snapshotId + '_' + i;
bulkRequestBuilder.add(
client.prepareDelete(indexName, ModelState.TYPE, stateId));
++deletedModelStateCount;
}
bulkRequestBuilder.add(
client.prepareDelete(indexName, ModelSnapshot.TYPE.getPreferredName(), snapshotId));
++deletedModelSnapshotCount;
}
@Override
public void deleteModelDebugOutput(ModelDebugOutput modelDebugOutput) {
String id = modelDebugOutput.getId();
bulkRequestBuilder.add(
client.prepareDelete(ElasticsearchPersister.getJobIndexName(jobId), ModelDebugOutput.TYPE.getPreferredName(), id));
}
@Override
public void deleteModelSizeStats(ModelSizeStats modelSizeStats) {
bulkRequestBuilder.add(client.prepareDelete(
ElasticsearchPersister.getJobIndexName(jobId), ModelSizeStats.TYPE.getPreferredName(), modelSizeStats.getId()));
}
public void deleteInterimResults() {
QueryBuilder qb = QueryBuilders.termQuery(Bucket.IS_INTERIM.getPreferredName(), true);
SearchResponse searchResponse = client.prepareSearch(ElasticsearchPersister.getJobIndexName(jobId))
.setTypes(Bucket.TYPE.getPreferredName(), AnomalyRecord.TYPE.getPreferredName(), Influencer.TYPE.getPreferredName(),
BucketInfluencer.TYPE.getPreferredName())
.setQuery(qb)
.addSort(SortBuilders.fieldSort(ElasticsearchMappings.ES_DOC))
.setScroll(SCROLL_CONTEXT_DURATION)
.setSize(SCROLL_SIZE)
.get();
String scrollId = searchResponse.getScrollId();
long totalHits = searchResponse.getHits().totalHits();
long totalDeletedCount = 0;
while (totalDeletedCount < totalHits) {
for (SearchHit hit : searchResponse.getHits()) {
LOGGER.trace("Search hit for bucket: " + hit.toString() + ", " + hit.getId());
String type = hit.getType();
if (type.equals(Bucket.TYPE.getPreferredName())) {
++deletedBucketCount;
} else if (type.equals(AnomalyRecord.TYPE.getPreferredName())) {
++deletedRecordCount;
} else if (type.equals(BucketInfluencer.TYPE.getPreferredName())) {
++deletedBucketInfluencerCount;
} else if (type.equals(Influencer.TYPE.getPreferredName())) {
++deletedInfluencerCount;
}
++totalDeletedCount;
addDeleteRequest(hit);
}
searchResponse = client.prepareSearchScroll(scrollId).setScroll(SCROLL_CONTEXT_DURATION).get();
}
}
/**
* Commits the pending deletions as a single bulk request and notifies the
* given listener when the request completes.
*/
@Override
public void commit(ActionListener<BulkResponse> listener) {
if (bulkRequestBuilder.numberOfActions() == 0) {
listener.onResponse(new BulkResponse(new BulkItemResponse[0], 0L));
return;
}
if (!quiet) {
LOGGER.debug("Requesting deletion of "
+ deletedBucketCount + " buckets, "
+ deletedRecordCount + " records, "
+ deletedBucketInfluencerCount + " bucket influencers, "
+ deletedInfluencerCount + " influencers, "
+ deletedModelSnapshotCount + " model snapshots"
+ " and "
+ deletedModelStateCount + " model state documents");
}
try {
bulkRequestBuilder.execute(listener);
} catch (Exception e) {
listener.onFailure(e);
}
}
}
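
For context, a minimal sketch of how a caller is expected to drive the deleter shown above: queue individual delete requests, then submit them as one bulk request via commit(). The client variable, job id and snapshot object are illustrative placeholders, not part of this change.

ElasticsearchBulkDeleter deleter = new ElasticsearchBulkDeleter(client, "my-job");
deleter.deleteModelSnapshot(snapshotToRemove);   // queues deletes for the snapshot and its state documents
deleter.commit(new ActionListener<BulkResponse>() {
    @Override
    public void onResponse(BulkResponse response) {
        // all queued deletions were submitted in a single bulk request
    }
    @Override
    public void onFailure(Exception e) {
        // the bulk request could not be executed
    }
});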

View File

@ -0,0 +1,28 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.elasticsearch.client.Client;
import java.util.function.Function;
/**
* TODO This is all just silly static typing shenanigans because Guice can't inject
* anonymous lambdas. This can all be removed once Guice goes away.
*/
public class ElasticsearchBulkDeleterFactory implements Function<String, ElasticsearchBulkDeleter> {
private final Client client;
public ElasticsearchBulkDeleterFactory(Client client) {
this.client = client;
}
@Override
public ElasticsearchBulkDeleter apply(String jobId) {
return new ElasticsearchBulkDeleter(client, jobId);
}
}
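
A short usage sketch of the factory; as the TODO above notes, once Guice injection is gone this can collapse to a plain lambda. The client and job id are illustrative placeholders.

ElasticsearchBulkDeleterFactory factory = new ElasticsearchBulkDeleterFactory(client);
ElasticsearchBulkDeleter deleter = factory.apply("my-job");
// equivalent once the class is no longer needed for injection:
Function<String, ElasticsearchBulkDeleter> lambdaFactory = jobId -> new ElasticsearchBulkDeleter(client, jobId);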

View File

@ -0,0 +1,163 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import java.util.Locale;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Pattern;
import org.elasticsearch.xpack.prelert.job.results.ReservedFieldNames;
/**
* Interprets field names containing dots as nested JSON structures.
* This matches what Elasticsearch does.
*/
class ElasticsearchDotNotationReverser
{
private static final char DOT = '.';
private static final Pattern DOT_PATTERN = Pattern.compile("\\.");
private final Map<String, Object> resultsMap;
public ElasticsearchDotNotationReverser()
{
resultsMap = new TreeMap<>();
}
// TODO - could handle values of all types Elasticsearch does, e.g. date,
// long, int, double, etc. However, at the moment field values in our
// results are only strings, so it's not "minimum viable product" right
// now. Hence this method only takes fieldValue as a String and there are
// no overloads.
/**
* Given a field name and value, convert it to a map representation of the
* (potentially nested) JSON structure Elasticsearch would use to store it.
* For example:
* <code>foo = x</code> goes to <code>{ "foo" : "x" }</code> and
* <code>foo.bar = y</code> goes to <code>{ "foo" : { "bar" : "y" } }</code>
*/
@SuppressWarnings("unchecked")
public void add(String fieldName, String fieldValue)
{
if (fieldName == null || fieldValue == null)
{
return;
}
// Minimise processing in the simple case of no dots in the field name.
if (fieldName.indexOf(DOT) == -1)
{
if (ReservedFieldNames.RESERVED_FIELD_NAMES.contains(fieldName))
{
return;
}
resultsMap.put(fieldName, fieldValue);
return;
}
String[] segments = DOT_PATTERN.split(fieldName);
// If any segment created by the split is a reserved word then ignore
// the whole field.
for (String segment : segments)
{
if (ReservedFieldNames.RESERVED_FIELD_NAMES.contains(segment))
{
return;
}
}
Map<String, Object> layerMap = resultsMap;
for (int i = 0; i < segments.length; ++i)
{
String segment = segments[i];
if (i == segments.length - 1)
{
layerMap.put(segment, fieldValue);
}
else
{
Object existingLayerValue = layerMap.get(segment);
if (existingLayerValue == null)
{
Map<String, Object> nextLayerMap = new TreeMap<>();
layerMap.put(segment, nextLayerMap);
layerMap = nextLayerMap;
}
else
{
if (existingLayerValue instanceof Map)
{
layerMap = (Map<String, Object>)existingLayerValue;
}
else
{
// This implies an inconsistency - different additions
// imply the same path leads to both an object and a
// value. For example:
// foo.bar = x
// foo.bar.baz = y
return;
}
}
}
}
}
public Map<String, Object> getResultsMap()
{
return resultsMap;
}
/**
* Mappings for a given hierarchical structure are more complex than the
* basic results.
*/
public Map<String, Object> getMappingsMap()
{
Map<String, Object> mappingsMap = new TreeMap<>();
recurseMappingsLevel(resultsMap, mappingsMap);
return mappingsMap;
}
@SuppressWarnings("unchecked")
private void recurseMappingsLevel(Map<String, Object> resultsMap,
Map<String, Object> mappingsMap)
{
for (Map.Entry<String, Object> entry : resultsMap.entrySet())
{
Map<String, Object> typeMap = new TreeMap<>();
String name = entry.getKey();
Object value = entry.getValue();
if (value instanceof Map)
{
Map<String, Object> propertiesMap = new TreeMap<>();
recurseMappingsLevel((Map<String, Object>)value, propertiesMap);
typeMap.put(ElasticsearchMappings.TYPE,
ElasticsearchMappings.OBJECT);
typeMap.put(ElasticsearchMappings.PROPERTIES, propertiesMap);
mappingsMap.put(name, typeMap);
}
else
{
String fieldType = value.getClass().getSimpleName().toLowerCase(Locale.ROOT);
if ("string".equals(fieldType)) {
fieldType = "keyword";
}
typeMap.put(ElasticsearchMappings.TYPE,
// Even though the add() method currently only supports
// strings, this way of getting the type would work for
// many Elasticsearch types, e.g. date, int, long,
// double and boolean
fieldType);
mappingsMap.put(name, typeMap);
}
}
}
}
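
An illustrative sketch of the reverser in action, assuming the field names used here are not reserved; the comments show what getResultsMap() and getMappingsMap() would return for these string inputs.

ElasticsearchDotNotationReverser reverser = new ElasticsearchDotNotationReverser();
reverser.add("instance", "host-1");
reverser.add("metric.name", "cpu");
reverser.add("metric.value", "0.93");
// getResultsMap()  => { "instance" : "host-1",
//                       "metric"   : { "name" : "cpu", "value" : "0.93" } }
// getMappingsMap() => { "instance" : { "type" : "keyword" },
//                       "metric"   : { "type" : "object",
//                                      "properties" : { "name"  : { "type" : "keyword" },
//                                                       "value" : { "type" : "keyword" } } } }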

View File

@ -0,0 +1,57 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import java.io.IOException;
import java.util.Locale;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.xpack.prelert.job.DataCounts;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
public class ElasticsearchJobDataCountsPersister implements JobDataCountsPersister {
private Client client;
private Logger logger;
public ElasticsearchJobDataCountsPersister(Client client, Logger logger) {
this.client = client;
this.logger = logger;
}
private XContentBuilder serialiseCounts(DataCounts counts) throws IOException {
XContentBuilder builder = jsonBuilder();
return counts.toXContent(builder, ToXContent.EMPTY_PARAMS);
}
@Override
public void persistDataCounts(String jobId, DataCounts counts) {
// NORELEASE - Should these stats be stored in memory?
try {
XContentBuilder content = serialiseCounts(counts);
client.prepareIndex(ElasticsearchPersister.getJobIndexName(jobId), DataCounts.TYPE.getPreferredName(),
jobId + DataCounts.DOCUMENT_SUFFIX)
.setSource(content).execute().actionGet();
}
catch (IOException ioe) {
logger.warn("Error serialising DataCounts stats", ioe);
}
catch (IndexNotFoundException e) {
String msg = String.format(Locale.ROOT, "Error writing the job '%s' status stats.", jobId);
logger.warn(msg, e);
}
}
}
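
A minimal usage sketch; the client, logger and counts objects are placeholders. The persister writes a single counts document into the job's results index and only logs on failure.

JobDataCountsPersister countsPersister = new ElasticsearchJobDataCountsPersister(client, logger);
countsPersister.persistDataCounts("my-job", dataCounts);   // indexes the DataCounts document for the job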

View File

@ -0,0 +1,100 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.ModelSizeStats;
import org.elasticsearch.xpack.prelert.job.results.ReservedFieldNames;
import java.io.IOException;
import java.util.Objects;
class ElasticsearchJobDetailsMapper {
private static final Logger LOGGER = Loggers.getLogger(ElasticsearchJobDetailsMapper.class);
private final Client client;
private final ParseFieldMatcher parseFieldMatcher;
public ElasticsearchJobDetailsMapper(Client client, ParseFieldMatcher parseFieldMatcher) {
this.client = Objects.requireNonNull(client);
this.parseFieldMatcher = Objects.requireNonNull(parseFieldMatcher);
}
/**
* Maps an Elasticsearch source map to a {@link Job} object
*
* @param source The source of an Elasticsearch search response
* @return the {@code Job} object
*/
public Job map(BytesReference source) {
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
Job.Builder builder = Job.PARSER.apply(parser, () -> parseFieldMatcher);
addModelSizeStats(builder, builder.getId());
addBucketProcessingTime(builder, builder.getId());
return builder.build();
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parser job", e);
}
}
private void addModelSizeStats(Job.Builder job, String jobId) {
String indexName = ElasticsearchPersister.getJobIndexName(jobId);
// Pull out the modelSizeStats document, and add this to the Job
LOGGER.trace("ES API CALL: get ID " + ModelSizeStats.TYPE +
" type " + ModelSizeStats.TYPE + " from index " + indexName);
GetResponse modelSizeStatsResponse = client.prepareGet(
indexName, ModelSizeStats.TYPE.getPreferredName(), ModelSizeStats.TYPE.getPreferredName()).get();
if (!modelSizeStatsResponse.isExists()) {
String msg = "No memory usage details for job with id " + jobId;
LOGGER.warn(msg);
} else {
// Remove the Kibana/Logstash '@timestamp' entry as stored in Elasticsearch,
// and replace using the API 'timestamp' key.
Object timestamp = modelSizeStatsResponse.getSource().remove(ElasticsearchMappings.ES_TIMESTAMP);
modelSizeStatsResponse.getSource().put(ModelSizeStats.TIMESTAMP_FIELD.getPreferredName(), timestamp);
BytesReference source = modelSizeStatsResponse.getSourceAsBytesRef();
XContentParser parser;
try {
parser = XContentFactory.xContent(source).createParser(source);
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parser model size stats", e);
}
ModelSizeStats.Builder modelSizeStats = ModelSizeStats.PARSER.apply(parser, () -> parseFieldMatcher);
job.setModelSizeStats(modelSizeStats);
}
}
private void addBucketProcessingTime(Job.Builder job, String jobId) {
String indexName = ElasticsearchPersister.getJobIndexName(jobId);
// Pull out the average bucket processing time document, and add this to the Job
LOGGER.trace("ES API CALL: get ID " + ReservedFieldNames.AVERAGE_PROCESSING_TIME_MS +
" type " + ReservedFieldNames.BUCKET_PROCESSING_TIME_TYPE + " from index " + indexName);
GetResponse procTimeResponse = client.prepareGet(
indexName, ReservedFieldNames.BUCKET_PROCESSING_TIME_TYPE,
ReservedFieldNames.AVERAGE_PROCESSING_TIME_MS).get();
if (!procTimeResponse.isExists()) {
String msg = "No average bucket processing time details for job with id " + jobId;
LOGGER.warn(msg);
} else {
Object averageTime = procTimeResponse.getSource()
.get(ReservedFieldNames.AVERAGE_PROCESSING_TIME_MS);
if (averageTime instanceof Double) {
job.setAverageBucketProcessingTimeMs((Double) averageTime);
}
}
}
}
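
A hedged sketch of the mapper in use, assuming the job configuration document has already been fetched; the source variable is a placeholder for that document's BytesReference, and ParseFieldMatcher.STRICT is used purely for illustration.

ElasticsearchJobDetailsMapper jobMapper = new ElasticsearchJobDetailsMapper(client, ParseFieldMatcher.STRICT);
Job job = jobMapper.map(source);
// the returned Job carries model size stats and the average bucket processing
// time whenever those documents exist in the job's index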

View File

@ -0,0 +1,892 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.prelert.job.CategorizerState;
import org.elasticsearch.xpack.prelert.job.DataCounts;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.ModelSizeStats;
import org.elasticsearch.xpack.prelert.job.ModelSnapshot;
import org.elasticsearch.xpack.prelert.job.ModelState;
import org.elasticsearch.xpack.prelert.job.audit.AuditActivity;
import org.elasticsearch.xpack.prelert.job.audit.AuditMessage;
import org.elasticsearch.xpack.prelert.job.quantiles.Quantiles;
import org.elasticsearch.xpack.prelert.job.results.AnomalyCause;
import org.elasticsearch.xpack.prelert.job.results.AnomalyRecord;
import org.elasticsearch.xpack.prelert.job.results.Bucket;
import org.elasticsearch.xpack.prelert.job.results.BucketInfluencer;
import org.elasticsearch.xpack.prelert.job.results.CategoryDefinition;
import org.elasticsearch.xpack.prelert.job.results.Influence;
import org.elasticsearch.xpack.prelert.job.results.Influencer;
import org.elasticsearch.xpack.prelert.job.results.ModelDebugOutput;
import org.elasticsearch.xpack.prelert.job.results.ReservedFieldNames;
import org.elasticsearch.xpack.prelert.job.usage.Usage;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
/**
* Static methods to create Elasticsearch mappings for the autodetect
* persisted objects/documents
* <p>
* Elasticsearch automatically recognises array types so they are
* not explicitly mapped as such. For arrays of objects the type
* must be set to <i>nested</i> so the arrays are searched properly
* see https://www.elastic.co/guide/en/elasticsearch/guide/current/nested-objects.html
* <p>
* It is expected that indexes to which these mappings are applied have their
* default analyzer set to "keyword", which does not tokenise fields. The
* index-wide default analyzer cannot be set via these mappings, so needs to be
* set in the index settings during index creation. Then the _all field has its
* analyzer set to "whitespace" by these mappings, so that _all gets tokenised
* using whitespace.
*/
public class ElasticsearchMappings {
/**
* String constants used in mappings
*/
static final String INDEX = "index";
static final String NO = "false";
static final String ALL = "_all";
static final String ENABLED = "enabled";
static final String ANALYZER = "analyzer";
static final String WHITESPACE = "whitespace";
static final String INCLUDE_IN_ALL = "include_in_all";
static final String NESTED = "nested";
static final String COPY_TO = "copy_to";
static final String PARENT = "_parent";
static final String PROPERTIES = "properties";
static final String TYPE = "type";
static final String DYNAMIC = "dynamic";
/**
* Name of the field used to store the timestamp in Elasticsearch.
* Note the field name is different to {@link org.elasticsearch.xpack.prelert.job.results.Bucket#TIMESTAMP} used by the
* API Bucket Resource, and is chosen for consistency with the default field name used by
* Logstash and Kibana.
*/
static final String ES_TIMESTAMP = "timestamp";
/**
* Name of the Elasticsearch field by which documents are sorted by default
*/
static final String ES_DOC = "_doc";
/**
* Elasticsearch data types
*/
static final String BOOLEAN = "boolean";
static final String DATE = "date";
static final String DOUBLE = "double";
static final String INTEGER = "integer";
static final String KEYWORD = "keyword";
static final String LONG = "long";
static final String OBJECT = "object";
static final String TEXT = "text";
private ElasticsearchMappings() {
}
public static XContentBuilder dataCountsMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(DataCounts.TYPE.getPreferredName())
.startObject(ALL)
.field(ENABLED, false)
// analyzer must be specified even though _all is disabled
// because all types in the same index must have the same
// analyzer for a given field
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(DataCounts.PROCESSED_RECORD_COUNT.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(DataCounts.PROCESSED_FIELD_COUNT.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(DataCounts.INPUT_BYTES.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(DataCounts.INPUT_RECORD_COUNT.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(DataCounts.INPUT_FIELD_COUNT.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(DataCounts.INVALID_DATE_COUNT.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(DataCounts.MISSING_FIELD_COUNT.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(DataCounts.OUT_OF_ORDER_TIME_COUNT.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(DataCounts.LATEST_RECORD_TIME.getPreferredName())
.field(TYPE, DATE)
.endObject()
.endObject()
.endObject()
.endObject();
}
/**
* Create the Elasticsearch mapping for {@linkplain org.elasticsearch.xpack.prelert.job.results.Bucket}.
* The '_all' field is disabled as the document isn't meant to be searched.
*/
public static XContentBuilder bucketMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(Bucket.TYPE.getPreferredName())
.startObject(ALL)
.field(ENABLED, false)
// analyzer must be specified even though _all is disabled
// because all types in the same index must have the same
// analyzer for a given field
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(Job.ID.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE)
.endObject()
.startObject(Bucket.ANOMALY_SCORE.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.startObject(Bucket.INITIAL_ANOMALY_SCORE.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.startObject(Bucket.MAX_NORMALIZED_PROBABILITY.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.startObject(Bucket.IS_INTERIM.getPreferredName())
.field(TYPE, BOOLEAN)
.endObject()
.startObject(Bucket.RECORD_COUNT.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(Bucket.EVENT_COUNT.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(Bucket.BUCKET_SPAN.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(Bucket.PROCESSING_TIME_MS.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(Bucket.BUCKET_INFLUENCERS.getPreferredName())
.field(TYPE, NESTED)
.startObject(PROPERTIES)
.startObject(BucketInfluencer.INFLUENCER_FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(BucketInfluencer.INITIAL_ANOMALY_SCORE.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.startObject(BucketInfluencer.ANOMALY_SCORE.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.startObject(BucketInfluencer.RAW_ANOMALY_SCORE.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.startObject(BucketInfluencer.PROBABILITY.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.endObject()
.endObject()
.startObject(Bucket.PARTITION_SCORES.getPreferredName())
.field(TYPE, NESTED)
.startObject(PROPERTIES)
.startObject(AnomalyRecord.PARTITION_FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(AnomalyRecord.ANOMALY_SCORE.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.startObject(AnomalyRecord.PROBABILITY.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.endObject()
.endObject()
.endObject()
.endObject()
.endObject();
}
/**
* Create the Elasticsearch mapping for {@linkplain org.elasticsearch.xpack.prelert.job.results.BucketInfluencer}.
*/
public static XContentBuilder bucketInfluencerMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(BucketInfluencer.TYPE.getPreferredName())
.startObject(ALL)
.field(ENABLED, false)
// analyzer must be specified even though _all is disabled
// because all types in the same index must have the same
// analyzer for a given field
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(Job.ID.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE)
.endObject()
.startObject(Bucket.IS_INTERIM.getPreferredName())
.field(TYPE, BOOLEAN)
.endObject()
.startObject(BucketInfluencer.INFLUENCER_FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(BucketInfluencer.INITIAL_ANOMALY_SCORE.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.startObject(BucketInfluencer.ANOMALY_SCORE.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.startObject(BucketInfluencer.RAW_ANOMALY_SCORE.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.startObject(BucketInfluencer.PROBABILITY.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.endObject()
.endObject()
.endObject();
}
/**
* Partition normalized scores. There is one per bucket
* so the timestamp is sufficient to uniquely identify
* the document per bucket per job
* <p>
* Partition field values and scores are nested objects.
*/
public static XContentBuilder bucketPartitionMaxNormalizedScores() throws IOException {
return jsonBuilder()
.startObject()
.startObject(ReservedFieldNames.PARTITION_NORMALIZED_PROB_TYPE)
.startObject(ALL)
.field(ENABLED, false)
// analyzer must be specified even though _all is disabled
// because all types in the same index must have the same
// analyzer for a given field
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(Job.ID.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE)
.endObject()
.startObject(ReservedFieldNames.PARTITION_NORMALIZED_PROBS)
.field(TYPE, NESTED)
.startObject(PROPERTIES)
.startObject(AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(Bucket.MAX_NORMALIZED_PROBABILITY.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.endObject()
.endObject()
.endObject()
.endObject()
.endObject();
}
public static XContentBuilder categorizerStateMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(CategorizerState.TYPE)
.field(ENABLED, false)
.startObject(ALL)
.field(ENABLED, false)
// analyzer must be specified even though _all is disabled
// because all types in the same index must have the same
// analyzer for a given field
.field(ANALYZER, WHITESPACE)
.endObject()
.endObject()
.endObject();
}
public static XContentBuilder categoryDefinitionMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(CategoryDefinition.TYPE.getPreferredName())
.startObject(ALL)
.field(ENABLED, false)
// analyzer must be specified even though _all is disabled
// because all types in the same index must have the same
// analyzer for a given field
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(CategoryDefinition.CATEGORY_ID.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(Job.ID.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(CategoryDefinition.TERMS.getPreferredName())
.field(TYPE, TEXT).field(INDEX, NO)
.endObject()
.startObject(CategoryDefinition.REGEX.getPreferredName())
.field(TYPE, TEXT).field(INDEX, NO)
.endObject()
.startObject(CategoryDefinition.MAX_MATCHING_LENGTH.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(CategoryDefinition.EXAMPLES.getPreferredName())
.field(TYPE, TEXT).field(INDEX, NO)
.endObject()
.endObject()
.endObject()
.endObject();
}
/**
* Records have a _parent mapping to a {@linkplain org.elasticsearch.xpack.prelert.job.results.Bucket}.
*
* @param termFieldNames Optionally, other field names to include in the
* mappings. Pass <code>null</code> if not required.
*/
public static XContentBuilder recordMapping(Collection<String> termFieldNames) throws IOException {
XContentBuilder builder = jsonBuilder()
.startObject()
.startObject(AnomalyRecord.TYPE.getPreferredName())
.startObject(ALL)
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(Job.ID.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.DETECTOR_INDEX.getPreferredName())
.field(TYPE, INTEGER).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.ACTUAL.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.TYPICAL.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.PROBABILITY.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.FUNCTION.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.FUNCTION_DESCRIPTION.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.BY_FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.BY_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(AnomalyRecord.FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.PARTITION_FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(AnomalyRecord.OVER_FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.OVER_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(AnomalyRecord.CAUSES.getPreferredName())
.field(TYPE, NESTED)
.startObject(PROPERTIES)
.startObject(AnomalyCause.ACTUAL.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyCause.TYPICAL.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyCause.PROBABILITY.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyCause.FUNCTION.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyCause.FUNCTION_DESCRIPTION.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyCause.BY_FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyCause.BY_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(AnomalyCause.CORRELATED_BY_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(AnomalyCause.FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyCause.PARTITION_FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyCause.PARTITION_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(AnomalyCause.OVER_FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyCause.OVER_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.endObject()
.endObject()
.startObject(AnomalyRecord.ANOMALY_SCORE.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.NORMALIZED_PROBABILITY.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.INITIAL_NORMALIZED_PROBABILITY.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.IS_INTERIM.getPreferredName())
.field(TYPE, BOOLEAN).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(AnomalyRecord.INFLUENCERS.getPreferredName())
/* Array of influences */
.field(TYPE, NESTED)
.startObject(PROPERTIES)
.startObject(Influence.INFLUENCER_FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(Influence.INFLUENCER_FIELD_VALUES.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.endObject()
.endObject();
if (termFieldNames != null) {
ElasticsearchDotNotationReverser reverser = new ElasticsearchDotNotationReverser();
for (String fieldName : termFieldNames) {
reverser.add(fieldName, "");
}
for (Map.Entry<String, Object> entry : reverser.getMappingsMap().entrySet()) {
builder.field(entry.getKey(), entry.getValue());
}
}
return builder
.endObject()
.endObject()
.endObject();
}
/**
* Create the Elasticsearch mapping for {@linkplain Quantiles}.
* The '_all' field is disabled as the document isn't meant to be searched.
* <p>
* The quantile state string is not searchable (index = 'no') as it could be
* very large.
*/
public static XContentBuilder quantilesMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(Quantiles.TYPE.getPreferredName())
.startObject(ALL)
.field(ENABLED, false)
// analyzer must be specified even though _all is disabled
// because all types in the same index must have the same
// analyzer for a given field
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE)
.endObject()
.startObject(Quantiles.QUANTILE_STATE.getPreferredName())
.field(TYPE, TEXT).field(INDEX, NO)
.endObject()
.endObject()
.endObject()
.endObject();
}
/**
* Create the Elasticsearch mapping for {@linkplain ModelState}.
* The model state could potentially be huge (over a gigabyte in size)
* so all analysis by Elasticsearch is disabled. The only way to
* retrieve the model state is by knowing the ID of a particular
* document or by searching for all documents of this type.
*/
public static XContentBuilder modelStateMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(ModelState.TYPE)
.field(ENABLED, false)
.startObject(ALL)
.field(ENABLED, false)
// analyzer must be specified even though _all is disabled
// because all types in the same index must have the same
// analyzer for a given field
.field(ANALYZER, WHITESPACE)
.endObject()
.endObject()
.endObject();
}
/**
* Create the Elasticsearch mapping for {@linkplain ModelSnapshot}.
* The snapshot document holds the snapshot metadata (description, restore
* priority, document counts, embedded size stats and quantiles); the
* potentially huge model state itself is stored separately in
* {@linkplain ModelState} documents keyed off the snapshot ID.
*/
public static XContentBuilder modelSnapshotMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(ModelSnapshot.TYPE.getPreferredName())
.startObject(ALL)
.field(ENABLED, false)
// analyzer must be specified even though _all is disabled
// because all types in the same index must have the same
// analyzer for a given field
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(Job.ID.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE)
.endObject()
// "description" is analyzed so that it has the same
// mapping as a user field of the same name - this means
// it doesn't have to be a reserved field name
.startObject(ModelSnapshot.DESCRIPTION.getPreferredName())
.field(TYPE, TEXT)
.endObject()
.startObject(ModelSnapshot.RESTORE_PRIORITY.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(ModelSnapshot.SNAPSHOT_ID.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(ModelSnapshot.SNAPSHOT_DOC_COUNT.getPreferredName())
.field(TYPE, INTEGER)
.endObject()
.startObject(ModelSizeStats.TYPE.getPreferredName())
.startObject(PROPERTIES)
.startObject(Job.ID.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(ModelSizeStats.MODEL_BYTES_FIELD.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(ModelSizeStats.TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(ModelSizeStats.TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(ModelSizeStats.TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(ModelSizeStats.BUCKET_ALLOCATION_FAILURES_COUNT_FIELD.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(ModelSizeStats.MEMORY_STATUS_FIELD.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE)
.endObject()
.startObject(ModelSizeStats.LOG_TIME_FIELD.getPreferredName())
.field(TYPE, DATE)
.endObject()
.endObject()
.endObject()
.startObject(Quantiles.TYPE.getPreferredName())
.startObject(PROPERTIES)
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE)
.endObject()
.startObject(Quantiles.QUANTILE_STATE.getPreferredName())
.field(TYPE, TEXT).field(INDEX, NO)
.endObject()
.endObject()
.endObject()
.startObject(ModelSnapshot.LATEST_RECORD_TIME.getPreferredName())
.field(TYPE, DATE)
.endObject()
.startObject(ModelSnapshot.LATEST_RESULT_TIME.getPreferredName())
.field(TYPE, DATE)
.endObject()
.endObject()
.endObject()
.endObject();
}
/**
* Create the Elasticsearch mapping for {@linkplain ModelSizeStats}.
*/
public static XContentBuilder modelSizeStatsMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(ModelSizeStats.TYPE.getPreferredName())
.startObject(ALL)
.field(ENABLED, false)
// analyzer must be specified even though _all is disabled
// because all types in the same index must have the same
// analyzer for a given field
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(ModelSizeStats.MODEL_BYTES_FIELD.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(ModelSizeStats.TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(ModelSizeStats.TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(ModelSizeStats.TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(ModelSizeStats.BUCKET_ALLOCATION_FAILURES_COUNT_FIELD.getPreferredName())
.field(TYPE, LONG)
.endObject()
.startObject(ModelSizeStats.MEMORY_STATUS_FIELD.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE)
.endObject()
.startObject(ModelSizeStats.LOG_TIME_FIELD.getPreferredName())
.field(TYPE, DATE)
.endObject()
.endObject()
.endObject()
.endObject();
}
/**
* Mapping for model debug output
*
* @param termFieldNames Optionally, other field names to include in the
* mappings. Pass <code>null</code> if not required.
*/
public static XContentBuilder modelDebugOutputMapping(Collection<String> termFieldNames) throws IOException {
XContentBuilder builder = jsonBuilder()
.startObject()
.startObject(ModelDebugOutput.TYPE.getPreferredName())
.startObject(ALL)
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(Job.ID.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(ModelDebugOutput.PARTITION_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(ModelDebugOutput.OVER_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(ModelDebugOutput.BY_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(ModelDebugOutput.DEBUG_FEATURE.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(ModelDebugOutput.DEBUG_LOWER.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(ModelDebugOutput.DEBUG_UPPER.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(ModelDebugOutput.DEBUG_MEDIAN.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(ModelDebugOutput.ACTUAL.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject();
if (termFieldNames != null) {
ElasticsearchDotNotationReverser reverser = new ElasticsearchDotNotationReverser();
for (String fieldName : termFieldNames) {
reverser.add(fieldName, "");
}
for (Map.Entry<String, Object> entry : reverser.getMappingsMap().entrySet()) {
builder.field(entry.getKey(), entry.getValue());
}
}
return builder
.endObject()
.endObject()
.endObject();
}
/**
* Influence results mapping
*
* @param influencerFieldNames Optionally, other field names to include in the
* mappings. Pass <code>null</code> if not required.
*/
public static XContentBuilder influencerMapping(Collection<String> influencerFieldNames) throws IOException {
XContentBuilder builder = jsonBuilder()
.startObject()
.startObject(Influencer.TYPE.getPreferredName())
.startObject(ALL)
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(Job.ID.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(Influencer.PROBABILITY.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(Influencer.INITIAL_ANOMALY_SCORE.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(Influencer.ANOMALY_SCORE.getPreferredName())
.field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(Influencer.INFLUENCER_FIELD_NAME.getPreferredName())
.field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
.endObject()
.startObject(Influencer.INFLUENCER_FIELD_VALUE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(Bucket.IS_INTERIM.getPreferredName())
.field(TYPE, BOOLEAN).field(INCLUDE_IN_ALL, false)
.endObject();
if (influencerFieldNames != null) {
ElasticsearchDotNotationReverser reverser = new ElasticsearchDotNotationReverser();
for (String fieldName : influencerFieldNames) {
reverser.add(fieldName, "");
}
for (Map.Entry<String, Object> entry : reverser.getMappingsMap().entrySet()) {
builder.field(entry.getKey(), entry.getValue());
}
}
return builder
.endObject()
.endObject()
.endObject();
}
/**
* The Elasticsearch mappings for the usage documents
*/
public static XContentBuilder usageMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(Usage.TYPE)
.startObject(ALL)
.field(ENABLED, false)
// analyzer must be specified even though _all is disabled
// because all types in the same index must have the same
// analyzer for a given field
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE)
.endObject()
.startObject(Usage.INPUT_BYTES)
.field(TYPE, LONG)
.endObject()
.startObject(Usage.INPUT_FIELD_COUNT)
.field(TYPE, LONG)
.endObject()
.startObject(Usage.INPUT_RECORD_COUNT)
.field(TYPE, LONG)
.endObject()
.endObject()
.endObject()
.endObject();
}
public static XContentBuilder auditMessageMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(AuditMessage.TYPE.getPreferredName())
.startObject(PROPERTIES)
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE)
.endObject()
.endObject()
.endObject()
.endObject();
}
public static XContentBuilder auditActivityMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(AuditActivity.TYPE.getPreferredName())
.startObject(PROPERTIES)
.startObject(ES_TIMESTAMP)
.field(TYPE, DATE)
.endObject()
.endObject()
.endObject()
.endObject();
}
public static XContentBuilder processingTimeMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(ReservedFieldNames.BUCKET_PROCESSING_TIME_TYPE)
.startObject(ALL)
.field(ENABLED, false)
// analyzer must be specified even though _all is disabled
// because all types in the same index must have the same
// analyzer for a given field
.field(ANALYZER, WHITESPACE)
.endObject()
.startObject(PROPERTIES)
.startObject(ReservedFieldNames.AVERAGE_PROCESSING_TIME_MS)
.field(TYPE, DOUBLE)
.endObject()
.endObject()
.endObject()
.endObject();
}
}
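
For illustration, a sketch of how these builders would typically be applied when creating a job's results index. The index name is a placeholder, and the default-analyzer setting reflects the expectation described in the class Javadoc rather than code in this change.

CreateIndexRequestBuilder createIndex = client.admin().indices().prepareCreate("prelertresults-my-job")
        .setSettings(Settings.builder().put("index.analysis.analyzer.default.type", "keyword"));
createIndex.addMapping(Bucket.TYPE.getPreferredName(), ElasticsearchMappings.bucketMapping());
createIndex.addMapping(Quantiles.TYPE.getPreferredName(), ElasticsearchMappings.quantilesMapping());
createIndex.get();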

View File

@ -0,0 +1,558 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.prelert.job.Job;
import org.elasticsearch.xpack.prelert.job.ModelSizeStats;
import org.elasticsearch.xpack.prelert.job.ModelSnapshot;
import org.elasticsearch.xpack.prelert.job.quantiles.Quantiles;
import org.elasticsearch.xpack.prelert.job.results.AnomalyRecord;
import org.elasticsearch.xpack.prelert.job.results.Bucket;
import org.elasticsearch.xpack.prelert.job.results.BucketInfluencer;
import org.elasticsearch.xpack.prelert.job.results.CategoryDefinition;
import org.elasticsearch.xpack.prelert.job.results.Influencer;
import org.elasticsearch.xpack.prelert.job.results.ModelDebugOutput;
import org.elasticsearch.xpack.prelert.job.results.ReservedFieldNames;
import java.io.IOException;
import java.util.Date;
import java.util.List;
import java.util.Map.Entry;
import java.util.function.Supplier;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
/**
* Saves result Buckets and Quantiles to Elasticsearch<br>
*
* <b>Buckets</b> are written with the following structure:
* <h2>Bucket</h2> The results of each job are stored in buckets, this is the
* top level structure for the results. A bucket contains multiple anomaly
* records. The anomaly score of the bucket may not match the summed score of
* all the records as all the records may not have been outputted for the
* bucket.
* <h2>Anomaly Record</h2> In Elasticsearch records have a parent &lt;-&lt;
* child relationship with buckets and should only exist is relation to a parent
* bucket. Each record was generated by a detector which can be identified via
* the detectorIndex field.
* <h2>Detector</h2> The Job has a fixed number of detectors but there may not
* be output for every detector in each bucket. <br>
* <b>Quantiles</b> may contain model quantiles used in normalisation and are
* stored in documents of type {@link Quantiles#TYPE} <br>
* <h2>ModelSizeStats</h2> This is stored in a flat structure <br>
*
* @see org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchMappings
*/
public class ElasticsearchPersister implements JobResultsPersister, JobRenormaliser
{
private static final Logger LOGGER = Loggers.getLogger(ElasticsearchPersister.class);
public static final String INDEX_PREFIX = "prelertresults-";
public static String getJobIndexName(String jobId) {
return INDEX_PREFIX + jobId;
}
private final Client client;
// TODO norelease: remove this field; the job id can be inferred from most of the methods' parameters here, and for cases
// where there are no parameters we can supply the job id. This way we don't have to create an instance of the class
// per job
private final String jobId;
/**
* Create with the Elasticsearch client. Data will be written to
* the index <code>jobId</code>
*
* @param jobId The job Id/Elasticsearch index
* @param client The Elasticsearch client
*/
public ElasticsearchPersister(String jobId, Client client)
{
this.jobId = jobId;
this.client = client;
}
@Override
public void persistBucket(Bucket bucket)
{
if (bucket.getRecords() == null)
{
return;
}
try
{
XContentBuilder content = serialiseWithJobId(Bucket.TYPE.getPreferredName(), bucket);
String indexName = getJobIndexName(jobId);
LOGGER.trace("ES API CALL: index type " + Bucket.TYPE +
" to index " + indexName + " at epoch " + bucket.getEpoch());
IndexResponse response = client.prepareIndex(indexName, Bucket.TYPE.getPreferredName())
.setSource(content)
.execute().actionGet();
bucket.setId(response.getId());
persistBucketInfluencersStandalone(bucket.getId(), bucket.getBucketInfluencers(),
bucket.getTimestamp(), bucket.isInterim());
if (bucket.getInfluencers() != null && bucket.getInfluencers().isEmpty() == false)
{
BulkRequestBuilder addInfluencersRequest = client.prepareBulk();
for (Influencer influencer : bucket.getInfluencers())
{
influencer.setTimestamp(bucket.getTimestamp());
influencer.setInterim(bucket.isInterim());
content = serialiseWithJobId(Influencer.TYPE.getPreferredName(), influencer);
LOGGER.trace("ES BULK ACTION: index type " + Influencer.TYPE +
" to index " + indexName + " with auto-generated ID");
addInfluencersRequest.add(
client.prepareIndex(indexName, Influencer.TYPE.getPreferredName())
.setSource(content));
}
LOGGER.trace("ES API CALL: bulk request with " + addInfluencersRequest.numberOfActions() + " actions");
BulkResponse addInfluencersResponse = addInfluencersRequest.execute().actionGet();
if (addInfluencersResponse.hasFailures())
{
LOGGER.error("Bulk index of Influencers has errors: "
+ addInfluencersResponse.buildFailureMessage());
}
}
if (bucket.getRecords().isEmpty() == false)
{
BulkRequestBuilder addRecordsRequest = client.prepareBulk();
for (AnomalyRecord record : bucket.getRecords())
{
record.setTimestamp(bucket.getTimestamp());
content = serialiseWithJobId(AnomalyRecord.TYPE.getPreferredName(), record);
LOGGER.trace("ES BULK ACTION: index type " + AnomalyRecord.TYPE +
" to index " + indexName + " with auto-generated ID, for bucket "
+ bucket.getId());
addRecordsRequest.add(client.prepareIndex(indexName, AnomalyRecord.TYPE.getPreferredName())
.setSource(content)
.setParent(bucket.getId()));
}
LOGGER.trace("ES API CALL: bulk request with " + addRecordsRequest.numberOfActions() + " actions");
BulkResponse addRecordsResponse = addRecordsRequest.execute().actionGet();
if (addRecordsResponse.hasFailures())
{
LOGGER.error("Bulk index of AnomalyRecord has errors: "
+ addRecordsResponse.buildFailureMessage());
}
}
persistPerPartitionMaxProbabilities(bucket);
}
catch (IOException e)
{
LOGGER.error("Error writing bucket state", e);
}
}
private void persistPerPartitionMaxProbabilities(Bucket bucket)
{
if (bucket.getPerPartitionMaxProbability().isEmpty())
{
return;
}
try
{
XContentBuilder builder = jsonBuilder();
builder.startObject()
.field(ElasticsearchMappings.ES_TIMESTAMP, bucket.getTimestamp())
.field(Job.ID.getPreferredName(), jobId);
builder.startArray(ReservedFieldNames.PARTITION_NORMALIZED_PROBS);
for (Entry<String, Double> entry : bucket.getPerPartitionMaxProbability().entrySet())
{
builder.startObject()
.field(AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName(), entry.getKey())
.field(Bucket.MAX_NORMALIZED_PROBABILITY.getPreferredName(), entry.getValue())
.endObject();
}
builder.endArray().endObject();
String indexName = getJobIndexName(jobId);
LOGGER.trace("ES API CALL: index type " + ReservedFieldNames.PARTITION_NORMALIZED_PROB_TYPE +
" to index " + indexName + " at epoch " + bucket.getEpoch());
client.prepareIndex(indexName, ReservedFieldNames.PARTITION_NORMALIZED_PROB_TYPE)
.setSource(builder)
.setId(bucket.getId())
.execute().actionGet();
}
catch (IOException e)
{
LOGGER.error("Error updating bucket per partition max normalized scores", e);
return;
}
}
@Override
public void persistCategoryDefinition(CategoryDefinition category)
{
Persistable persistable = new Persistable(category, () -> CategoryDefinition.TYPE.getPreferredName(),
() -> String.valueOf(category.getCategoryId()),
() -> serialiseCategoryDefinition(category));
persistable.persist();
// Don't commit as we expect masses of these updates and they're not
// read again by this process
}
/**
* The quantiles objects are written with a fixed ID, so that the
* latest quantiles will overwrite the previous ones. For each ES index,
* which corresponds to a job, there can only be one quantiles document.
* @param quantiles If <code>null</code> then returns straight away.
*/
@Override
public void persistQuantiles(Quantiles quantiles)
{
Persistable persistable = new Persistable(quantiles, () -> Quantiles.TYPE.getPreferredName(),
() -> Quantiles.QUANTILES_ID, () -> serialiseWithJobId(Quantiles.TYPE.getPreferredName(), quantiles));
if (persistable.persist())
{
// Refresh the index when persisting quantiles so that previously
// persisted results will be available for searching. Do this using the
// indices API rather than the index API (used to write the quantiles
// above), because this will refresh all shards rather than just the
// shard that the quantiles document itself was written to.
commitWrites();
}
}
/**
* Write a model snapshot description to Elasticsearch. Note that this is
* only the description - the actual model state is persisted separately.
* @param modelSnapshot If <code>null</code> then returns straight away.
*/
@Override
public void persistModelSnapshot(ModelSnapshot modelSnapshot)
{
Persistable persistable = new Persistable(modelSnapshot, () -> ModelSnapshot.TYPE.getPreferredName(),
() -> modelSnapshot.getSnapshotId(), () -> serialiseWithJobId(ModelSnapshot.TYPE.getPreferredName(), modelSnapshot));
persistable.persist();
}
/**
* Persist the memory usage data
* @param modelSizeStats If <code>null</code> then returns straight away.
*/
@Override
public void persistModelSizeStats(ModelSizeStats modelSizeStats)
{
LOGGER.trace("Persisting model size stats, for size " + modelSizeStats.getModelBytes());
Persistable persistable = new Persistable(modelSizeStats, () -> ModelSizeStats.TYPE.getPreferredName(),
() -> modelSizeStats.getId(),
() -> serialiseWithJobId(ModelSizeStats.TYPE.getPreferredName(), modelSizeStats));
persistable.persist();
persistable = new Persistable(modelSizeStats, () -> ModelSizeStats.TYPE.getPreferredName(),
() -> null,
() -> serialiseWithJobId(ModelSizeStats.TYPE.getPreferredName(), modelSizeStats));
persistable.persist();
// Don't commit as we expect masses of these updates and they're only
// for information at the API level
}
/**
* Persist model debug output
* @param modelDebugOutput If <code>null</code> then returns straight away.
*/
@Override
public void persistModelDebugOutput(ModelDebugOutput modelDebugOutput)
{
Persistable persistable = new Persistable(modelDebugOutput, () -> ModelDebugOutput.TYPE.getPreferredName(),
() -> null, () -> serialiseWithJobId(ModelDebugOutput.TYPE.getPreferredName(), modelDebugOutput));
persistable.persist();
// Don't commit as we expect masses of these updates and they're not
// read again by this process
}
@Override
public void persistInfluencer(Influencer influencer)
{
Persistable persistable = new Persistable(influencer, () -> Influencer.TYPE.getPreferredName(),
() -> influencer.getId(), () -> serialiseWithJobId(Influencer.TYPE.getPreferredName(), influencer));
persistable.persist();
// Don't commit as we expect masses of these updates and they're not
// read again by this process
}
@Override
public void persistBulkState(BytesReference bytesRef) {
try {
// No validation - assume the native process has formatted the state correctly
byte[] bytes = bytesRef.toBytesRef().bytes;
LOGGER.trace("ES API CALL: bulk index");
client.prepareBulk()
.add(bytes, 0, bytes.length)
.execute().actionGet();
} catch (Exception e) {
LOGGER.error("Error persisting bulk state", e);
}
}
/**
* Refreshes the Elasticsearch index.
* Blocks until results are searchable.
*/
@Override
public boolean commitWrites()
{
String indexName = getJobIndexName(jobId);
// Refresh should wait for Lucene to make the data searchable
LOGGER.trace("ES API CALL: refresh index " + indexName);
client.admin().indices().refresh(new RefreshRequest(indexName)).actionGet();
return true;
}
@Override
public void updateBucket(Bucket bucket)
{
try
{
String indexName = getJobIndexName(jobId);
LOGGER.trace("ES API CALL: index type " + Bucket.TYPE +
" to index " + indexName + " with ID " + bucket.getId());
client.prepareIndex(indexName, Bucket.TYPE.getPreferredName(), bucket.getId())
.setSource(serialiseWithJobId(Bucket.TYPE.getPreferredName(), bucket)).execute().actionGet();
}
catch (IOException e)
{
LOGGER.error("Error updating bucket state", e);
return;
}
// If the update to the bucket was successful, also update the
// standalone copies of the nested bucket influencers
try
{
persistBucketInfluencersStandalone(bucket.getId(), bucket.getBucketInfluencers(),
bucket.getTimestamp(), bucket.isInterim());
}
catch (IOException e)
{
LOGGER.error("Error updating standalone bucket influencer state", e);
return;
}
persistPerPartitionMaxProbabilities(bucket);
}
private void persistBucketInfluencersStandalone(String bucketId, List<BucketInfluencer> bucketInfluencers,
Date bucketTime, boolean isInterim) throws IOException
{
if (bucketInfluencers != null && bucketInfluencers.isEmpty() == false)
{
BulkRequestBuilder addBucketInfluencersRequest = client.prepareBulk();
for (BucketInfluencer bucketInfluencer : bucketInfluencers)
{
XContentBuilder content = serialiseBucketInfluencerStandalone(bucketInfluencer,
bucketTime, isInterim);
// Need consistent IDs to ensure overwriting on renormalisation
String id = bucketId + bucketInfluencer.getInfluencerFieldName();
String indexName = getJobIndexName(jobId);
LOGGER.trace("ES BULK ACTION: index type " + BucketInfluencer.TYPE +
" to index " + indexName + " with ID " + id);
addBucketInfluencersRequest.add(
client.prepareIndex(indexName, BucketInfluencer.TYPE.getPreferredName(), id)
.setSource(content));
}
LOGGER.trace("ES API CALL: bulk request with " + addBucketInfluencersRequest.numberOfActions() + " actions");
BulkResponse addBucketInfluencersResponse = addBucketInfluencersRequest.execute().actionGet();
if (addBucketInfluencersResponse.hasFailures())
{
LOGGER.error("Bulk index of Bucket Influencers has errors: "
+ addBucketInfluencersResponse.buildFailureMessage());
}
}
}
@Override
public void updateRecords(String bucketId, List<AnomalyRecord> records)
{
try
{
// Now bulk update the records within the bucket
BulkRequestBuilder bulkRequest = client.prepareBulk();
boolean addedAny = false;
for (AnomalyRecord record : records)
{
String recordId = record.getId();
String indexName = getJobIndexName(jobId);
LOGGER.trace("ES BULK ACTION: update ID " + recordId + " type " + AnomalyRecord.TYPE +
" in index " + indexName + " using map of new values, for bucket " +
bucketId);
bulkRequest.add(
client.prepareIndex(indexName, AnomalyRecord.TYPE.getPreferredName(), recordId)
.setSource(serialiseWithJobId(AnomalyRecord.TYPE.getPreferredName(), record))
// Need to specify the parent ID when updating a child
.setParent(bucketId));
addedAny = true;
}
if (addedAny)
{
LOGGER.trace("ES API CALL: bulk request with " +
bulkRequest.numberOfActions() + " actions");
BulkResponse bulkResponse = bulkRequest.execute().actionGet();
if (bulkResponse.hasFailures())
{
LOGGER.error("BulkResponse has errors: " + bulkResponse.buildFailureMessage());
}
}
}
catch (IOException | ElasticsearchException e)
{
LOGGER.error("Error updating anomaly records", e);
}
}
@Override
public void updateInfluencer(Influencer influencer)
{
persistInfluencer(influencer);
}
@Override
public void deleteInterimResults()
{
ElasticsearchBulkDeleter deleter = new ElasticsearchBulkDeleter(client, jobId, true);
deleter.deleteInterimResults();
// NOCOMMIT This is called from AutodetectResultsParser, feels wrong...
deleter.commit(new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse bulkResponse) {
// don't care?
}
@Override
public void onFailure(Exception e) {
// don't care?
}
});
}
private interface Serialiser
{
XContentBuilder serialise() throws IOException;
}
private class Persistable
{
private final Object object;
private final Supplier<String> typeSupplier;
private final Supplier<String> idSupplier;
private final Serialiser serialiser;
Persistable(Object object, Supplier<String> typeSupplier, Supplier<String> idSupplier,
Serialiser serialiser)
{
this.object = object;
this.typeSupplier = typeSupplier;
this.idSupplier = idSupplier;
this.serialiser = serialiser;
}
boolean persist()
{
String type = typeSupplier.get();
String id = idSupplier.get();
if (object == null)
{
LOGGER.warn("No " + type + " to persist for job " + jobId);
return false;
}
logCall(type, id);
try
{
String indexName = getJobIndexName(jobId);
client.prepareIndex(indexName, type, id)
.setSource(serialiser.serialise())
.execute().actionGet();
return true;
}
catch (IOException e)
{
LOGGER.error("Error writing " + typeSupplier.get(), e);
return false;
}
}
private void logCall(String type, String id)
{
String indexName = getJobIndexName(jobId);
String msg = "ES API CALL: index type " + type + " to index " + indexName;
if (id != null)
{
msg += " with ID " + idSupplier.get();
}
else
{
msg += " with auto-generated ID";
}
LOGGER.trace(msg);
}
}
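// Illustrative sketch (not part of the original class): the persist* methods earlier in this
// class presumably wrap each result in a Persistable along these lines. Quantiles and its
// TYPE/QUANTILES_ID members are assumptions used only for the example:
//
//     new Persistable(quantiles, () -> Quantiles.TYPE.getPreferredName(),
//             () -> Quantiles.QUANTILES_ID,
//             () -> serialiseWithJobId(Quantiles.TYPE.getPreferredName(), quantiles)).persist();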
private XContentBuilder serialiseWithJobId(String objField, ToXContent obj) throws IOException
{
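// Note: the objField argument is currently unused; the object is serialised as-is,
// without a wrapping field.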
XContentBuilder builder = jsonBuilder();
obj.toXContent(builder, ToXContent.EMPTY_PARAMS);
return builder;
}
private XContentBuilder serialiseCategoryDefinition(CategoryDefinition categoryDefinition)
throws IOException
{
XContentBuilder builder = jsonBuilder();
categoryDefinition.toXContent(builder, ToXContent.EMPTY_PARAMS);
return builder;
}
private XContentBuilder serialiseBucketInfluencerStandalone(BucketInfluencer bucketInfluencer,
Date bucketTime, boolean isInterim) throws IOException
{
BucketInfluencer influencer = new BucketInfluencer(bucketInfluencer);
influencer.setIsInterim(isInterim);
influencer.setTimestamp(bucketTime);
XContentBuilder builder = jsonBuilder();
influencer.toXContent(builder, ToXContent.EMPTY_PARAMS);
return builder;
}
}


@ -0,0 +1,125 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;
import java.util.HashMap;
import java.util.Map;
/**
* Factory methods for the custom painless scripts that are run on Elasticsearch
*/
public final class ElasticsearchScripts {
private static final String PAINLESS = "painless";
// Inline script sources
private static final String UPDATE_AVERAGE_PROCESSING_TIME = "ctx._source.averageProcessingTimeMs = ctx._source.averageProcessingTimeMs"
+ " * 0.9 + params.timeMs * 0.1";
private static final String UPDATE_BUCKET_COUNT = "ctx._source.counts.bucketCount += params.count";
private static final String UPDATE_USAGE = "ctx._source.inputBytes += params.bytes;ctx._source.inputFieldCount += params.fieldCount;"
+ "ctx._source.inputRecordCount += params.recordCount;";
// Script parameters
private static final String COUNT_PARAM = "count";
private static final String BYTES_PARAM = "bytes";
private static final String FIELD_COUNT_PARAM = "fieldCount";
private static final String RECORD_COUNT_PARAM = "recordCount";
private static final String PROCESSING_TIME_PARAM = "timeMs";
public static final int UPDATE_JOB_RETRY_COUNT = 3;
private ElasticsearchScripts()
{
// Do nothing
}
public static Script newUpdateBucketCount(long count)
{
Map<String, Object> scriptParams = new HashMap<>();
scriptParams.put(COUNT_PARAM, count);
return new Script(ScriptType.INLINE, PAINLESS, UPDATE_BUCKET_COUNT, scriptParams);
}
public static Script newUpdateUsage(long additionalBytes, long additionalFields,
long additionalRecords)
{
Map<String, Object> scriptParams = new HashMap<>();
scriptParams.put(BYTES_PARAM, additionalBytes);
scriptParams.put(FIELD_COUNT_PARAM, additionalFields);
scriptParams.put(RECORD_COUNT_PARAM, additionalRecords);
return new Script(ScriptType.INLINE, PAINLESS, UPDATE_USAGE, scriptParams);
}
public static Script updateProcessingTime(Long processingTimeMs)
{
Map<String, Object> scriptParams = new HashMap<>();
scriptParams.put(PROCESSING_TIME_PARAM, processingTimeMs);
return new Script(ScriptType.INLINE, PAINLESS, UPDATE_AVERAGE_PROCESSING_TIME, scriptParams);
}
/**
* Updates the specified document via executing a script
*
* @param client
* the Elasticsearch client
* @param index
* the index
* @param type
* the document type
* @param docId
* the document id
* @param script
* the script that performs the update
* @return {@code true} if successful, {@code false} otherwise
*/
public static boolean updateViaScript(Client client, String index, String type, String docId, Script script) {
try {
client.prepareUpdate(index, type, docId)
.setScript(script)
.setRetryOnConflict(UPDATE_JOB_RETRY_COUNT).get();
} catch (IndexNotFoundException e) {
throw ExceptionsHelper.missingJobException(index);
}
return true;
}
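// Illustrative sketch (not part of the original class): a caller is assumed to combine one of
// the script factories above with updateViaScript roughly as follows; client, indexName,
// docType and docId are placeholder assumptions:
//
//     Script script = ElasticsearchScripts.updateProcessingTime(42L);
//     ElasticsearchScripts.updateViaScript(client, indexName, docType, docId, script);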
/**
* Upserts the specified document via executing a script
*
* @param client
* the Elasticsearch client
* @param index
* the index
* @param type
* the document type
* @param docId
* the document id
* @param script
* the script that performs the update
* @param upsertMap
* the doc source of the update request to be used when the
* document does not exist
* @return {@code true} if successful, {@code false} otherwise
*/
public static boolean upsertViaScript(Client client, String index, String type, String docId, Script script,
Map<String, Object> upsertMap) {
try {
client.prepareUpdate(index, type, docId)
.setScript(script)
.setUpsert(upsertMap)
.setRetryOnConflict(UPDATE_JOB_RETRY_COUNT).get();
} catch (IndexNotFoundException e) {
throw ExceptionsHelper.missingJobException(index);
}
return true;
}
}


@ -0,0 +1,86 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import static org.elasticsearch.xpack.prelert.job.persistence.ElasticsearchJobProvider.PRELERT_USAGE_INDEX;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.xpack.prelert.job.usage.Usage;
public class ElasticsearchUsagePersister implements UsagePersister {
private static final String USAGE_DOC_ID_PREFIX = "usage-";
private final Client client;
private final Logger logger;
private final DateTimeFormatter dateTimeFormatter;
private final Map<String, Object> upsertMap;
private String docId;
public ElasticsearchUsagePersister(Client client, Logger logger) {
this.client = client;
this.logger = logger;
dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ssXX", Locale.ROOT);
upsertMap = new HashMap<>();
upsertMap.put(ElasticsearchMappings.ES_TIMESTAMP, "");
upsertMap.put(Usage.INPUT_BYTES, null);
}
@Override
public void persistUsage(String jobId, long bytesRead, long fieldsRead, long recordsRead) {
ZonedDateTime nowTruncatedToHour = ZonedDateTime.now().truncatedTo(ChronoUnit.HOURS);
String formattedNowTruncatedToHour = nowTruncatedToHour.format(dateTimeFormatter);
docId = USAGE_DOC_ID_PREFIX + formattedNowTruncatedToHour;
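// e.g. (illustrative, assuming a UTC clock) docId = "usage-2016-11-18T15:00:00Z"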
upsertMap.put(ElasticsearchMappings.ES_TIMESTAMP, formattedNowTruncatedToHour);
// update global count
updateDocument(PRELERT_USAGE_INDEX, docId, bytesRead, fieldsRead, recordsRead);
updateDocument(ElasticsearchPersister.getJobIndexName(jobId), docId, bytesRead,
fieldsRead, recordsRead);
}
/**
* Update the metering document in the given index/id.
* Uses a script to update the volume field and 'upsert'
* to create the doc if it doesn't exist.
*
* @param index the index to persist to
* @param id Doc id is also its timestamp
* @param additionalBytes Add this value to the running total
* @param additionalFields Add this value to the running total
* @param additionalRecords Add this value to the running total
*/
private void updateDocument(String index, String id, long additionalBytes, long additionalFields, long additionalRecords) {
upsertMap.put(Usage.INPUT_BYTES, additionalBytes);
upsertMap.put(Usage.INPUT_FIELD_COUNT, additionalFields);
upsertMap.put(Usage.INPUT_RECORD_COUNT, additionalRecords);
logger.trace("ES API CALL: upsert ID " + id +
" type " + Usage.TYPE + " in index " + index +
" by running Groovy script update-usage with arguments bytes=" + additionalBytes +
" fieldCount=" + additionalFields + " recordCount=" + additionalRecords);
try {
ElasticsearchScripts.upsertViaScript(client, index, Usage.TYPE, id,
ElasticsearchScripts.newUpdateUsage(additionalBytes, additionalFields,
additionalRecords),
upsertMap);
} catch (VersionConflictEngineException e) {
logger.error("Failed to update the Usage document [" + id +"] in index [" + index + "]", e);
}
}
}


@ -0,0 +1,168 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.elasticsearch.xpack.prelert.job.results.Influencer;
import java.util.Objects;
/**
* One time query builder for influencers.
* <ul>
* <li>From- Skip the first N Influencers. This parameter is for paging if not
* required set to 0. Default = 0</li>
* <li>Size- Take only this number of Influencers. Default =
* {@value DEFAULT_SIZE}</li>
* <li>IncludeInterim- Include interim results. Default = false</li>
* <li>anomalyScoreThreshold- Return only influencers with an anomalyScore &gt;=
* this value. Default = 0.0</li>
* <li>epochStart- The start influencer time. An influencer with this timestamp
* will be included in the results. If unset all influencers up to
* <code>epochEnd</code> are returned. Default = null</li>
* <li>epochEnd- The end influencer timestamp. Influencers up to but NOT
* including this timestamp are returned. If unset all influencers from
* <code>epochStart</code> are returned. Default = null</li>
* <li>sortField- The field to sort on. Default = anomaly score</li>
* <li>sortDescending- Sort in descending order. Default = false</li>
* </ul>
*/
public final class InfluencersQueryBuilder {
public static final int DEFAULT_SIZE = 100;
private InfluencersQuery influencersQuery = new InfluencersQuery();
public InfluencersQueryBuilder from(int from) {
influencersQuery.from = from;
return this;
}
public InfluencersQueryBuilder size(int size) {
influencersQuery.size = size;
return this;
}
public InfluencersQueryBuilder includeInterim(boolean include) {
influencersQuery.includeInterim = include;
return this;
}
public InfluencersQueryBuilder anomalyScoreThreshold(Double anomalyScoreFilter) {
influencersQuery.anomalyScoreFilter = anomalyScoreFilter;
return this;
}
public InfluencersQueryBuilder sortField(String sortField) {
influencersQuery.sortField = sortField;
return this;
}
public InfluencersQueryBuilder sortDescending(boolean sortDescending) {
influencersQuery.sortDescending = sortDescending;
return this;
}
/**
* The start time filter. If <code>startTime</code> is unset no start time filter is applied.
*/
public InfluencersQueryBuilder epochStart(String startTime) {
influencersQuery.epochStart = startTime;
return this;
}
/**
* The end time filter. If <code>endTime</code> is unset no end time filter is applied.
*/
public InfluencersQueryBuilder epochEnd(String endTime) {
influencersQuery.epochEnd = endTime;
return this;
}
public InfluencersQueryBuilder.InfluencersQuery build() {
return influencersQuery;
}
public void clear() {
influencersQuery = new InfluencersQueryBuilder.InfluencersQuery();
}
public class InfluencersQuery {
private int from = 0;
private int size = DEFAULT_SIZE;
private boolean includeInterim = false;
private double anomalyScoreFilter = 0.0d;
private String epochStart;
private String epochEnd;
private String sortField = Influencer.ANOMALY_SCORE.getPreferredName();
private boolean sortDescending = false;
public int getFrom() {
return from;
}
public int getSize() {
return size;
}
public boolean isIncludeInterim() {
return includeInterim;
}
public double getAnomalyScoreFilter() {
return anomalyScoreFilter;
}
public String getEpochStart() {
return epochStart;
}
public String getEpochEnd() {
return epochEnd;
}
public String getSortField() {
return sortField;
}
public boolean isSortDescending() {
return sortDescending;
}
@Override
public int hashCode() {
return Objects.hash(from, size, includeInterim, anomalyScoreFilter, epochStart, epochEnd,
sortField, sortDescending);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
InfluencersQuery other = (InfluencersQuery) obj;
return Objects.equals(from, other.from) &&
Objects.equals(size, other.size) &&
Objects.equals(includeInterim, other.includeInterim) &&
Objects.equals(epochStart, other.epochStart) &&
Objects.equals(epochStart, other.epochStart) &&
Objects.equals(anomalyScoreFilter, other.anomalyScoreFilter) &&
Objects.equals(sortField, other.sortField) &&
this.sortDescending == other.sortDescending;
}
}
}
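// Illustrative usage sketch (not part of the original file); the paging and threshold values
// are arbitrary examples:
//
//     InfluencersQueryBuilder.InfluencersQuery query = new InfluencersQueryBuilder()
//             .from(0)
//             .size(50)
//             .includeInterim(true)
//             .anomalyScoreThreshold(75.0)
//             .sortDescending(true)
//             .build();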


@ -0,0 +1,23 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.prelert.job.persistence;
import org.elasticsearch.xpack.prelert.job.DataCounts;
/**
* Update a job's dataCounts
* i.e. the number of processed records, fields etc.
*/
public interface JobDataCountsPersister
{
/**
* Update the job's data counts stats and figures.
*
* @param jobId Job to update
* @param counts The counts
*/
void persistDataCounts(String jobId, DataCounts counts);
}
