Detype Results (elastic/elasticsearch#384)
* Add result_type field to bucket
* Query and delete buckets/records by result_type
* Add a filter to the ElasticsearchBatchedDocumentsIterator subclasses for result_type:bucket
* De-type Influencers, BucketInfluencers and Category Definitions
* Revert de-typing CategoryDefinition
* Resolve merge errors after rebase

Original commit: elastic/x-pack-elasticsearch@65605432e8
parent 37cd03ad4d
commit 02a94ce729
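Note: the commit collapses the per-type Elasticsearch mapping types used for results (bucket, record, influencer, bucketInfluencer) into one shared result type discriminated by a new result_type field. A minimal sketch of the read-side pattern this establishes, using only query builders that appear in the diff below (the literal "result_type" stands in for Result.RESULT_TYPE.getPreferredName()):

import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermsQueryBuilder;

class ResultTypeFilter {
    // AND an existing query with a filter on the result_type discriminator,
    // e.g. "bucket" or "influencer", so searches against the shared result
    // mapping type only see one logical kind of document.
    static QueryBuilder onlyResultType(QueryBuilder original, String resultType) {
        return new BoolQueryBuilder()
                .filter(original)
                .filter(new TermsQueryBuilder("result_type", resultType));
    }
}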
RevertModelSnapshotAction.java

@@ -364,8 +364,7 @@ extends Action<RevertModelSnapshotAction.Request, RevertModelSnapshotAction.Resp
         ModelSnapshot modelSnapshot = revertCandidates.get(0);

         // The quantiles can be large, and totally dominate the output -
-        // it's
-        // clearer to remove them
+        // it's clearer to remove them
         modelSnapshot.setQuantiles(null);
         return modelSnapshot;
     }

ElasticsearchBatchedBucketsIterator.java

@@ -17,16 +17,10 @@ import java.io.IOException;

 import org.elasticsearch.xpack.prelert.job.results.Bucket;

-class ElasticsearchBatchedBucketsIterator extends ElasticsearchBatchedDocumentsIterator<Bucket> {
+class ElasticsearchBatchedBucketsIterator extends ElasticsearchBatchedResultsIterator<Bucket> {

-    public ElasticsearchBatchedBucketsIterator(Client client, String jobId, ParseFieldMatcher parserFieldMatcher) {
-        super(client, JobResultsPersister.getJobIndexName(jobId), parserFieldMatcher);
-    }
-
-    @Override
-    protected String getType()
-    {
-        return Bucket.TYPE.getPreferredName();
+    public ElasticsearchBatchedBucketsIterator(Client client, String jobId, ParseFieldMatcher parseFieldMatcher) {
+        super(client, jobId, Bucket.RESULT_TYPE_VALUE, parseFieldMatcher);
     }

     @Override
ElasticsearchBatchedDocumentsIterator.java

@@ -10,6 +10,7 @@ import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.sort.SortBuilders;

@@ -45,6 +46,18 @@ abstract class ElasticsearchBatchedDocumentsIterator<T> implements BatchedDocume
         isScrollInitialised = false;
     }

+    protected ElasticsearchBatchedDocumentsIterator(Client client, String index, ParseFieldMatcher parseFieldMatcher,
+                                                    QueryBuilder queryBuilder) {
+        this.parseFieldMatcher = parseFieldMatcher;
+        this.client = Objects.requireNonNull(client);
+        this.index = Objects.requireNonNull(index);
+        this.parseFieldMatcher = Objects.requireNonNull(parseFieldMatcher);
+        totalHits = 0;
+        count = 0;
+        filterBuilder = new ResultsFilterBuilder(queryBuilder);
+        isScrollInitialised = false;
+    }
+
     @Override
     public BatchedDocumentsIterator<T> timeRange(long startEpochMs, long endEpochMs) {
         filterBuilder.timeRange(ElasticsearchMappings.ES_TIMESTAMP, startEpochMs, endEpochMs);

@@ -74,7 +87,7 @@ abstract class ElasticsearchBatchedDocumentsIterator<T> implements BatchedDocume
     }

     private SearchResponse initScroll() {
-        LOGGER.trace("ES API CALL: search all of type " + getType() + " from index " + index);
+        LOGGER.trace("ES API CALL: search all of type {} from index {}", getType(), index);

         isScrollInitialised = true;

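Note: the new constructor above threads a caller-supplied QueryBuilder into ResultsFilterBuilder, an internal helper whose source is not part of this diff. A rough, illustrative stand-in for how such a builder can combine the base query with the timeRange() call (only the constructor and timeRange signatures are taken from the diff; the range-query shape is an assumption):

import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

// Illustrative stand-in for the internal ResultsFilterBuilder used above.
class ResultsFilterBuilderSketch {
    private QueryBuilder current;

    ResultsFilterBuilderSketch(QueryBuilder queryBuilder) {
        this.current = queryBuilder;
    }

    // AND the accumulated filter with [startEpochMs, endEpochMs) on 'field'.
    ResultsFilterBuilderSketch timeRange(String field, long startEpochMs, long endEpochMs) {
        current = new BoolQueryBuilder()
                .filter(current)
                .filter(QueryBuilders.rangeQuery(field).gte(startEpochMs).lt(endEpochMs));
        return this;
    }

    QueryBuilder build() {
        return current;
    }
}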
ElasticsearchBatchedInfluencersIterator.java

@@ -17,18 +17,12 @@ import java.io.IOException;

 import org.elasticsearch.xpack.prelert.job.results.Influencer;

-class ElasticsearchBatchedInfluencersIterator extends ElasticsearchBatchedDocumentsIterator<Influencer>
+class ElasticsearchBatchedInfluencersIterator extends ElasticsearchBatchedResultsIterator<Influencer>
 {
     public ElasticsearchBatchedInfluencersIterator(Client client, String jobId,
                                                    ParseFieldMatcher parserFieldMatcher)
     {
-        super(client, JobResultsPersister.getJobIndexName(jobId), parserFieldMatcher);
-    }
-
-    @Override
-    protected String getType()
-    {
-        return Influencer.TYPE.getPreferredName();
+        super(client, jobId, Influencer.RESULT_TYPE_VALUE, parserFieldMatcher);
     }

     @Override
ElasticsearchBatchedResultsIterator.java (new file)

@@ -0,0 +1,24 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.prelert.job.persistence;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.index.query.TermsQueryBuilder;
+import org.elasticsearch.xpack.prelert.job.results.Result;
+
+abstract class ElasticsearchBatchedResultsIterator<T> extends ElasticsearchBatchedDocumentsIterator<T> {
+
+    public ElasticsearchBatchedResultsIterator(Client client, String jobId, String resultType, ParseFieldMatcher parseFieldMatcher) {
+        super(client, JobResultsPersister.getJobIndexName(jobId), parseFieldMatcher,
+                new TermsQueryBuilder(Result.RESULT_TYPE.getPreferredName(), resultType));
+    }
+
+    @Override
+    protected String getType() {
+        return Result.TYPE.getPreferredName();
+    }
+}
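Note: each concrete iterator now reduces to the same wiring: pass the job id and a per-class RESULT_TYPE_VALUE up to this base class, which turns it into a terms filter on result_type. What the super(...) call boils down to in isolation (literal names substituted for the ParseField lookups; "bucket" is assumed to be Bucket.RESULT_TYPE_VALUE):

import org.elasticsearch.index.query.TermsQueryBuilder;

class ResultsIteratorWiring {
    // Equivalent of: new TermsQueryBuilder(Result.RESULT_TYPE.getPreferredName(), resultType)
    static TermsQueryBuilder resultTypeFilter(String resultType) {
        return new TermsQueryBuilder("result_type", resultType);
    }

    public static void main(String[] args) {
        // e.g. the filter ElasticsearchBatchedBucketsIterator installs:
        System.out.println(resultTypeFilter("bucket")); // prints the terms query as JSON
    }
}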
ElasticsearchBulkDeleter.java

@@ -16,10 +16,11 @@ import org.elasticsearch.action.search.SearchAction;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.TermsQueryBuilder;
 import org.elasticsearch.search.SearchHit;
-import org.elasticsearch.search.SearchHitField;
 import org.elasticsearch.search.sort.SortBuilders;
 import org.elasticsearch.xpack.prelert.job.ModelSizeStats;
 import org.elasticsearch.xpack.prelert.job.ModelSnapshot;

@@ -29,6 +30,7 @@ import org.elasticsearch.xpack.prelert.job.results.Bucket;
 import org.elasticsearch.xpack.prelert.job.results.BucketInfluencer;
 import org.elasticsearch.xpack.prelert.job.results.Influencer;
 import org.elasticsearch.xpack.prelert.job.results.ModelDebugOutput;
+import org.elasticsearch.xpack.prelert.job.results.Result;

 import java.util.Objects;
 import java.util.function.LongSupplier;

@@ -72,7 +74,7 @@ public class ElasticsearchBulkDeleter implements JobDataDeleter {
         deleteRecords(bucket);
         deleteBucketInfluencers(bucket);
         bulkRequestBuilder.add(
-                client.prepareDelete(JobResultsPersister.getJobIndexName(jobId), Bucket.TYPE.getPreferredName(), bucket.getId()));
+                client.prepareDelete(JobResultsPersister.getJobIndexName(jobId), Result.TYPE.getPreferredName(), bucket.getId()));
         ++deletedBucketCount;
     }

@@ -83,20 +85,22 @@ public class ElasticsearchBulkDeleter implements JobDataDeleter {
         // the scenes, and Elasticsearch documentation claims it's significantly
         // slower. Here we rely on the record timestamps being identical to the
         // bucket timestamp.
-        deleteTypeByBucket(bucket, AnomalyRecord.TYPE.getPreferredName(), () -> ++deletedRecordCount);
+        deleteResultTypeByBucket(bucket, AnomalyRecord.RESULT_TYPE_VALUE, () -> ++deletedRecordCount);
     }

-    private void deleteTypeByBucket(Bucket bucket, String type, LongSupplier deleteCounter) {
-        QueryBuilder query = QueryBuilders.termQuery(ElasticsearchMappings.ES_TIMESTAMP,
-                bucket.getTimestamp().getTime());
+    private void deleteResultTypeByBucket(Bucket bucket, String resultType, LongSupplier deleteCounter) {
+        QueryBuilder timeQuery = QueryBuilders.termQuery(ElasticsearchMappings.ES_TIMESTAMP, bucket.getTimestamp().getTime());
+        QueryBuilder boolQuery = new BoolQueryBuilder()
+                .filter(timeQuery)
+                .filter(new TermsQueryBuilder(Result.RESULT_TYPE.getPreferredName(), resultType));

         int done = 0;
         boolean finished = false;
         while (finished == false) {
             SearchResponse searchResponse = SearchAction.INSTANCE.newRequestBuilder(client)
                     .setIndices(JobResultsPersister.getJobIndexName(jobId))
-                    .setTypes(type)
-                    .setQuery(query)
+                    .setTypes(Result.TYPE.getPreferredName())
+                    .setQuery(boolQuery)
                     .addSort(SortBuilders.fieldSort(ElasticsearchMappings.ES_DOC))
                     .setSize(SCROLL_SIZE)
                     .setFrom(done)
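Note: deleteResultTypeByBucket() replaces the old per-mapping-type deletes with one query shape: bucket timestamp AND result_type. The composed filter as a standalone helper (the "timestamp" and "result_type" literals stand in for ElasticsearchMappings.ES_TIMESTAMP and Result.RESULT_TYPE.getPreferredName()):

import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.TermsQueryBuilder;

class DeleteByBucketQuery {
    // All result documents for one bucket share the bucket's timestamp, so
    // timestamp + result_type selects exactly one kind of result to delete.
    static QueryBuilder forBucket(long bucketEpochMs, String resultType) {
        QueryBuilder timeQuery = QueryBuilders.termQuery("timestamp", bucketEpochMs);
        return new BoolQueryBuilder()
                .filter(timeQuery)
                .filter(new TermsQueryBuilder("result_type", resultType));
    }
}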
@@ -124,17 +128,17 @@ public class ElasticsearchBulkDeleter implements JobDataDeleter {
     public void deleteBucketInfluencers(Bucket bucket) {
         // Find the bucket influencers using the time stamp, relying on the
         // bucket influencer timestamps being identical to the bucket timestamp.
-        deleteTypeByBucket(bucket, BucketInfluencer.TYPE.getPreferredName(), () -> ++deletedBucketInfluencerCount);
+        deleteResultTypeByBucket(bucket, BucketInfluencer.RESULT_TYPE_VALUE, () -> ++deletedBucketInfluencerCount);
     }

     public void deleteInfluencers(Bucket bucket) {
         // Find the influencers using the time stamp, relying on the influencer
         // timestamps being identical to the bucket timestamp.
-        deleteTypeByBucket(bucket, Influencer.TYPE.getPreferredName(), () -> ++deletedInfluencerCount);
+        deleteResultTypeByBucket(bucket, Influencer.RESULT_TYPE_VALUE, () -> ++deletedInfluencerCount);
     }

     public void deleteBucketByTime(Bucket bucket) {
-        deleteTypeByBucket(bucket, Bucket.TYPE.getPreferredName(), () -> ++deletedBucketCount);
+        deleteResultTypeByBucket(bucket, Bucket.RESULT_TYPE_VALUE, () -> ++deletedBucketCount);
     }

     @Override

@@ -147,7 +151,7 @@ public class ElasticsearchBulkDeleter implements JobDataDeleter {
             return;
         }
         bulkRequestBuilder.add(
-                client.prepareDelete(JobResultsPersister.getJobIndexName(jobId), Influencer.TYPE.getPreferredName(), id));
+                client.prepareDelete(JobResultsPersister.getJobIndexName(jobId), Result.TYPE.getPreferredName(), id));
         ++deletedInfluencerCount;
     }

@@ -188,8 +192,7 @@ public class ElasticsearchBulkDeleter implements JobDataDeleter {
         QueryBuilder qb = QueryBuilders.termQuery(Bucket.IS_INTERIM.getPreferredName(), true);

         SearchResponse searchResponse = client.prepareSearch(JobResultsPersister.getJobIndexName(jobId))
-                .setTypes(Bucket.TYPE.getPreferredName(), AnomalyRecord.TYPE.getPreferredName(), Influencer.TYPE.getPreferredName(),
-                        BucketInfluencer.TYPE.getPreferredName())
+                .setTypes(Result.RESULT_TYPE.getPreferredName())
                 .setQuery(qb)
                 .addSort(SortBuilders.fieldSort(ElasticsearchMappings.ES_DOC))
                 .setScroll(SCROLL_CONTEXT_DURATION)

@@ -201,15 +204,15 @@ public class ElasticsearchBulkDeleter implements JobDataDeleter {
         long totalDeletedCount = 0;
         while (totalDeletedCount < totalHits) {
             for (SearchHit hit : searchResponse.getHits()) {
-                LOGGER.trace("Search hit for bucket: " + hit.toString() + ", " + hit.getId());
-                String type = hit.getType();
-                if (type.equals(Bucket.TYPE)) {
+                LOGGER.trace("Search hit for bucket: {}, {}", hit.toString(), hit.getId());
+                String type = (String) hit.getSource().get(Result.RESULT_TYPE.getPreferredName());
+                if (Bucket.RESULT_TYPE_VALUE.equals(type)) {
                     ++deletedBucketCount;
-                } else if (type.equals(AnomalyRecord.TYPE)) {
+                } else if (AnomalyRecord.RESULT_TYPE_VALUE.equals(type)) {
                     ++deletedRecordCount;
-                } else if (type.equals(BucketInfluencer.TYPE)) {
+                } else if (BucketInfluencer.RESULT_TYPE_VALUE.equals(type)) {
                     ++deletedBucketInfluencerCount;
-                } else if (type.equals(Influencer.TYPE)) {
+                } else if (Influencer.RESULT_TYPE_VALUE.equals(type)) {
                     ++deletedInfluencerCount;
                 }
                 ++totalDeletedCount;
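Note: with a single mapping type, hit.getType() can no longer distinguish buckets from records, so the loop above reads the discriminator back out of the document _source. The same dispatch in isolation (a Map stands in for SearchHit.getSource(); the literal values such as "bucket" and "record" are assumed stand-ins for the RESULT_TYPE_VALUE constants):

import java.util.Map;

class ResultTypeDispatch {
    // Classify a search hit by its result_type source field rather than by
    // its (now uniform) Elasticsearch mapping type. Constant-first equals(),
    // as in the hunk above, is null-safe if a document lacks the field.
    static String classify(Map<String, Object> source) {
        String type = (String) source.get("result_type");
        if ("bucket".equals(type)) {
            return "bucket";
        } else if ("record".equals(type)) {
            return "record";
        } else if ("influencer".equals(type)) {
            return "influencer";
        }
        return "unknown";
    }
}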
ElasticsearchJobProvider.java

@@ -57,11 +57,11 @@ import org.elasticsearch.xpack.prelert.job.persistence.InfluencersQueryBuilder.I
 import org.elasticsearch.xpack.prelert.job.quantiles.Quantiles;
 import org.elasticsearch.xpack.prelert.job.results.AnomalyRecord;
 import org.elasticsearch.xpack.prelert.job.results.Bucket;
-import org.elasticsearch.xpack.prelert.job.results.BucketInfluencer;
 import org.elasticsearch.xpack.prelert.job.results.CategoryDefinition;
 import org.elasticsearch.xpack.prelert.job.results.Influencer;
 import org.elasticsearch.xpack.prelert.job.results.ModelDebugOutput;
 import org.elasticsearch.xpack.prelert.job.results.ReservedFieldNames;
+import org.elasticsearch.xpack.prelert.job.results.Result;
 import org.elasticsearch.xpack.prelert.job.usage.Usage;
 import org.elasticsearch.xpack.prelert.lists.ListDocument;
 import org.elasticsearch.xpack.prelert.utils.ExceptionsHelper;

@@ -160,7 +160,7 @@ public class ElasticsearchJobProvider implements JobProvider
      */
     private void createUsageMeteringIndex() {
         try {
-            LOGGER.trace("ES API CALL: index exists? " + PRELERT_USAGE_INDEX);
+            LOGGER.trace("ES API CALL: index exists? {}", PRELERT_USAGE_INDEX);
             boolean indexExists = client.admin().indices()
                     .exists(new IndicesExistsRequest(PRELERT_USAGE_INDEX))
                     .get().isExists();

@@ -170,12 +170,12 @@ public class ElasticsearchJobProvider implements JobProvider

                 XContentBuilder usageMapping = ElasticsearchMappings.usageMapping();

-                LOGGER.trace("ES API CALL: create index " + PRELERT_USAGE_INDEX);
+                LOGGER.trace("ES API CALL: create index {}", PRELERT_USAGE_INDEX);
                 client.admin().indices().prepareCreate(PRELERT_USAGE_INDEX)
                         .setSettings(prelertIndexSettings())
                         .addMapping(Usage.TYPE, usageMapping)
                         .get();
-                LOGGER.trace("ES API CALL: wait for yellow status " + PRELERT_USAGE_INDEX);
+                LOGGER.trace("ES API CALL: wait for yellow status {}", PRELERT_USAGE_INDEX);
                 client.admin().cluster().prepareHealth(PRELERT_USAGE_INDEX).setWaitForYellowStatus().execute().actionGet();
             }
         } catch (InterruptedException | ExecutionException | IOException e) {

@@ -217,38 +217,31 @@ public class ElasticsearchJobProvider implements JobProvider
      */
     @Override
     public void createJobRelatedIndices(Job job, ActionListener<Boolean> listener) {
-        Collection<String> termFields = (job.getAnalysisConfig() != null) ? job.getAnalysisConfig().termFields() : null;
-        Collection<String> influencers = (job.getAnalysisConfig() != null) ? job.getAnalysisConfig().getInfluencers() : null;
+        Collection<String> termFields = (job.getAnalysisConfig() != null) ? job.getAnalysisConfig().termFields() : Collections.emptyList();
         try {
-            XContentBuilder bucketMapping = ElasticsearchMappings.bucketMapping();
-            XContentBuilder bucketInfluencerMapping = ElasticsearchMappings.bucketInfluencerMapping();
+            XContentBuilder resultsMapping = ElasticsearchMappings.resultsMapping(termFields);
             XContentBuilder categorizerStateMapping = ElasticsearchMappings.categorizerStateMapping();
             XContentBuilder categoryDefinitionMapping = ElasticsearchMappings.categoryDefinitionMapping();
-            XContentBuilder recordMapping = ElasticsearchMappings.recordMapping(termFields);
             XContentBuilder quantilesMapping = ElasticsearchMappings.quantilesMapping();
             XContentBuilder modelStateMapping = ElasticsearchMappings.modelStateMapping();
             XContentBuilder modelSnapshotMapping = ElasticsearchMappings.modelSnapshotMapping();
             XContentBuilder modelSizeStatsMapping = ElasticsearchMappings.modelSizeStatsMapping();
-            XContentBuilder influencerMapping = ElasticsearchMappings.influencerMapping(influencers);
             XContentBuilder modelDebugMapping = ElasticsearchMappings.modelDebugOutputMapping(termFields);
             XContentBuilder processingTimeMapping = ElasticsearchMappings.processingTimeMapping();
             XContentBuilder partitionScoreMapping = ElasticsearchMappings.bucketPartitionMaxNormalizedScores();
             XContentBuilder dataCountsMapping = ElasticsearchMappings.dataCountsMapping();

             String jobId = job.getId();
-            LOGGER.trace("ES API CALL: create index " + job.getId());
+            LOGGER.trace("ES API CALL: create index {}", job.getId());
             CreateIndexRequest createIndexRequest = new CreateIndexRequest(JobResultsPersister.getJobIndexName(jobId));
             createIndexRequest.settings(prelertIndexSettings());
-            createIndexRequest.mapping(Bucket.TYPE.getPreferredName(), bucketMapping);
-            createIndexRequest.mapping(BucketInfluencer.TYPE.getPreferredName(), bucketInfluencerMapping);
+            createIndexRequest.mapping(Result.TYPE.getPreferredName(), resultsMapping);
             createIndexRequest.mapping(CategorizerState.TYPE, categorizerStateMapping);
             createIndexRequest.mapping(CategoryDefinition.TYPE.getPreferredName(), categoryDefinitionMapping);
-            createIndexRequest.mapping(AnomalyRecord.TYPE.getPreferredName(), recordMapping);
             createIndexRequest.mapping(Quantiles.TYPE.getPreferredName(), quantilesMapping);
             createIndexRequest.mapping(ModelState.TYPE, modelStateMapping);
             createIndexRequest.mapping(ModelSnapshot.TYPE.getPreferredName(), modelSnapshotMapping);
             createIndexRequest.mapping(ModelSizeStats.TYPE.getPreferredName(), modelSizeStatsMapping);
-            createIndexRequest.mapping(Influencer.TYPE.getPreferredName(), influencerMapping);
             createIndexRequest.mapping(ModelDebugOutput.TYPE.getPreferredName(), modelDebugMapping);
             createIndexRequest.mapping(ReservedFieldNames.BUCKET_PROCESSING_TIME_TYPE, processingTimeMapping);
             createIndexRequest.mapping(ReservedFieldNames.PARTITION_NORMALIZED_PROB_TYPE, partitionScoreMapping);

@@ -273,7 +266,7 @@ public class ElasticsearchJobProvider implements JobProvider
     @Override
     public void deleteJobRelatedIndices(String jobId, ActionListener<DeleteJobAction.Response> listener) {
         String indexName = JobResultsPersister.getJobIndexName(jobId);
-        LOGGER.trace("ES API CALL: delete index " + indexName);
+        LOGGER.trace("ES API CALL: delete index {}", indexName);

         try {
             DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName);

@@ -330,6 +323,7 @@ public class ElasticsearchJobProvider implements JobProvider

         SortBuilder<?> sortBuilder = new FieldSortBuilder(esSortField(query.getSortField()))
                 .order(query.isSortDescending() ? SortOrder.DESC : SortOrder.ASC);
+
         QueryPage<Bucket> buckets = buckets(jobId, query.isIncludeInterim(), query.getFrom(), query.getSize(), fb, sortBuilder);

         if (Strings.isNullOrEmpty(query.getPartitionValue())) {

@@ -351,7 +345,6 @@ public class ElasticsearchJobProvider implements JobProvider

                 b.setAnomalyScore(b.partitionAnomalyScore(query.getPartitionValue()));
             }
-
         }

         return buckets;

@@ -376,16 +369,21 @@ public class ElasticsearchJobProvider implements JobProvider

     private QueryPage<Bucket> buckets(String jobId, boolean includeInterim, int from, int size,
                                       QueryBuilder fb, SortBuilder<?> sb) throws ResourceNotFoundException {
+        QueryBuilder boolQuery = new BoolQueryBuilder()
+                .filter(fb)
+                .filter(new TermsQueryBuilder(Result.RESULT_TYPE.getPreferredName(), Bucket.RESULT_TYPE_VALUE));
+
         SearchResponse searchResponse;
         try {
             String indexName = JobResultsPersister.getJobIndexName(jobId);
-            LOGGER.trace("ES API CALL: search all of type " + Bucket.TYPE +
-                    " from index " + indexName + " sort ascending " + ElasticsearchMappings.ES_TIMESTAMP +
-                    " with filter after sort from " + from + " size " + size);
+            LOGGER.trace("ES API CALL: search all of result type {} from index {} with filter from {} size {}",
+                    Bucket.RESULT_TYPE_VALUE, indexName, from, size);
             searchResponse = client.prepareSearch(indexName)
-                    .setTypes(Bucket.TYPE.getPreferredName())
+                    .setTypes(Result.TYPE.getPreferredName())
                     .addSort(sb)
-                    .setQuery(new ConstantScoreQueryBuilder(fb))
+                    .setQuery(new ConstantScoreQueryBuilder(boolQuery))
                     .setFrom(from).setSize(size)
                     .get();
         } catch (IndexNotFoundException e) {
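Note: buckets() keeps its constant-score wrapper but now wraps the caller's filter AND the result_type filter. The composed shape as a standalone helper (literals again stand in for the ParseField names and RESULT_TYPE_VALUE constant):

import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.ConstantScoreQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermsQueryBuilder;

class BucketsQueryShape {
    // constant_score skips relevance scoring entirely; for pure filtered
    // lookups like this every hit gets the same score and the inner filters
    // remain cacheable.
    static QueryBuilder build(QueryBuilder callerFilter) {
        QueryBuilder filtered = new BoolQueryBuilder()
                .filter(callerFilter)
                .filter(new TermsQueryBuilder("result_type", "bucket"));
        return new ConstantScoreQueryBuilder(filtered);
    }
}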
@@ -419,14 +417,16 @@ public class ElasticsearchJobProvider implements JobProvider
         String indexName = JobResultsPersister.getJobIndexName(jobId);
         SearchHits hits;
         try {
-            LOGGER.trace("ES API CALL: get Bucket with timestamp " + query.getTimestamp() +
-                    " from index " + indexName);
-            QueryBuilder qb = QueryBuilders.matchQuery(ElasticsearchMappings.ES_TIMESTAMP,
-                    query.getTimestamp());
+            LOGGER.trace("ES API CALL: get Bucket with timestamp {} from index {}", query.getTimestamp(), indexName);
+            QueryBuilder matchQuery = QueryBuilders.matchQuery(ElasticsearchMappings.ES_TIMESTAMP, query.getTimestamp());
+
+            QueryBuilder boolQuery = new BoolQueryBuilder()
+                    .filter(matchQuery)
+                    .filter(new TermsQueryBuilder(Result.RESULT_TYPE.getPreferredName(), Bucket.RESULT_TYPE_VALUE));

             SearchResponse searchResponse = client.prepareSearch(indexName)
-                    .setTypes(Bucket.TYPE.getPreferredName())
-                    .setQuery(qb)
+                    .setTypes(Result.TYPE.getPreferredName())
+                    .setQuery(boolQuery)
                     .addSort(SortBuilders.fieldSort(ElasticsearchMappings.ES_DOC))
                     .get();
             hits = searchResponse.getHits();

@@ -505,7 +505,7 @@ public class ElasticsearchJobProvider implements JobProvider
         String indexName = JobResultsPersister.getJobIndexName(jobId);
         SearchRequestBuilder searchBuilder = client
                 .prepareSearch(indexName)
-                .setPostFilter(qb)
+                .setQuery(qb)
                 .addSort(sb)
                 .setTypes(ReservedFieldNames.PARTITION_NORMALIZED_PROB_TYPE);

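Note: the hunk above swaps setPostFilter(qb) for setQuery(qb). A post_filter is applied only after the search (and any aggregations) has run, which matters when aggregations must see the unfiltered document set; this search computes no aggregations, so filtering in the query phase is equivalent and narrows the document set earlier. The two calls side by side (SearchRequestBuilder as used throughout the diff):

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.index.query.QueryBuilder;

class QueryVsPostFilter {
    // Before: hits filtered late, after the search phase and aggregations.
    static SearchRequestBuilder late(SearchRequestBuilder search, QueryBuilder qb) {
        return search.setPostFilter(qb);
    }

    // After: hits filtered during the query phase itself.
    static SearchRequestBuilder early(SearchRequestBuilder search, QueryBuilder qb) {
        return search.setQuery(qb);
    }
}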
@@ -601,8 +601,7 @@ public class ElasticsearchJobProvider implements JobProvider
         // the scenes, and Elasticsearch documentation claims it's significantly
         // slower. Here we rely on the record timestamps being identical to the
         // bucket timestamp.
-        QueryBuilder recordFilter = QueryBuilders.termQuery(ElasticsearchMappings.ES_TIMESTAMP,
-                bucket.getTimestamp().getTime());
+        QueryBuilder recordFilter = QueryBuilders.termQuery(ElasticsearchMappings.ES_TIMESTAMP, bucket.getTimestamp().getTime());

         recordFilter = new ResultsFilterBuilder(recordFilter)
                 .interim(AnomalyRecord.IS_INTERIM.getPreferredName(), includeInterim)

@@ -624,9 +623,9 @@ public class ElasticsearchJobProvider implements JobProvider
     @Override
     public QueryPage<CategoryDefinition> categoryDefinitions(String jobId, int from, int size) {
         String indexName = JobResultsPersister.getJobIndexName(jobId);
-        LOGGER.trace("ES API CALL: search all of type " + CategoryDefinition.TYPE +
-                " from index " + indexName + " sort ascending " + CategoryDefinition.CATEGORY_ID +
-                " from " + from + " size " + size);
+        LOGGER.trace("ES API CALL: search all of type {} from index {} sort ascending {} from {} size {}",
+                CategoryDefinition.TYPE.getPreferredName(), indexName, CategoryDefinition.CATEGORY_ID.getPreferredName(), from, size);
         SearchRequestBuilder searchBuilder = client.prepareSearch(indexName)
                 .setTypes(CategoryDefinition.TYPE.getPreferredName())
                 .setFrom(from).setSize(size)

@@ -662,13 +661,15 @@ public class ElasticsearchJobProvider implements JobProvider
         GetResponse response;

         try {
-            LOGGER.trace("ES API CALL: get ID " + categoryId + " type " + CategoryDefinition.TYPE +
-                    " from index " + indexName);
+            LOGGER.trace("ES API CALL: get ID {} type {} from index {}",
+                    categoryId, CategoryDefinition.TYPE, indexName);
+
             response = client.prepareGet(indexName, CategoryDefinition.TYPE.getPreferredName(), categoryId).get();
         } catch (IndexNotFoundException e) {
             throw ExceptionsHelper.missingJobException(jobId);
         }


         if (response.isExists()) {
             BytesReference source = response.getSourceAsBytesRef();
             XContentParser parser;

@@ -680,6 +681,7 @@ public class ElasticsearchJobProvider implements JobProvider
             CategoryDefinition definition = CategoryDefinition.PARSER.apply(parser, () -> parseFieldMatcher);
             return new QueryPage<>(Collections.singletonList(definition), 1, CategoryDefinition.RESULTS_FIELD);
         }
+
         throw QueryPage.emptyQueryPage(Bucket.RESULTS_FIELD);
     }

@@ -724,10 +726,10 @@ public class ElasticsearchJobProvider implements JobProvider

         recordFilter = new BoolQueryBuilder()
                 .filter(recordFilter)
-                .filter(new TermsQueryBuilder(AnomalyRecord.RESULT_TYPE.getPreferredName(), AnomalyRecord.RESULT_TYPE_VALUE));
+                .filter(new TermsQueryBuilder(Result.RESULT_TYPE.getPreferredName(), AnomalyRecord.RESULT_TYPE_VALUE));

         SearchRequestBuilder searchBuilder = client.prepareSearch(indexName)
-                .setTypes(AnomalyRecord.TYPE.getPreferredName())
+                .setTypes(Result.TYPE.getPreferredName())
                 .setQuery(recordFilter)
                 .setFrom(from).setSize(size)
                 .addSort(sb == null ? SortBuilders.fieldSort(ElasticsearchMappings.ES_DOC) : sb)

@@ -740,10 +742,10 @@ public class ElasticsearchJobProvider implements JobProvider

         SearchResponse searchResponse;
         try {
-            LOGGER.trace("ES API CALL: search all of type " + AnomalyRecord.TYPE +
-                    " from index " + indexName + ((sb != null) ? " with sort" : "") +
-                    (secondarySort.isEmpty() ? "" : " with secondary sort") +
-                    " with filter after sort from " + from + " size " + size);
+            LOGGER.trace("ES API CALL: search all of result type {} from index {}{}{} with filter after sort from {} size {}",
+                    AnomalyRecord.RESULT_TYPE_VALUE, indexName, (sb != null) ? " with sort" : "",
+                    secondarySort.isEmpty() ? "" : " with secondary sort", from, size);
             searchResponse = searchBuilder.get();
         } catch (IndexNotFoundException e) {
             throw ExceptionsHelper.missingJobException(jobId);

@@ -786,14 +788,15 @@ public class ElasticsearchJobProvider implements JobProvider
     private QueryPage<Influencer> influencers(String jobId, int from, int size, QueryBuilder filterBuilder, String sortField,
                                               boolean sortDescending) throws ResourceNotFoundException {
         String indexName = JobResultsPersister.getJobIndexName(jobId);
-        LOGGER.trace("ES API CALL: search all of type " + Influencer.TYPE + " from index " + indexName
-                + ((sortField != null)
-                ? " with sort " + (sortDescending ? "descending" : "ascending") + " on field " + esSortField(sortField) : "")
-                + " with filter after sort from " + from + " size " + size);
+        LOGGER.trace("ES API CALL: search all of result type {} from index {}{} with filter from {} size {}",
+                () -> Influencer.RESULT_TYPE_VALUE, () -> indexName,
+                () -> (sortField != null) ?
+                        " with sort " + (sortDescending ? "descending" : "ascending") + " on field " + esSortField(sortField) : "",
+                () -> from, () -> size);

         SearchRequestBuilder searchRequestBuilder = client.prepareSearch(indexName)
-                .setTypes(Influencer.TYPE.getPreferredName())
-                .setPostFilter(filterBuilder)
+                .setTypes(Result.TYPE.getPreferredName())
+                .setQuery(filterBuilder)
                 .setFrom(from).setSize(size);

         FieldSortBuilder sb = sortField == null ? SortBuilders.fieldSort(ElasticsearchMappings.ES_DOC)
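Note: throughout this file the trace calls move from eager string concatenation to {} placeholders, and the influencers() variant goes further, passing lambdas so the arguments themselves are only evaluated when TRACE is enabled. Assuming a Log4j 2 Logger behind Loggers (which accepts Supplier arguments, and which the lambda-style call above implies), the three styles compare as:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

class TraceLoggingStyles {
    private static final Logger LOGGER = LogManager.getLogger(TraceLoggingStyles.class);

    static void demo(String indexName, int from, int size) {
        // Eager: the message string is built even when TRACE is disabled.
        LOGGER.trace("search index " + indexName + " from " + from + " size " + size);

        // Parameterized: formatting is deferred until the level check passes.
        LOGGER.trace("search index {} from {} size {}", indexName, from, size);

        // Supplier form: argument evaluation itself is deferred, useful when
        // an argument is expensive to compute, like the sort description above.
        LOGGER.trace("search index {} from {} size {}",
                () -> indexName, () -> from, () -> size);
    }
}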
ElasticsearchMappings.java

@@ -24,6 +24,7 @@ import org.elasticsearch.xpack.prelert.job.results.Influence;
 import org.elasticsearch.xpack.prelert.job.results.Influencer;
 import org.elasticsearch.xpack.prelert.job.results.ModelDebugOutput;
 import org.elasticsearch.xpack.prelert.job.results.ReservedFieldNames;
+import org.elasticsearch.xpack.prelert.job.results.Result;
 import org.elasticsearch.xpack.prelert.job.usage.Usage;

 import java.io.IOException;
@@ -93,6 +94,243 @@ public class ElasticsearchMappings {
     private ElasticsearchMappings() {
     }

+    /**
+     * Create the Elasticsearch mapping for results objects
+     * {@link Bucket}s, {@link AnomalyRecord}s, {@link Influencer},
+     * {@link BucketInfluencer} and {@link CategoryDefinition}
+     *
+     * The '_all' field is disabled as the document isn't meant to be searched.
+     *
+     * @param termFieldNames All the term fields (by, over, partition) and influencers
+     *                       included in the mapping
+     *
+     * @return The mapping
+     * @throws IOException On write error
+     */
+    public static XContentBuilder resultsMapping(Collection<String> termFieldNames) throws IOException {
+        XContentBuilder builder = jsonBuilder()
+                .startObject()
+                .startObject(Result.TYPE.getPreferredName())
+                .startObject(ALL)
+                .field(ENABLED, false)
+                // analyzer must be specified even though _all is disabled
+                // because all types in the same index must have the same
+                // analyzer for a given field
+                .field(ANALYZER, WHITESPACE)
+                .endObject()
+                .startObject(PROPERTIES)
+                .startObject(Result.RESULT_TYPE.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(Job.ID.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(ES_TIMESTAMP)
+                .field(TYPE, DATE)
+                .endObject()
+                .startObject(Bucket.ANOMALY_SCORE.getPreferredName())
+                .field(TYPE, DOUBLE)
+                .endObject()
+                .startObject(Bucket.INITIAL_ANOMALY_SCORE.getPreferredName())
+                .field(TYPE, DOUBLE)
+                .endObject()
+                .startObject(Bucket.MAX_NORMALIZED_PROBABILITY.getPreferredName())
+                .field(TYPE, DOUBLE)
+                .endObject()
+                .startObject(Bucket.IS_INTERIM.getPreferredName())
+                .field(TYPE, BOOLEAN)
+                .endObject()
+                .startObject(Bucket.RECORD_COUNT.getPreferredName())
+                .field(TYPE, LONG)
+                .endObject()
+                .startObject(Bucket.EVENT_COUNT.getPreferredName())
+                .field(TYPE, LONG)
+                .endObject()
+                .startObject(Bucket.BUCKET_SPAN.getPreferredName())
+                .field(TYPE, LONG)
+                .endObject()
+                .startObject(Bucket.PROCESSING_TIME_MS.getPreferredName())
+                .field(TYPE, LONG)
+                .endObject()
+                .startObject(Bucket.PARTITION_SCORES.getPreferredName())
+                .field(TYPE, NESTED)
+                .startObject(PROPERTIES)
+                .startObject(AnomalyRecord.PARTITION_FIELD_NAME.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(AnomalyRecord.ANOMALY_SCORE.getPreferredName())
+                .field(TYPE, DOUBLE)
+                .endObject()
+                .startObject(AnomalyRecord.PROBABILITY.getPreferredName())
+                .field(TYPE, DOUBLE)
+                .endObject()
+                .startObject(Bucket.PARTITION_SCORES.getPreferredName())
+                .field(TYPE, NESTED)
+                .startObject(PROPERTIES)
+                .startObject(AnomalyRecord.PARTITION_FIELD_NAME.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(AnomalyRecord.ANOMALY_SCORE.getPreferredName())
+                .field(TYPE, DOUBLE)
+                .endObject()
+                .startObject(AnomalyRecord.PROBABILITY.getPreferredName())
+                .field(TYPE, DOUBLE)
+                .endObject()
+                .endObject()
+                .endObject()
+
+                // bucket influencer mapping
+                .startObject(BucketInfluencer.INFLUENCER_FIELD_NAME.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(BucketInfluencer.RAW_ANOMALY_SCORE.getPreferredName())
+                .field(TYPE, DOUBLE)
+                .endObject()
+
+                // influencer mapping
+                .startObject(Influencer.INFLUENCER_FIELD_VALUE.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject();
+
+        addAnomalyRecordFieldsToMapping(builder);
+
+        if (termFieldNames != null) {
+            ElasticsearchDotNotationReverser reverser = new ElasticsearchDotNotationReverser();
+            for (String fieldName : termFieldNames) {
+                reverser.add(fieldName, "");
+            }
+            for (Map.Entry<String, Object> entry : reverser.getMappingsMap().entrySet()) {
+                builder.field(entry.getKey(), entry.getValue());
+            }
+        }
+
+        builder.endObject()
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject();
+
+        return builder;
+    }
+
+    /**
+     * AnomalyRecord fields to be added under the 'properties' section of the mapping
+     * @param builder Add properties to this builder
+     * @return builder
+     * @throws IOException On write error
+     */
+    private static XContentBuilder addAnomalyRecordFieldsToMapping(XContentBuilder builder)
+            throws IOException {
+        builder.startObject(AnomalyRecord.DETECTOR_INDEX.getPreferredName())
+                .field(TYPE, INTEGER).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyRecord.ACTUAL.getPreferredName())
+                .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyRecord.TYPICAL.getPreferredName())
+                .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyRecord.PROBABILITY.getPreferredName())
+                .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyRecord.FUNCTION.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyRecord.FUNCTION_DESCRIPTION.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyRecord.BY_FIELD_NAME.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyRecord.BY_FIELD_VALUE.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(AnomalyRecord.FIELD_NAME.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyRecord.PARTITION_FIELD_NAME.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(AnomalyRecord.OVER_FIELD_NAME.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyRecord.OVER_FIELD_VALUE.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(AnomalyRecord.NORMALIZED_PROBABILITY.getPreferredName())
+                .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyRecord.INITIAL_NORMALIZED_PROBABILITY.getPreferredName())
+                .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyRecord.CAUSES.getPreferredName())
+                .field(TYPE, NESTED)
+                .startObject(PROPERTIES)
+                .startObject(AnomalyCause.ACTUAL.getPreferredName())
+                .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyCause.TYPICAL.getPreferredName())
+                .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyCause.PROBABILITY.getPreferredName())
+                .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyCause.FUNCTION.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyCause.FUNCTION_DESCRIPTION.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyCause.BY_FIELD_NAME.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyCause.BY_FIELD_VALUE.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(AnomalyCause.CORRELATED_BY_FIELD_VALUE.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(AnomalyCause.FIELD_NAME.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyCause.PARTITION_FIELD_NAME.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyCause.PARTITION_FIELD_VALUE.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .startObject(AnomalyCause.OVER_FIELD_NAME.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(AnomalyCause.OVER_FIELD_VALUE.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .endObject()
+                .endObject()
+                .startObject(AnomalyRecord.INFLUENCERS.getPreferredName())
+                /* Array of influences */
+                .field(TYPE, NESTED)
+                .startObject(PROPERTIES)
+                .startObject(Influence.INFLUENCER_FIELD_NAME.getPreferredName())
+                .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
+                .endObject()
+                .startObject(Influence.INFLUENCER_FIELD_VALUES.getPreferredName())
+                .field(TYPE, KEYWORD)
+                .endObject()
+                .endObject()
+                .endObject();
+
+        return builder;
+    }
+
     public static XContentBuilder dataCountsMapping() throws IOException {
         return jsonBuilder()
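Note: resultsMapping() emits the union of all the old per-type mappings under a single type, plus the result_type discriminator. A heavily trimmed sketch of the builder output shape (literal names substituted for the ParseField lookups; only three of the many fields shown):

import java.io.IOException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;

class ResultsMappingShape {
    static XContentBuilder sketch() throws IOException {
        return jsonBuilder()
                .startObject()
                    .startObject("result")                  // the one shared type
                        .startObject("properties")
                            .startObject("result_type")     // discriminator field
                                .field("type", "keyword")
                            .endObject()
                            .startObject("timestamp")
                                .field("type", "date")
                            .endObject()
                            .startObject("anomaly_score")
                                .field("type", "double")
                            .endObject()
                        .endObject()
                    .endObject()
                .endObject();
    }
}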
@@ -144,138 +382,6 @@ public class ElasticsearchMappings {
                 .endObject();
     }

-    /**
-     * Create the Elasticsearch mapping for {@linkplain org.elasticsearch.xpack.prelert.job.results.Bucket}.
-     * The '_all' field is disabled as the document isn't meant to be searched.
-     */
-    public static XContentBuilder bucketMapping() throws IOException {
-        return jsonBuilder()
-                .startObject()
-                .startObject(Bucket.TYPE.getPreferredName())
-                .startObject(ALL)
-                .field(ENABLED, false)
-                // analyzer must be specified even though _all is disabled
-                // because all types in the same index must have the same
-                // analyzer for a given field
-                .field(ANALYZER, WHITESPACE)
-                .endObject()
-                .startObject(PROPERTIES)
-                .startObject(Job.ID.getPreferredName())
-                .field(TYPE, KEYWORD)
-                .endObject()
-                .startObject(ES_TIMESTAMP)
-                .field(TYPE, DATE)
-                .endObject()
-                .startObject(Bucket.ANOMALY_SCORE.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .startObject(Bucket.INITIAL_ANOMALY_SCORE.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .startObject(Bucket.MAX_NORMALIZED_PROBABILITY.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .startObject(Bucket.IS_INTERIM.getPreferredName())
-                .field(TYPE, BOOLEAN)
-                .endObject()
-                .startObject(Bucket.RECORD_COUNT.getPreferredName())
-                .field(TYPE, LONG)
-                .endObject()
-                .startObject(Bucket.EVENT_COUNT.getPreferredName())
-                .field(TYPE, LONG)
-                .endObject()
-                .startObject(Bucket.BUCKET_SPAN.getPreferredName())
-                .field(TYPE, LONG)
-                .endObject()
-                .startObject(Bucket.PROCESSING_TIME_MS.getPreferredName())
-                .field(TYPE, LONG)
-                .endObject()
-                .startObject(Bucket.BUCKET_INFLUENCERS.getPreferredName())
-                .field(TYPE, NESTED)
-                .startObject(PROPERTIES)
-                .startObject(BucketInfluencer.INFLUENCER_FIELD_NAME.getPreferredName())
-                .field(TYPE, KEYWORD)
-                .endObject()
-                .startObject(BucketInfluencer.INITIAL_ANOMALY_SCORE.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .startObject(BucketInfluencer.ANOMALY_SCORE.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .startObject(BucketInfluencer.RAW_ANOMALY_SCORE.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .startObject(BucketInfluencer.PROBABILITY.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .endObject()
-                .endObject()
-                .startObject(Bucket.PARTITION_SCORES.getPreferredName())
-                .field(TYPE, NESTED)
-                .startObject(PROPERTIES)
-                .startObject(AnomalyRecord.PARTITION_FIELD_NAME.getPreferredName())
-                .field(TYPE, KEYWORD)
-                .endObject()
-                .startObject(AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName())
-                .field(TYPE, KEYWORD)
-                .endObject()
-                .startObject(AnomalyRecord.ANOMALY_SCORE.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .startObject(AnomalyRecord.PROBABILITY.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject();
-    }
-
-    /**
-     * Create the Elasticsearch mapping for {@linkplain org.elasticsearch.xpack.prelert.job.results.BucketInfluencer}.
-     */
-    public static XContentBuilder bucketInfluencerMapping() throws IOException {
-        return jsonBuilder()
-                .startObject()
-                .startObject(BucketInfluencer.TYPE.getPreferredName())
-                .startObject(ALL)
-                .field(ENABLED, false)
-                // analyzer must be specified even though _all is disabled
-                // because all types in the same index must have the same
-                // analyzer for a given field
-                .field(ANALYZER, WHITESPACE)
-                .endObject()
-                .startObject(PROPERTIES)
-                .startObject(Job.ID.getPreferredName())
-                .field(TYPE, KEYWORD)
-                .endObject()
-                .startObject(ES_TIMESTAMP)
-                .field(TYPE, DATE)
-                .endObject()
-                .startObject(Bucket.IS_INTERIM.getPreferredName())
-                .field(TYPE, BOOLEAN)
-                .endObject()
-                .startObject(BucketInfluencer.INFLUENCER_FIELD_NAME.getPreferredName())
-                .field(TYPE, KEYWORD)
-                .endObject()
-                .startObject(BucketInfluencer.INITIAL_ANOMALY_SCORE.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .startObject(BucketInfluencer.ANOMALY_SCORE.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .startObject(BucketInfluencer.RAW_ANOMALY_SCORE.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .startObject(BucketInfluencer.PROBABILITY.getPreferredName())
-                .field(TYPE, DOUBLE)
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject();
-    }
-
     /**
      * Partition normalized scores. There is one per bucket
      * so the timestamp is sufficient to uniquely identify
@@ -333,6 +439,36 @@ public class ElasticsearchMappings {
         .endObject();
     }

+    /**
+     * Create the Elasticsearch mapping for {@linkplain Quantiles}.
+     * The '_all' field is disabled as the document isn't meant to be searched.
+     * <p>
+     * The quantile state string is not searchable (index = 'no') as it could be
+     * very large.
+     */
+    public static XContentBuilder quantilesMapping() throws IOException {
+        return jsonBuilder()
+            .startObject()
+                .startObject(Quantiles.TYPE.getPreferredName())
+                    .startObject(ALL)
+                        .field(ENABLED, false)
+                        // analyzer must be specified even though _all is disabled
+                        // because all types in the same index must have the same
+                        // analyzer for a given field
+                        .field(ANALYZER, WHITESPACE)
+                    .endObject()
+                    .startObject(PROPERTIES)
+                        .startObject(ES_TIMESTAMP)
+                            .field(TYPE, DATE)
+                        .endObject()
+                        .startObject(Quantiles.QUANTILE_STATE.getPreferredName())
+                            .field(TYPE, TEXT).field(INDEX, NO)
+                        .endObject()
+                    .endObject()
+                .endObject()
+            .endObject();
+    }
+
     public static XContentBuilder categoryDefinitionMapping() throws IOException {
         return jsonBuilder()
             .startObject()
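A note on the quantiles mapping just added: the state string is stored in _source but deliberately not indexed, so it can never be matched by a query, yet a direct document GET still returns it. A minimal sketch of that retrieval pattern (the document ID used here is an assumption for illustration, not taken from this commit):

    import org.elasticsearch.action.get.GetResponse;
    import org.elasticsearch.client.Client;

    // Sketch only: the quantile state cannot be searched on (index = 'no'),
    // but fetching the document by ID bypasses the inverted index entirely
    // and reads the field straight out of _source.
    static String fetchQuantileState(Client client, String jobId) {
        GetResponse response = client.prepareGet(
                JobResultsPersister.getJobIndexName(jobId),
                Quantiles.TYPE.getPreferredName(),
                Quantiles.TYPE.getPreferredName())   // assumed document ID
                .get();
        return (String) response.getSource().get(Quantiles.QUANTILE_STATE.getPreferredName());
    }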
@@ -368,178 +504,6 @@ public class ElasticsearchMappings {
         .endObject();
     }

-    /**
-     * @param termFieldNames Optionally, other field names to include in the
-     * mappings. Pass <code>null</code> if not required.
-     */
-    public static XContentBuilder recordMapping(Collection<String> termFieldNames) throws IOException {
-        XContentBuilder builder = jsonBuilder()
-            .startObject()
-                .startObject(AnomalyRecord.TYPE.getPreferredName())
-                    .startObject(ALL)
-                        .field(ANALYZER, WHITESPACE)
-                    .endObject()
-                    .startObject(PROPERTIES)
-                        .startObject(Job.ID.getPreferredName())
-                            .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(ES_TIMESTAMP)
-                            .field(TYPE, DATE).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.DETECTOR_INDEX.getPreferredName())
-                            .field(TYPE, INTEGER).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.ACTUAL.getPreferredName())
-                            .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.TYPICAL.getPreferredName())
-                            .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.PROBABILITY.getPreferredName())
-                            .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.FUNCTION.getPreferredName())
-                            .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.FUNCTION_DESCRIPTION.getPreferredName())
-                            .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.BY_FIELD_NAME.getPreferredName())
-                            .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.BY_FIELD_VALUE.getPreferredName())
-                            .field(TYPE, KEYWORD)
-                        .endObject()
-                        .startObject(AnomalyRecord.FIELD_NAME.getPreferredName())
-                            .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.PARTITION_FIELD_NAME.getPreferredName())
-                            .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName())
-                            .field(TYPE, KEYWORD)
-                        .endObject()
-                        .startObject(AnomalyRecord.OVER_FIELD_NAME.getPreferredName())
-                            .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.OVER_FIELD_VALUE.getPreferredName())
-                            .field(TYPE, KEYWORD)
-                        .endObject()
-                        .startObject(AnomalyRecord.CAUSES.getPreferredName())
-                            .field(TYPE, NESTED)
-                            .startObject(PROPERTIES)
-                                .startObject(AnomalyCause.ACTUAL.getPreferredName())
-                                    .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
-                                .endObject()
-                                .startObject(AnomalyCause.TYPICAL.getPreferredName())
-                                    .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
-                                .endObject()
-                                .startObject(AnomalyCause.PROBABILITY.getPreferredName())
-                                    .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
-                                .endObject()
-                                .startObject(AnomalyCause.FUNCTION.getPreferredName())
-                                    .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                                .endObject()
-                                .startObject(AnomalyCause.FUNCTION_DESCRIPTION.getPreferredName())
-                                    .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                                .endObject()
-                                .startObject(AnomalyCause.BY_FIELD_NAME.getPreferredName())
-                                    .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                                .endObject()
-                                .startObject(AnomalyCause.BY_FIELD_VALUE.getPreferredName())
-                                    .field(TYPE, KEYWORD)
-                                .endObject()
-                                .startObject(AnomalyCause.CORRELATED_BY_FIELD_VALUE.getPreferredName())
-                                    .field(TYPE, KEYWORD)
-                                .endObject()
-                                .startObject(AnomalyCause.FIELD_NAME.getPreferredName())
-                                    .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                                .endObject()
-                                .startObject(AnomalyCause.PARTITION_FIELD_NAME.getPreferredName())
-                                    .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                                .endObject()
-                                .startObject(AnomalyCause.PARTITION_FIELD_VALUE.getPreferredName())
-                                    .field(TYPE, KEYWORD)
-                                .endObject()
-                                .startObject(AnomalyCause.OVER_FIELD_NAME.getPreferredName())
-                                    .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                                .endObject()
-                                .startObject(AnomalyCause.OVER_FIELD_VALUE.getPreferredName())
-                                    .field(TYPE, KEYWORD)
-                                .endObject()
-                            .endObject()
-                        .endObject()
-                        .startObject(AnomalyRecord.ANOMALY_SCORE.getPreferredName())
-                            .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.NORMALIZED_PROBABILITY.getPreferredName())
-                            .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.INITIAL_NORMALIZED_PROBABILITY.getPreferredName())
-                            .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.IS_INTERIM.getPreferredName())
-                            .field(TYPE, BOOLEAN).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(AnomalyRecord.INFLUENCERS.getPreferredName())
-                            /* Array of influences */
-                            .field(TYPE, NESTED)
-                            .startObject(PROPERTIES)
-                                .startObject(Influence.INFLUENCER_FIELD_NAME.getPreferredName())
-                                    .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                                .endObject()
-                                .startObject(Influence.INFLUENCER_FIELD_VALUES.getPreferredName())
-                                    .field(TYPE, KEYWORD)
-                                .endObject()
-                            .endObject()
-                        .endObject();
-
-        if (termFieldNames != null) {
-            ElasticsearchDotNotationReverser reverser = new ElasticsearchDotNotationReverser();
-            for (String fieldName : termFieldNames) {
-                reverser.add(fieldName, "");
-            }
-            for (Map.Entry<String, Object> entry : reverser.getMappingsMap().entrySet()) {
-                builder.field(entry.getKey(), entry.getValue());
-            }
-        }
-
-        return builder
-                .endObject()
-            .endObject()
-        .endObject();
-    }
-
-    /**
-     * Create the Elasticsearch mapping for {@linkplain Quantiles}.
-     * The '_all' field is disabled as the document isn't meant to be searched.
-     * <p>
-     * The quantile state string is not searchable (index = 'no') as it could be
-     * very large.
-     */
-    public static XContentBuilder quantilesMapping() throws IOException {
-        return jsonBuilder()
-            .startObject()
-                .startObject(Quantiles.TYPE.getPreferredName())
-                    .startObject(ALL)
-                        .field(ENABLED, false)
-                        // analyzer must be specified even though _all is disabled
-                        // because all types in the same index must have the same
-                        // analyzer for a given field
-                        .field(ANALYZER, WHITESPACE)
-                    .endObject()
-                    .startObject(PROPERTIES)
-                        .startObject(ES_TIMESTAMP)
-                            .field(TYPE, DATE)
-                        .endObject()
-                        .startObject(Quantiles.QUANTILE_STATE.getPreferredName())
-                            .field(TYPE, TEXT).field(INDEX, NO)
-                        .endObject()
-                    .endObject()
-                .endObject()
-            .endObject();
-    }
-
 /**
  * Create the Elasticsearch mapping for {@linkplain ModelState}.
  * The model state could potentially be huge (over a gigabyte in size)
@@ -763,61 +727,6 @@ public class ElasticsearchMappings {
         .endObject();
     }

-    /**
-     * Influence results mapping
-     *
-     * @param influencerFieldNames Optionally, other field names to include in the
-     * mappings. Pass <code>null</code> if not required.
-     */
-    public static XContentBuilder influencerMapping(Collection<String> influencerFieldNames) throws IOException {
-        XContentBuilder builder = jsonBuilder()
-            .startObject()
-                .startObject(Influencer.TYPE.getPreferredName())
-                    .startObject(ALL)
-                        .field(ANALYZER, WHITESPACE)
-                    .endObject()
-                    .startObject(PROPERTIES)
-                        .startObject(Job.ID.getPreferredName())
-                            .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(ES_TIMESTAMP)
-                            .field(TYPE, DATE).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(Influencer.PROBABILITY.getPreferredName())
-                            .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(Influencer.INITIAL_ANOMALY_SCORE.getPreferredName())
-                            .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(Influencer.ANOMALY_SCORE.getPreferredName())
-                            .field(TYPE, DOUBLE).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(Influencer.INFLUENCER_FIELD_NAME.getPreferredName())
-                            .field(TYPE, KEYWORD).field(INCLUDE_IN_ALL, false)
-                        .endObject()
-                        .startObject(Influencer.INFLUENCER_FIELD_VALUE.getPreferredName())
-                            .field(TYPE, KEYWORD)
-                        .endObject()
-                        .startObject(Bucket.IS_INTERIM.getPreferredName())
-                            .field(TYPE, BOOLEAN).field(INCLUDE_IN_ALL, false)
-                        .endObject();
-
-        if (influencerFieldNames != null) {
-            ElasticsearchDotNotationReverser reverser = new ElasticsearchDotNotationReverser();
-            for (String fieldName : influencerFieldNames) {
-                reverser.add(fieldName, "");
-            }
-            for (Map.Entry<String, Object> entry : reverser.getMappingsMap().entrySet()) {
-                builder.field(entry.getKey(), entry.getValue());
-            }
-        }
-
-        return builder
-                .endObject()
-            .endObject()
-        .endObject();
-    }
-
 /**
  * The Elasticsearch mappings for the usage documents
  */
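With the per-type record, quantiles and influencer mappings above removed or relocated, every result class shares one mapping, and readers now select a kind of result by filtering on the result_type field rather than on the Elasticsearch _type. A minimal sketch of such a filter (not code from this commit; the same idea drives the ResultsFilterBuilder used by the batched results iterators):

    import org.elasticsearch.index.query.QueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    // Sketch: restrict a search over the shared "result" type to influencer
    // documents only, using the result_type discriminator field.
    QueryBuilder influencersOnly = QueryBuilders.termQuery(
            Result.RESULT_TYPE.getPreferredName(), Influencer.RESULT_TYPE_VALUE);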
@@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.xpack.prelert.job.results.AnomalyRecord;
 import org.elasticsearch.xpack.prelert.job.results.Bucket;
 import org.elasticsearch.xpack.prelert.job.results.Influencer;
+import org.elasticsearch.xpack.prelert.job.results.Result;

 import java.io.IOException;
 import java.util.List;

@@ -46,8 +47,9 @@ public class JobRenormaliser extends AbstractComponent {
         String jobId = bucket.getJobId();
         try {
             String indexName = JobResultsPersister.getJobIndexName(jobId);
-            logger.trace("[{}] ES API CALL: index type {} to index {} with ID {}", jobId, Bucket.TYPE, indexName, bucket.getId());
-            client.prepareIndex(indexName, Bucket.TYPE.getPreferredName(), bucket.getId())
+            logger.trace("[{}] ES API CALL: update result type {} to index {} with ID {}", jobId, Bucket.RESULT_TYPE_VALUE, indexName,
+                    bucket.getId());
+            client.prepareIndex(indexName, Result.TYPE.getPreferredName(), bucket.getId())
                     .setSource(jobResultsPersister.toXContentBuilder(bucket)).execute().actionGet();
         } catch (IOException e) {
             logger.error(new ParameterizedMessage("[{}] Error updating bucket state", new Object[]{jobId}, e));

@@ -83,11 +85,11 @@ public class JobRenormaliser extends AbstractComponent {
         for (AnomalyRecord record : records) {
             String recordId = record.getId();
             String indexName = JobResultsPersister.getJobIndexName(jobId);
-            logger.trace("[{}] ES BULK ACTION: update ID {} type {} in index {} using map of new values, for bucket {}",
-                    jobId, recordId, AnomalyRecord.TYPE, indexName, bucketId);
+            logger.trace("[{}] ES BULK ACTION: update ID {} result type {} in index {} using map of new values, for bucket {}",
+                    jobId, recordId, AnomalyRecord.RESULT_TYPE_VALUE, indexName, bucketId);

             bulkRequest.add(
-                    client.prepareIndex(indexName, AnomalyRecord.TYPE.getPreferredName(), recordId)
+                    client.prepareIndex(indexName, Result.TYPE.getPreferredName(), recordId)
                             .setSource(jobResultsPersister.toXContentBuilder(record)));

             addedAny = true;
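The renormaliser keeps working after this change because an index request that reuses an existing document ID replaces the stored document; only the Elasticsearch _type it writes under has moved to the shared "result" type. A small sketch of that overwrite behaviour (index name and document ID invented for illustration):

    // Indexing twice with the same ID under the shared "result" type: the
    // second request replaces the first, which is exactly what renormalised
    // buckets and records rely on when their scores are rewritten.
    client.prepareIndex("prelertresults-foo", Result.TYPE.getPreferredName(), "bucket-1")
            .setSource("{\"anomalyScore\": 10.0}").execute().actionGet();
    client.prepareIndex("prelertresults-foo", Result.TYPE.getPreferredName(), "bucket-1")
            .setSource("{\"anomalyScore\": 42.0}").execute().actionGet();   // overwrites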
@@ -28,6 +28,7 @@ import org.elasticsearch.xpack.prelert.job.results.CategoryDefinition;
 import org.elasticsearch.xpack.prelert.job.results.Influencer;
 import org.elasticsearch.xpack.prelert.job.results.ModelDebugOutput;
 import org.elasticsearch.xpack.prelert.job.results.ReservedFieldNames;
+import org.elasticsearch.xpack.prelert.job.results.Result;

 import java.io.IOException;
 import java.util.Date;

@@ -73,13 +74,15 @@ public class JobResultsPersister extends AbstractComponent {
         try {
             XContentBuilder content = toXContentBuilder(bucket);
             String indexName = getJobIndexName(jobId);
-            logger.trace("[{}] ES API CALL: index type {} to index {} at epoch {}", jobId, Bucket.TYPE, indexName, bucket.getEpoch());
-            IndexResponse response = client.prepareIndex(indexName, Bucket.TYPE.getPreferredName())
+            logger.trace("[{}] ES API CALL: index result type {} to index {} at epoch {}", jobId, Bucket.RESULT_TYPE_VALUE, indexName,
+                    bucket.getEpoch());
+            IndexResponse response = client.prepareIndex(indexName, Result.TYPE.getPreferredName())
                     .setSource(content)
                     .execute().actionGet();
             bucket.setId(response.getId());
             persistBucketInfluencersStandalone(jobId, bucket.getId(), bucket.getBucketInfluencers(), bucket.getTimestamp(),
                     bucket.isInterim());

             persistPerPartitionMaxProbabilities(bucket);
         } catch (IOException e) {
             logger.error(new ParameterizedMessage("[{}] Error persisting bucket", new Object[] {jobId}, e));

@@ -102,11 +105,12 @@ public class JobResultsPersister extends AbstractComponent {
             for (AnomalyRecord record : records) {
                 content = toXContentBuilder(record);

-                logger.trace("[{}] ES BULK ACTION: index type {} to index {} with auto-generated ID", jobId, AnomalyRecord.TYPE, indexName);
-                addRecordsRequest.add(client.prepareIndex(indexName, AnomalyRecord.TYPE.getPreferredName()).setSource(content));
+                logger.trace("[{}] ES BULK ACTION: index result type {} to index {} with auto-generated ID",
+                        jobId, AnomalyRecord.RESULT_TYPE_VALUE, indexName);
+                addRecordsRequest.add(client.prepareIndex(indexName, Result.TYPE.getPreferredName()).setSource(content));
             }
         } catch (IOException e) {
             logger.error(new ParameterizedMessage("[{}] Error persisting records", new Object[] {jobId}, e));
             return;
         }

@@ -132,9 +136,9 @@ public class JobResultsPersister extends AbstractComponent {
         try {
             for (Influencer influencer : influencers) {
                 content = toXContentBuilder(influencer);
-                logger.trace("[{}] ES BULK ACTION: index type {} to index {} with auto-generated ID",
-                        jobId, Influencer.TYPE, indexName);
-                addInfluencersRequest.add(client.prepareIndex(indexName, Influencer.TYPE.getPreferredName()).setSource(content));
+                logger.trace("[{}] ES BULK ACTION: index result type {} to index {} with auto-generated ID",
+                        jobId, Influencer.RESULT_TYPE_VALUE, indexName);
+                addInfluencersRequest.add(client.prepareIndex(indexName, Result.TYPE.getPreferredName()).setSource(content));
             }
         } catch (IOException e) {
             logger.error(new ParameterizedMessage("[{}] Error persisting influencers", new Object[] {jobId}, e));

@@ -216,7 +220,7 @@ public class JobResultsPersister extends AbstractComponent {
      * Persist the influencer
      */
     public void persistInfluencer(Influencer influencer) {
-        Persistable persistable = new Persistable(influencer.getJobId(), influencer, Influencer.TYPE::getPreferredName,
+        Persistable persistable = new Persistable(influencer.getJobId(), influencer, Result.TYPE::getPreferredName,
                 influencer::getId, () -> toXContentBuilder(influencer));
         persistable.persist();
         // Don't commit as we expect masses of these updates and they're not

@@ -296,10 +300,10 @@ public class JobResultsPersister extends AbstractComponent {
             // Need consistent IDs to ensure overwriting on renormalisation
             String id = bucketId + bucketInfluencer.getInfluencerFieldName();
             String indexName = getJobIndexName(jobId);
-            logger.trace("[{}] ES BULK ACTION: index type {} to index {} with ID {}", jobId, BucketInfluencer.TYPE, indexName, id);
+            logger.trace("[{}] ES BULK ACTION: index result type {} to index {} with ID {}", jobId, BucketInfluencer.RESULT_TYPE_VALUE,
+                    indexName, id);
             addBucketInfluencersRequest.add(
-                    client.prepareIndex(indexName, BucketInfluencer.TYPE.getPreferredName(), id)
-                            .setSource(content));
+                    client.prepareIndex(indexName, Result.TYPE.getPreferredName(), id).setSource(content));
         }
         logger.trace("[{}] ES API CALL: bulk request with {} actions", jobId, addBucketInfluencersRequest.numberOfActions());
         BulkResponse addBucketInfluencersResponse = addBucketInfluencersRequest.execute().actionGet();
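Since buckets, records, influencers and bucket influencers now all land in the same "result" type, the old trick of counting documents per _type no longer distinguishes them. A sketch (not part of the commit) of doing the equivalent with a terms aggregation on the discriminator field:

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.search.aggregations.AggregationBuilders;

    // Sketch: count how many documents of each result kind a job has written,
    // by aggregating on result_type over the shared "result" type.
    SearchResponse counts = client.prepareSearch(JobResultsPersister.getJobIndexName(jobId))
            .setTypes(Result.TYPE.getPreferredName())
            .setSize(0)
            .addAggregation(AggregationBuilders.terms("by_result_type")
                    .field(Result.RESULT_TYPE.getPreferredName()))
            .get();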
@@ -29,16 +29,15 @@ import java.util.Objects;
  * can be returned if the members have not been set.
  */
 public class AnomalyRecord extends ToXContentToBytes implements Writeable {
-    /**
-     * Serialisation fields
-     */
-    public static final ParseField TYPE = new ParseField("record");
-
+    /**
+     * Result type
+     */
+    public static final String RESULT_TYPE_VALUE = "record";
     /**
      * Result fields (all detector types)
      */
     public static final ParseField JOB_ID = new ParseField("jobId");
-    public static final ParseField RESULT_TYPE = new ParseField("result_type");
     public static final ParseField DETECTOR_INDEX = new ParseField("detectorIndex");
     public static final ParseField PROBABILITY = new ParseField("probability");
     public static final ParseField BY_FIELD_NAME = new ParseField("byFieldName");

@@ -78,11 +77,11 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable {
     public static final ParseField INITIAL_NORMALIZED_PROBABILITY = new ParseField("initialNormalizedProbability");

     public static final ConstructingObjectParser<AnomalyRecord, ParseFieldMatcherSupplier> PARSER =
-            new ConstructingObjectParser<>(TYPE.getPreferredName(), a -> new AnomalyRecord((String) a[0]));
+            new ConstructingObjectParser<>(RESULT_TYPE_VALUE, a -> new AnomalyRecord((String) a[0]));

     static {
         PARSER.declareString(ConstructingObjectParser.constructorArg(), JOB_ID);
-        PARSER.declareString((anomalyRecord, s) -> {}, RESULT_TYPE);
+        PARSER.declareString((anomalyRecord, s) -> {}, Result.RESULT_TYPE);
         PARSER.declareDouble(AnomalyRecord::setProbability, PROBABILITY);
         PARSER.declareDouble(AnomalyRecord::setAnomalyScore, ANOMALY_SCORE);
         PARSER.declareDouble(AnomalyRecord::setNormalizedProbability, NORMALIZED_PROBABILITY);

@@ -114,8 +113,6 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable {
         PARSER.declareObjectArray(AnomalyRecord::setInfluencers, Influence.PARSER, INFLUENCERS);
     }

-    public static final String RESULT_TYPE_VALUE = "record";
-
     private final String jobId;
     private String id;
     private int detectorIndex;

@@ -246,7 +243,7 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
         builder.field(JOB_ID.getPreferredName(), jobId);
-        builder.field(RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE);
+        builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE);
         builder.field(PROBABILITY.getPreferredName(), probability);
         builder.field(ANOMALY_SCORE.getPreferredName(), anomalyScore);
         builder.field(NORMALIZED_PROBABILITY.getPreferredName(), normalizedProbability);
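One subtlety in the parser change above: toXContent() now writes result_type into every record, so PARSER must accept that field when a document is read back, but the value carries no information beyond the class being parsed. That is why the consumer is deliberately empty:

    // The lambda intentionally discards the parsed value: encountering
    // "result_type": "record" during parsing is expected and valid, but
    // there is nothing to set on the object, since the type is implied
    // by the class itself.
    PARSER.declareString((anomalyRecord, s) -> {}, Result.RESULT_TYPE);

The same pattern recurs in Bucket, BucketInfluencer and Influencer below.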
@@ -26,8 +26,6 @@ import java.util.Objects;
 public class AutodetectResult extends ToXContentToBytes implements Writeable {

     public static final ParseField TYPE = new ParseField("autodetect_result");
-    public static final ParseField RECORDS = new ParseField("records");
-    public static final ParseField INFLUENCERS = new ParseField("influencers");

     @SuppressWarnings("unchecked")
     public static final ConstructingObjectParser<AutodetectResult, ParseFieldMatcherSupplier> PARSER = new ConstructingObjectParser<>(

@@ -36,9 +34,9 @@ public class AutodetectResult extends ToXContentToBytes implements Writeable {
             (ModelDebugOutput) a[6], (CategoryDefinition) a[7], (FlushAcknowledgement) a[8]));

     static {
-        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Bucket.PARSER, Bucket.TYPE);
-        PARSER.declareObjectArray(ConstructingObjectParser.optionalConstructorArg(), AnomalyRecord.PARSER, RECORDS);
-        PARSER.declareObjectArray(ConstructingObjectParser.optionalConstructorArg(), Influencer.PARSER, INFLUENCERS);
+        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Bucket.PARSER, Bucket.RESULT_TYPE_FIELD);
+        PARSER.declareObjectArray(ConstructingObjectParser.optionalConstructorArg(), AnomalyRecord.PARSER, AnomalyRecord.RESULTS_FIELD);
+        PARSER.declareObjectArray(ConstructingObjectParser.optionalConstructorArg(), Influencer.PARSER, Influencer.RESULTS_FIELD);
         PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Quantiles.PARSER, Quantiles.TYPE);
         PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ModelSnapshot.PARSER, ModelSnapshot.TYPE);
         PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ModelSizeStats.PARSER, ModelSizeStats.TYPE);

@@ -151,9 +149,9 @@ public class AutodetectResult extends ToXContentToBytes implements Writeable {
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
-        addNullableField(Bucket.TYPE, bucket, builder);
-        addNullableField(RECORDS, records, builder);
-        addNullableField(INFLUENCERS, influencers, builder);
+        addNullableField(Bucket.RESULT_TYPE_FIELD, bucket, builder);
+        addNullableField(AnomalyRecord.RESULTS_FIELD, records, builder);
+        addNullableField(Influencer.RESULTS_FIELD, influencers, builder);
         addNullableField(Quantiles.TYPE, quantiles, builder);
         addNullableField(ModelSnapshot.TYPE, modelSnapshot, builder);
         addNullableField(ModelSizeStats.TYPE, modelSizeStats, builder);
@@ -54,12 +54,13 @@ public class Bucket extends ToXContentToBytes implements Writeable {
     public static final ParseField RESULTS_FIELD = new ParseField("buckets");

     /**
-     * Elasticsearch type
+     * Result type
      */
-    public static final ParseField TYPE = new ParseField("bucket");
+    public static final String RESULT_TYPE_VALUE = "bucket";
+    public static final ParseField RESULT_TYPE_FIELD = new ParseField(RESULT_TYPE_VALUE);

     public static final ConstructingObjectParser<Bucket, ParseFieldMatcherSupplier> PARSER =
-            new ConstructingObjectParser<>(TYPE.getPreferredName(), a -> new Bucket((String) a[0]));
+            new ConstructingObjectParser<>(RESULT_TYPE_VALUE, a -> new Bucket((String) a[0]));

     static {
         PARSER.declareString(ConstructingObjectParser.constructorArg(), JOB_ID);

@@ -82,6 +83,7 @@ public class Bucket extends ToXContentToBytes implements Writeable {
         PARSER.declareLong(Bucket::setBucketSpan, BUCKET_SPAN);
         PARSER.declareLong(Bucket::setProcessingTimeMs, PROCESSING_TIME_MS);
         PARSER.declareObjectArray(Bucket::setPartitionScores, PartitionScore.PARSER, PARTITION_SCORES);
+        PARSER.declareString((bucket, s) -> {}, Result.RESULT_TYPE);
     }

     private final String jobId;

@@ -173,6 +175,7 @@ public class Bucket extends ToXContentToBytes implements Writeable {
         builder.field(BUCKET_INFLUENCERS.getPreferredName(), bucketInfluencers);
         builder.field(PROCESSING_TIME_MS.getPreferredName(), processingTimeMs);
         builder.field(PARTITION_SCORES.getPreferredName(), partitionScores);
+        builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE);
         builder.endObject();
         return builder;
     }
@@ -24,14 +24,10 @@ import java.util.Objects;

 public class BucketInfluencer extends ToXContentToBytes implements Writeable {
     /**
-     * Elasticsearch type
+     * Result type
      */
-    public static final ParseField TYPE = new ParseField("bucketInfluencer");
-    /**
-     * This is the field name of the time bucket influencer.
-     */
-    public static final ParseField BUCKET_TIME = new ParseField("bucketTime");
-
+    public static final String RESULT_TYPE_VALUE = "bucketInfluencer";
+    public static final ParseField RESULT_TYPE_FIELD = new ParseField(RESULT_TYPE_VALUE);

     /*
      * Field names

@@ -46,10 +42,11 @@ public class BucketInfluencer extends ToXContentToBytes implements Writeable {
     public static final ParseField TIMESTAMP = new ParseField("timestamp");

     public static final ConstructingObjectParser<BucketInfluencer, ParseFieldMatcherSupplier> PARSER =
-            new ConstructingObjectParser<>(TYPE.getPreferredName(), a -> new BucketInfluencer((String) a[0]));
+            new ConstructingObjectParser<>(RESULT_TYPE_FIELD.getPreferredName(), a -> new BucketInfluencer((String) a[0]));

     static {
         PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
+        PARSER.declareString((bucketInfluencer, s) -> {}, Result.RESULT_TYPE);
         PARSER.declareString(BucketInfluencer::setInfluencerFieldName, INFLUENCER_FIELD_NAME);
         PARSER.declareDouble(BucketInfluencer::setInitialAnomalyScore, INITIAL_ANOMALY_SCORE);
         PARSER.declareDouble(BucketInfluencer::setAnomalyScore, ANOMALY_SCORE);

@@ -123,6 +120,7 @@ public class BucketInfluencer extends ToXContentToBytes implements Writeable {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
         builder.field(Job.ID.getPreferredName(), jobId);
+        builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE);
         if (influenceField != null) {
             builder.field(INFLUENCER_FIELD_NAME.getPreferredName(), influenceField);
         }
@@ -23,9 +23,10 @@ import java.util.Objects;

 public class Influencer extends ToXContentToBytes implements Writeable {
     /**
-     * Elasticsearch type
+     * Result type
      */
-    public static final ParseField TYPE = new ParseField("influencer");
+    public static final String RESULT_TYPE_VALUE = "influencer";
+    public static final ParseField RESULT_TYPE_FIELD = new ParseField(RESULT_TYPE_VALUE);

     /*
      * Field names

@@ -42,12 +43,13 @@ public class Influencer extends ToXContentToBytes implements Writeable {
     public static final ParseField RESULTS_FIELD = new ParseField("influencers");

     public static final ConstructingObjectParser<Influencer, ParseFieldMatcherSupplier> PARSER = new ConstructingObjectParser<>(
-            TYPE.getPreferredName(), a -> new Influencer((String) a[0], (String) a[1], (String) a[2]));
+            RESULT_TYPE_FIELD.getPreferredName(), a -> new Influencer((String) a[0], (String) a[1], (String) a[2]));

     static {
         PARSER.declareString(ConstructingObjectParser.constructorArg(), JOB_ID);
         PARSER.declareString(ConstructingObjectParser.constructorArg(), INFLUENCER_FIELD_NAME);
         PARSER.declareString(ConstructingObjectParser.constructorArg(), INFLUENCER_FIELD_VALUE);
+        PARSER.declareString((influencer, s) -> {}, Result.RESULT_TYPE);
         PARSER.declareDouble(Influencer::setProbability, PROBABILITY);
         PARSER.declareDouble(Influencer::setAnomalyScore, ANOMALY_SCORE);
         PARSER.declareDouble(Influencer::setInitialAnomalyScore, INITIAL_ANOMALY_SCORE);

@@ -116,6 +118,7 @@ public class Influencer extends ToXContentToBytes implements Writeable {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
         builder.field(JOB_ID.getPreferredName(), jobId);
+        builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE);
         builder.field(INFLUENCER_FIELD_NAME.getPreferredName(), influenceField);
         builder.field(INFLUENCER_FIELD_VALUE.getPreferredName(), influenceValue);
         builder.field(ANOMALY_SCORE.getPreferredName(), anomalyScore);
@@ -91,13 +91,10 @@ public final class ReservedFieldNames {
             Bucket.IS_INTERIM.getPreferredName(),
             Bucket.RECORD_COUNT.getPreferredName(),
             Bucket.EVENT_COUNT.getPreferredName(),
-            Bucket.RECORDS.getPreferredName(),
-            Bucket.BUCKET_INFLUENCERS.getPreferredName(),
             Bucket.INITIAL_ANOMALY_SCORE.getPreferredName(),
             Bucket.PROCESSING_TIME_MS.getPreferredName(),
             Bucket.PARTITION_SCORES.getPreferredName(),

-            BucketInfluencer.BUCKET_TIME.getPreferredName(), BucketInfluencer.INFLUENCER_FIELD_NAME.getPreferredName(),
             BucketInfluencer.INITIAL_ANOMALY_SCORE.getPreferredName(), BucketInfluencer.ANOMALY_SCORE.getPreferredName(),
             BucketInfluencer.RAW_ANOMALY_SCORE.getPreferredName(), BucketInfluencer.PROBABILITY.getPreferredName(),

@@ -151,18 +148,17 @@ public final class ReservedFieldNames {
             ModelSnapshot.RESTORE_PRIORITY.getPreferredName(),
             ModelSnapshot.SNAPSHOT_ID.getPreferredName(),
             ModelSnapshot.SNAPSHOT_DOC_COUNT.getPreferredName(),
-            ModelSizeStats.TYPE.getPreferredName(),
             ModelSnapshot.LATEST_RECORD_TIME.getPreferredName(),
             ModelSnapshot.LATEST_RESULT_TIME.getPreferredName(),

             Quantiles.QUANTILE_STATE.getPreferredName(),

+            Result.RESULT_TYPE.getPreferredName(),

             Usage.INPUT_BYTES,
             Usage.INPUT_FIELD_COUNT,
             Usage.INPUT_RECORD_COUNT,
             Usage.TIMESTAMP,
-            Usage.TYPE,

             JOB_ID_NAME,
             ES_TIMESTAMP
@@ -0,0 +1,20 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.prelert.job.results;
+
+import org.elasticsearch.common.ParseField;
+
+/**
+ * Common attributes of the result types
+ */
+public class Result {
+
+    /**
+     * Serialisation fields
+     */
+    public static final ParseField TYPE = new ParseField("result");
+    public static final ParseField RESULT_TYPE = new ParseField("result_type");
+}
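The new Result class gives every result document a common discriminator. A consumer reading mixed hits back from the shared index can dispatch on it; a hypothetical sketch (not a method from this commit):

    import org.elasticsearch.search.SearchHit;

    // Sketch: identify which result class a raw search hit belongs to by its
    // result_type field, now that all results share the "result" type.
    static String kindOf(SearchHit hit) {
        String resultType = (String) hit.getSource().get(Result.RESULT_TYPE.getPreferredName());
        if (Bucket.RESULT_TYPE_VALUE.equals(resultType)) {
            return "bucket";
        }
        if (AnomalyRecord.RESULT_TYPE_VALUE.equals(resultType)) {
            return "record";
        }
        return resultType; // influencer, bucketInfluencer, ...
    }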
@@ -31,6 +31,18 @@ import static org.hamcrest.Matchers.not;

 public class PrelertJobIT extends ESRestTestCase {

+    private static final String RESULT_MAPPING = "{ \"mappings\": {\"result\": { \"properties\": { " +
+            "\"result_type\": { \"type\" : \"keyword\" }," +
+            "\"timestamp\": { \"type\" : \"date\" }, " +
+            "\"anomalyScore\": { \"type\" : \"double\" }, " +
+            "\"normalizedProbability\": { \"type\" : \"double\" }, " +
+            "\"overFieldValue\": { \"type\" : \"keyword\" }, " +
+            "\"partitionFieldValue\": { \"type\" : \"keyword\" }, " +
+            "\"byFieldValue\": { \"type\" : \"keyword\" }, " +
+            "\"fieldName\": { \"type\" : \"keyword\" }, " +
+            "\"function\": { \"type\" : \"keyword\" } " +
+            "} } } }";
+
     public void testPutJob_GivenFarequoteConfig() throws Exception {
         Response response = createFarequoteJob();

@@ -204,6 +216,31 @@ public class PrelertJobIT extends ESRestTestCase {
         assertThat(responseAsString, not(isEmptyString()));
     }

+    public void testGetRecordResults() throws Exception {
+        Map<String, String> params = new HashMap<>();
+        params.put("start", "1200"); // inclusive
+        params.put("end", "1400"); // exclusive
+
+        ResponseException e = expectThrows(ResponseException.class,
+                () -> client().performRequest("get", PrelertPlugin.BASE_PATH + "results/1/records", params));
+        assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404));
+        assertThat(e.getMessage(), containsString("No known job with id '1'"));
+
+        addRecordResult("1", "1234");
+        addRecordResult("1", "1235");
+        addRecordResult("1", "1236");
+        Response response = client().performRequest("get", PrelertPlugin.BASE_PATH + "results/1/records", params);
+        assertThat(response.getStatusLine().getStatusCode(), equalTo(200));
+        String responseAsString = responseEntityToString(response);
+        assertThat(responseAsString, containsString("\"count\":3"));
+
+        params.put("end", "1235");
+        response = client().performRequest("get", PrelertPlugin.BASE_PATH + "results/1/records", params);
+        assertThat(response.getStatusLine().getStatusCode(), equalTo(200));
+        responseAsString = responseEntityToString(response);
+        assertThat(responseAsString, containsString("\"count\":1"));
+    }
+
     public void testPauseAndResumeJob() throws Exception {
         createFarequoteJob();

@@ -251,17 +288,32 @@ public class PrelertJobIT extends ESRestTestCase {
     }

     private Response addBucketResult(String jobId, String timestamp) throws Exception {
-        String createIndexBody = "{ \"mappings\": {\"bucket\": { \"properties\": { \"timestamp\": { \"type\" : \"date\" } } } } }";
         try {
-            client().performRequest("put", "prelertresults-" + jobId, Collections.emptyMap(), new StringEntity(createIndexBody));
+            client().performRequest("put", "prelertresults-" + jobId, Collections.emptyMap(), new StringEntity(RESULT_MAPPING));
         } catch (ResponseException e) {
             // it is ok: the index already exists
             assertThat(e.getMessage(), containsString("index_already_exists_exception"));
             assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400));
         }

-        String bucketResult = String.format(Locale.ROOT, "{\"jobId\":\"%s\", \"timestamp\": \"%s\"}", jobId, timestamp);
-        return client().performRequest("put", "prelertresults-" + jobId + "/bucket/" + timestamp,
+        String bucketResult =
+                String.format(Locale.ROOT, "{\"jobId\":\"%s\", \"timestamp\": \"%s\", \"result_type\":\"bucket\"}", jobId, timestamp);
+        return client().performRequest("put", "prelertresults-" + jobId + "/result/" + timestamp,
+                Collections.singletonMap("refresh", "true"), new StringEntity(bucketResult));
+    }
+
+    private Response addRecordResult(String jobId, String timestamp) throws Exception {
+        try {
+            client().performRequest("put", "prelertresults-" + jobId, Collections.emptyMap(), new StringEntity(RESULT_MAPPING));
+        } catch (ResponseException e) {
+            // it is ok: the index already exists
+            assertThat(e.getMessage(), containsString("index_already_exists_exception"));
+            assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400));
+        }
+
+        String bucketResult =
+                String.format(Locale.ROOT, "{\"jobId\":\"%s\", \"timestamp\": \"%s\", \"result_type\":\"record\"}", jobId, timestamp);
+        return client().performRequest("put", "prelertresults-" + jobId + "/result/" + timestamp,
                 Collections.singletonMap("refresh", "true"), new StringEntity(bucketResult));
     }
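For readability, the RESULT_MAPPING string literal used by these tests expands to the following JSON (reconstructed directly from the concatenated literal above):

    { "mappings": { "result": { "properties": {
        "result_type":           { "type": "keyword" },
        "timestamp":             { "type": "date" },
        "anomalyScore":          { "type": "double" },
        "normalizedProbability": { "type": "double" },
        "overFieldValue":        { "type": "keyword" },
        "partitionFieldValue":   { "type": "keyword" },
        "byFieldValue":          { "type": "keyword" },
        "fieldName":             { "type": "keyword" },
        "function":              { "type": "keyword" }
    } } } }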
@ -30,6 +30,7 @@ import org.elasticsearch.xpack.prelert.job.results.AnomalyRecord;
|
||||||
import org.elasticsearch.xpack.prelert.job.results.Bucket;
|
import org.elasticsearch.xpack.prelert.job.results.Bucket;
|
||||||
import org.elasticsearch.xpack.prelert.job.results.CategoryDefinition;
|
import org.elasticsearch.xpack.prelert.job.results.CategoryDefinition;
|
||||||
import org.elasticsearch.xpack.prelert.job.results.Influencer;
|
import org.elasticsearch.xpack.prelert.job.results.Influencer;
|
||||||
|
import org.elasticsearch.xpack.prelert.job.results.Result;
|
||||||
import org.mockito.ArgumentCaptor;
|
import org.mockito.ArgumentCaptor;
|
||||||
import org.mockito.Captor;
|
import org.mockito.Captor;
|
||||||
|
|
||||||
|
@ -46,7 +47,6 @@ import java.util.concurrent.ExecutionException;
|
||||||
import java.util.concurrent.atomic.AtomicReference;
|
import java.util.concurrent.atomic.AtomicReference;
|
||||||
|
|
||||||
import static org.elasticsearch.xpack.prelert.job.JobTests.buildJobBuilder;
|
import static org.elasticsearch.xpack.prelert.job.JobTests.buildJobBuilder;
|
||||||
import static org.hamcrest.Matchers.empty;
|
|
||||||
import static org.hamcrest.Matchers.equalTo;
|
import static org.hamcrest.Matchers.equalTo;
|
||||||
import static org.hamcrest.Matchers.instanceOf;
|
import static org.hamcrest.Matchers.instanceOf;
|
||||||
import static org.mockito.Matchers.any;
|
import static org.mockito.Matchers.any;
|
||||||
|
@ -220,7 +220,7 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
|
||||||
int size = 10;
|
int size = 10;
|
||||||
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
|
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
|
||||||
.addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
|
.addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
|
||||||
.prepareSearch("prelertresults-" + jobId, Bucket.TYPE.getPreferredName(), from, size, response, queryBuilder);
|
.prepareSearch("prelertresults-" + jobId, Result.TYPE.getPreferredName(), from, size, response, queryBuilder);
|
||||||
|
|
||||||
Client client = clientBuilder.build();
|
Client client = clientBuilder.build();
|
||||||
ElasticsearchJobProvider provider = createProvider(client);
|
ElasticsearchJobProvider provider = createProvider(client);
|
||||||
|
@ -253,7 +253,7 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
|
||||||
int size = 17;
|
int size = 17;
|
||||||
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
|
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
|
||||||
.addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
|
.addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
|
||||||
.prepareSearch("prelertresults-" + jobId, Bucket.TYPE.getPreferredName(), from, size, response, queryBuilder);
|
.prepareSearch("prelertresults-" + jobId, Result.TYPE.getPreferredName(), from, size, response, queryBuilder);
|
||||||
|
|
||||||
Client client = clientBuilder.build();
|
Client client = clientBuilder.build();
|
||||||
ElasticsearchJobProvider provider = createProvider(client);
|
ElasticsearchJobProvider provider = createProvider(client);
|
||||||
|
@ -287,7 +287,7 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
|
||||||
int size = 17;
|
int size = 17;
|
||||||
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
|
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
|
||||||
.addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
|
.addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
|
||||||
.prepareSearch("prelertresults-" + jobId, Bucket.TYPE.getPreferredName(), from, size, response, queryBuilder);
|
.prepareSearch("prelertresults-" + jobId, Result.TYPE.getPreferredName(), from, size, response, queryBuilder);
|
||||||
|
|
||||||
Client client = clientBuilder.build();
|
Client client = clientBuilder.build();
|
||||||
ElasticsearchJobProvider provider = createProvider(client);
|
ElasticsearchJobProvider provider = createProvider(client);
|
||||||
|
@ -323,7 +323,7 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
|
||||||
SearchResponse response = createSearchResponse(false, source);
|
SearchResponse response = createSearchResponse(false, source);
|
||||||
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
|
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
|
||||||
.addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
|
.addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
|
||||||
.prepareSearch("prelertresults-" + jobId, Bucket.TYPE.getPreferredName(), 0, 0, response, queryBuilder);
|
.prepareSearch("prelertresults-" + jobId, Result.TYPE.getPreferredName(), 0, 0, response, queryBuilder);
|
||||||
|
|
||||||
Client client = clientBuilder.build();
|
Client client = clientBuilder.build();
|
||||||
ElasticsearchJobProvider provider = createProvider(client);
|
ElasticsearchJobProvider provider = createProvider(client);
|
||||||
|
@ -349,7 +349,7 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
|
||||||
SearchResponse response = createSearchResponse(true, source);
|
SearchResponse response = createSearchResponse(true, source);
|
||||||
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
|
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
|
||||||
.addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
|
.addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
|
||||||
.prepareSearch("prelertresults-" + jobId, Bucket.TYPE.getPreferredName(), 0, 0, response, queryBuilder);
|
.prepareSearch("prelertresults-" + jobId, Result.TYPE.getPreferredName(), 0, 0, response, queryBuilder);
|
||||||
|
|
||||||
Client client = clientBuilder.build();
|
Client client = clientBuilder.build();
|
||||||
ElasticsearchJobProvider provider = createProvider(client);
|
ElasticsearchJobProvider provider = createProvider(client);
|
||||||
|
@@ -378,7 +378,7 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
         SearchResponse response = createSearchResponse(true, source);
         MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
                 .addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
-                .prepareSearch("prelertresults-" + jobId, Bucket.TYPE.getPreferredName(), 0, 0, response, queryBuilder);
+                .prepareSearch("prelertresults-" + jobId, Result.TYPE.getPreferredName(), 0, 0, response, queryBuilder);

         Client client = clientBuilder.build();
         ElasticsearchJobProvider provider = createProvider(client);
@@ -418,7 +418,7 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
         SearchResponse response = createSearchResponse(true, source);
         MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
                 .addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
-                .prepareSearch("prelertresults-" + jobId, AnomalyRecord.TYPE.getPreferredName(), from, size, response, queryBuilder);
+                .prepareSearch("prelertresults-" + jobId, Result.TYPE.getPreferredName(), from, size, response, queryBuilder);

         Client client = clientBuilder.build();
         ElasticsearchJobProvider provider = createProvider(client);
@@ -468,7 +468,7 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
         SearchResponse response = createSearchResponse(true, source);
         MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
                 .addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
-                .prepareSearch("prelertresults-" + jobId, AnomalyRecord.TYPE.getPreferredName(), from, size, response, queryBuilder);
+                .prepareSearch("prelertresults-" + jobId, Result.TYPE.getPreferredName(), from, size, response, queryBuilder);

         Client client = clientBuilder.build();
         ElasticsearchJobProvider provider = createProvider(client);
@@ -525,7 +525,7 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
         SearchResponse response = createSearchResponse(true, source);
         MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
                 .addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
-                .prepareSearch("prelertresults-" + jobId, AnomalyRecord.TYPE.getPreferredName(), from, size, response, queryBuilder);
+                .prepareSearch("prelertresults-" + jobId, Result.TYPE.getPreferredName(), from, size, response, queryBuilder);

         Client client = clientBuilder.build();
         ElasticsearchJobProvider provider = createProvider(client);
@@ -564,7 +564,7 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
         SearchResponse response = createSearchResponse(true, source);
         MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
                 .addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
-                .prepareSearchAnySize("prelertresults-" + jobId, AnomalyRecord.TYPE.getPreferredName(), response, queryBuilder);
+                .prepareSearchAnySize("prelertresults-" + jobId, Result.TYPE.getPreferredName(), response, queryBuilder);

         Client client = clientBuilder.build();
         ElasticsearchJobProvider provider = createProvider(client);
@@ -596,7 +596,7 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
         SearchResponse response = createSearchResponse(true, source);
         MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
                 .addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
-                .prepareSearchAnySize("prelertresults-" + jobId, AnomalyRecord.TYPE.getPreferredName(), response, queryBuilder);
+                .prepareSearchAnySize("prelertresults-" + jobId, Result.TYPE.getPreferredName(), response, queryBuilder);

         Client client = clientBuilder.build();
         ElasticsearchJobProvider provider = createProvider(client);
@@ -691,7 +691,8 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
         SearchResponse response = createSearchResponse(true, source);
         MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
                 .addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
-                .prepareSearch("prelertresults-" + jobId, Influencer.TYPE.getPreferredName(), from, size, response, queryBuilder);
+                .prepareSearch("prelertresults-" + jobId, Result.TYPE.getPreferredName(),
+                        from, size, response, queryBuilder);

         Client client = clientBuilder.build();
         ElasticsearchJobProvider provider = createProvider(client);
@@ -751,7 +752,8 @@ public class ElasticsearchJobProviderTests extends ESTestCase {
         SearchResponse response = createSearchResponse(true, source);
         MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME).addClusterStatusYellowResponse()
                 .addIndicesExistsResponse(ElasticsearchJobProvider.PRELERT_USAGE_INDEX, true)
-                .prepareSearch("prelertresults-" + jobId, Influencer.TYPE.getPreferredName(), from, size, response, queryBuilder);
+                .prepareSearch("prelertresults-" + jobId, Result.TYPE.getPreferredName(), from, size, response,
+                        queryBuilder);

         Client client = clientBuilder.build();
         ElasticsearchJobProvider provider = createProvider(client);
@@ -18,13 +18,10 @@ import org.elasticsearch.xpack.prelert.job.audit.AuditActivity;
 import org.elasticsearch.xpack.prelert.job.audit.AuditMessage;
 import org.elasticsearch.xpack.prelert.job.metadata.Allocation;
 import org.elasticsearch.xpack.prelert.job.quantiles.Quantiles;
-import org.elasticsearch.xpack.prelert.job.results.AnomalyRecord;
-import org.elasticsearch.xpack.prelert.job.results.Bucket;
-import org.elasticsearch.xpack.prelert.job.results.BucketInfluencer;
 import org.elasticsearch.xpack.prelert.job.results.CategoryDefinition;
-import org.elasticsearch.xpack.prelert.job.results.Influencer;
 import org.elasticsearch.xpack.prelert.job.results.ModelDebugOutput;
 import org.elasticsearch.xpack.prelert.job.results.ReservedFieldNames;
+import org.elasticsearch.xpack.prelert.job.results.Result;
 import org.elasticsearch.xpack.prelert.job.usage.Usage;
 import org.elasticsearch.xpack.prelert.lists.ListDocument;
@@ -38,6 +35,7 @@ import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.nio.charset.StandardCharsets;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
@@ -48,15 +46,15 @@ public class ElasticsearchMappingsTests extends ESTestCase {
         JsonToken token = parser.nextToken();
         while (token != null && token != JsonToken.END_OBJECT) {
             switch (token) {
             case START_OBJECT:
                 parseJson(parser, expected);
                 break;
             case FIELD_NAME:
                 String fieldName = parser.getCurrentName();
                 expected.add(fieldName);
                 break;
             default:
                 break;
             }
             token = parser.nextToken();
         }
@@ -85,16 +83,13 @@ public class ElasticsearchMappingsTests extends ESTestCase {
         overridden.add(ElasticsearchMappings.WHITESPACE);

         // These are not reserved because they're data types, not field names
-        overridden.add(AnomalyRecord.TYPE.getPreferredName());
+        overridden.add(Result.TYPE.getPreferredName());
         overridden.add(AuditActivity.TYPE.getPreferredName());
         overridden.add(AuditMessage.TYPE.getPreferredName());
-        overridden.add(Bucket.TYPE.getPreferredName());
         overridden.add(DataCounts.TYPE.getPreferredName());
         overridden.add(ReservedFieldNames.BUCKET_PROCESSING_TIME_TYPE);
-        overridden.add(BucketInfluencer.TYPE.getPreferredName());
         overridden.add(CategorizerState.TYPE);
         overridden.add(CategoryDefinition.TYPE.getPreferredName());
-        overridden.add(Influencer.TYPE.getPreferredName());
         overridden.add(Job.TYPE);
         overridden.add(ListDocument.TYPE.getPreferredName());
         overridden.add(ModelDebugOutput.TYPE.getPreferredName());
@@ -129,12 +124,7 @@ public class ElasticsearchMappingsTests extends ESTestCase {
         parser = new JsonFactory().createParser(inputStream);
         parseJson(parser, expected);

-        builder = ElasticsearchMappings.bucketInfluencerMapping();
-        inputStream = new BufferedInputStream(new ByteArrayInputStream(builder.string().getBytes(StandardCharsets.UTF_8)));
-        parser = new JsonFactory().createParser(inputStream);
-        parseJson(parser, expected);
-
-        builder = ElasticsearchMappings.bucketMapping();
+        builder = ElasticsearchMappings.resultsMapping(Collections.emptyList());
         inputStream = new BufferedInputStream(new ByteArrayInputStream(builder.string().getBytes(StandardCharsets.UTF_8)));
         parser = new JsonFactory().createParser(inputStream);
         parseJson(parser, expected);
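The per-type mapping builders (`bucketMapping`, `bucketInfluencerMapping`, and below `influencerMapping` and `recordMapping`) are folded into a single `resultsMapping`. A hedged sketch of the minimum that shared mapping must declare so the `result_type` term filters can match exactly; the builder shape here is an assumption for illustration, only the type name and the keyword field come from this commit:

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    // The shared "result" mapping must index result_type as an exact-match keyword.
    XContentBuilder mapping = XContentFactory.jsonBuilder()
            .startObject()
                .startObject(Result.TYPE.getPreferredName())
                    .startObject("properties")
                        .startObject("result_type")
                            .field("type", "keyword")
                        .endObject()
                    .endObject()
                .endObject()
            .endObject();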
@@ -159,11 +149,6 @@ public class ElasticsearchMappingsTests extends ESTestCase {
         parser = new JsonFactory().createParser(inputStream);
         parseJson(parser, expected);

-        builder = ElasticsearchMappings.influencerMapping(null);
-        inputStream = new BufferedInputStream(new ByteArrayInputStream(builder.string().getBytes(StandardCharsets.UTF_8)));
-        parser = new JsonFactory().createParser(inputStream);
-        parseJson(parser, expected);
-
         builder = ElasticsearchMappings.modelDebugOutputMapping(null);
         inputStream = new BufferedInputStream(new ByteArrayInputStream(builder.string().getBytes(StandardCharsets.UTF_8)));
         parser = new JsonFactory().createParser(inputStream);
@@ -194,17 +179,24 @@ public class ElasticsearchMappingsTests extends ESTestCase {
         parser = new JsonFactory().createParser(inputStream);
         parseJson(parser, expected);

-        builder = ElasticsearchMappings.recordMapping(null);
-        inputStream = new BufferedInputStream(new ByteArrayInputStream(builder.string().getBytes(StandardCharsets.UTF_8)));
-        parser = new JsonFactory().createParser(inputStream);
-        parseJson(parser, expected);
-
         builder = ElasticsearchMappings.usageMapping();
         inputStream = new BufferedInputStream(new ByteArrayInputStream(builder.string().getBytes(StandardCharsets.UTF_8)));
         parser = new JsonFactory().createParser(inputStream);
         parseJson(parser, expected);

         expected.removeAll(overridden);

+        if (ReservedFieldNames.RESERVED_FIELD_NAMES.size() != expected.size()) {
+            Set<String> diff = new HashSet<>(ReservedFieldNames.RESERVED_FIELD_NAMES);
+            diff.removeAll(expected);
+            System.out.println("Fields in ReservedFieldNames but not in expected: " + diff);
+
+            diff = new HashSet<>(expected);
+            diff.removeAll(ReservedFieldNames.RESERVED_FIELD_NAMES);
+            System.out.println("Fields in expected but not in ReservedFieldNames: " + diff);
+        }
+        assertEquals(ReservedFieldNames.RESERVED_FIELD_NAMES.size(), expected.size());

         for (String s : expected) {
             // By comparing like this the failure messages say which string is
             // missing
@@ -10,11 +10,13 @@ import org.elasticsearch.client.Client;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.prelert.job.results.Result;
+import org.mockito.ArgumentCaptor;
+
 import org.elasticsearch.xpack.prelert.job.results.AnomalyRecord;
 import org.elasticsearch.xpack.prelert.job.results.Bucket;
 import org.elasticsearch.xpack.prelert.job.results.BucketInfluencer;
 import org.elasticsearch.xpack.prelert.job.results.Influencer;
-import org.mockito.ArgumentCaptor;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -35,8 +37,8 @@ public class JobResultsPersisterTests extends ESTestCase {
         BulkResponse response = mock(BulkResponse.class);
         String responseId = "abcXZY54321";
         MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME)
-                .prepareIndex("prelertresults-" + JOB_ID, Bucket.TYPE.getPreferredName(), responseId, captor)
-                .prepareIndex("prelertresults-" + JOB_ID, BucketInfluencer.TYPE.getPreferredName(), "", captor)
+                .prepareIndex("prelertresults-" + JOB_ID, Result.TYPE.getPreferredName(), responseId, captor)
+                .prepareIndex("prelertresults-" + JOB_ID, Result.TYPE.getPreferredName(), "", captor)
                 .prepareBulk(response);

         Client client = clientBuilder.build();
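The persister tests make the same point from the write side: buckets, bucket influencers, records and influencers are all expected under the one `result` type. A sketch of the index call these `prepareIndex` expectations stand in for; `content` is an illustrative `XContentBuilder` holding the serialized result, not a name from this commit:

    // Index any result document under the single "result" type; which kind of
    // result it is travels inside the document body as the result_type field.
    client.prepareIndex("prelertresults-" + JOB_ID, Result.TYPE.getPreferredName())
            .setSource(content)
            .execute().actionGet();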
@@ -91,7 +93,7 @@ public class JobResultsPersisterTests extends ESTestCase {
         ArgumentCaptor<XContentBuilder> captor = ArgumentCaptor.forClass(XContentBuilder.class);
         BulkResponse response = mock(BulkResponse.class);
         MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME)
-                .prepareIndex("prelertresults-" + JOB_ID, AnomalyRecord.TYPE.getPreferredName(), "", captor)
+                .prepareIndex("prelertresults-" + JOB_ID, Result.TYPE.getPreferredName(), "", captor)
                 .prepareBulk(response);
         Client client = clientBuilder.build();
@@ -153,7 +155,7 @@ public class JobResultsPersisterTests extends ESTestCase {
         ArgumentCaptor<XContentBuilder> captor = ArgumentCaptor.forClass(XContentBuilder.class);
         BulkResponse response = mock(BulkResponse.class);
         MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME)
-                .prepareIndex("prelertresults-" + JOB_ID, Influencer.TYPE.getPreferredName(), "", captor)
+                .prepareIndex("prelertresults-" + JOB_ID, Result.TYPE.getPreferredName(), "", captor)
                 .prepareBulk(response);
         Client client = clientBuilder.build();
@@ -4,16 +4,21 @@ setup:
       index: prelertresults-farequote
       body:
         mappings:
-          bucket:
+          results:
             properties:
+              "jobId":
+                type: keyword
+              "result_type":
+                type: keyword
               "timestamp":
                 type: date

   - do:
       index:
         index: prelertresults-farequote
-        type: bucket
+        type: result
         id: 1
-        body: { "jobId": "farequote", "timestamp": "2016-06-01T00:00:00Z" }
+        body: { "jobId": "farequote", "result_type": "bucket", "timestamp": "2016-06-01T00:00:00Z" }

   - do:
       indices.refresh:
@@ -30,9 +35,10 @@ setup:
   - match: { count: 1 }
   - match: { buckets.0.timestamp: 1464739200000 }
   - match: { buckets.0.jobId: farequote }
+  - match: { buckets.0.result_type: bucket }

 ---
-"Test result bucket api":
+"Test result single bucket api":
   - do:
       xpack.prelert.get_buckets:
         jobId: "farequote"
@@ -40,3 +46,4 @@ setup:

   - match: { buckets.0.timestamp: 1464739200000 }
   - match: { buckets.0.jobId: farequote }
+  - match: { buckets.0.result_type: bucket }
@@ -4,7 +4,7 @@ setup:
       index: prelertresults-farequote
       body:
         mappings:
-          influencer:
+          result:
             properties:
               "jobId":
                 type: keyword
@@ -12,17 +12,20 @@ setup:
                 type: date
               "anomalyScore":
                 type: float
+              "result_type":
+                type: keyword
   - do:
       index:
         index: prelertresults-farequote
-        type: influencer
+        type: result
         id: 1
         body:
           {
             "jobId": "farequote",
             "timestamp": "2016-06-01T00:00:00Z",
             "influencerFieldName": "foo",
-            "influencerFieldValue": "bar"
+            "influencerFieldValue": "bar",
+            "result_type": "influencer"
           }

   - do:
@@ -30,9 +30,9 @@ setup:
   - do:
       index:
         index: prelertresults-farequote
-        type: record
+        type: result
         id: 2
-        body: { "jobId": "farequote", "timestamp": "2016-06-01T00:00:00Z", "result_type": "record" }
+        body: { "jobId": "farequote", "result_type": "record", "timestamp": "2016-06-01T00:00:00Z" }

   - do:
       indices.refresh:
@@ -50,23 +50,23 @@ setup:
   - do:
       index:
         index: prelertresults-foo
-        type: bucket
+        type: result
         id: 1
-        body: { "jobId": "foo", "timestamp": "2016-06-02T00:00:00Z" }
+        body: { "jobId": "foo", "result_type": "bucket", "timestamp": "2016-06-02T00:00:00Z" }

   - do:
       index:
         index: prelertresults-foo
-        type: bucket
+        type: result
         id: 2
-        body: { "jobId": "foo", "timestamp": "2016-06-01T12:00:00Z" }
+        body: { "jobId": "foo", "result_type": "bucket", "timestamp": "2016-06-01T12:00:00Z" }

   - do:
       index:
         index: prelertresults-foo
-        type: bucket
+        type: result
         id: 3
-        body: { "jobId": "foo", "timestamp": "2016-05-01T00:00:00Z" }
+        body: { "jobId": "foo", "result_type": "bucket", "timestamp": "2016-05-01T00:00:00Z" }

   - do:
       indices.refresh: