Store input fields for anomaly records and influencers (elastic/elasticsearch#799)

* Store input fields for anomaly records and influencers

* Address review comments

* Remove DotNotationReverser

* Remove duplicated constants

* Can’t use the same date for all records as they will have equivalent Ids

Original commit: elastic/x-pack-elasticsearch@40796b5efc
David Kyle 2017-01-30 13:43:15 +00:00
parent 79d1a10a86
commit 4eab74ce29
11 changed files with 204 additions and 233 deletions
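
In effect, the input fields that produced a result are now stored on the result document itself, keyed by their own names, which makes them searchable. A hedged sketch of the new behaviour (the job name "my-job" and the field name/value "instance"/"host-1" are invented for illustration):

    AnomalyRecord record = new AnomalyRecord("my-job", new Date(), 3600L, 1);
    record.setByFieldName("instance");
    record.setByFieldValue("host-1");
    // toXContent() now emits, alongside by_field_name/by_field_value,
    // the value under the input field's own name:
    //   "instance" : [ "host-1" ]
    // Reserved result field names (e.g. bucket_span) are skipped.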

ElasticsearchDotNotationReverser.java (deleted)

@@ -1,137 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.job.persistence;

import java.util.Locale;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Pattern;

import org.elasticsearch.xpack.ml.job.results.ReservedFieldNames;

/**
 * Interprets field names containing dots as nested JSON structures.
 * This matches what Elasticsearch does.
 */
class ElasticsearchDotNotationReverser {

    private static final char DOT = '.';
    private static final Pattern DOT_PATTERN = Pattern.compile("\\.");

    private final Map<String, Object> resultsMap;

    public ElasticsearchDotNotationReverser() {
        resultsMap = new TreeMap<>();
    }

    // TODO - could handle values of all types Elasticsearch does, e.g. date,
    // long, int, double, etc. However, at the moment field values in our
    // results are only strings, so it's not "minimum viable product" right
    // now. Hence this method only takes fieldValue as a String and there are
    // no overloads.
    /**
     * Given a field name and value, convert it to a map representation of the
     * (potentially nested) JSON structure Elasticsearch would use to store it.
     * For example:
     * <code>foo = x</code> goes to <code>{ "foo" : "x" }</code> and
     * <code>foo.bar = y</code> goes to <code>{ "foo" : { "bar" : "y" } }</code>
     */
    @SuppressWarnings("unchecked")
    public void add(String fieldName, String fieldValue) {
        if (fieldName == null || fieldValue == null) {
            return;
        }

        // Minimise processing in the simple case of no dots in the field name.
        if (fieldName.indexOf(DOT) == -1) {
            if (ReservedFieldNames.RESERVED_FIELD_NAMES.contains(fieldName)) {
                return;
            }
            resultsMap.put(fieldName, fieldValue);
            return;
        }

        String[] segments = DOT_PATTERN.split(fieldName);

        // If any segment created by the split is a reserved word then ignore
        // the whole field.
        for (String segment : segments) {
            if (ReservedFieldNames.RESERVED_FIELD_NAMES.contains(segment)) {
                return;
            }
        }

        Map<String, Object> layerMap = resultsMap;
        for (int i = 0; i < segments.length; ++i) {
            String segment = segments[i];
            if (i == segments.length - 1) {
                layerMap.put(segment, fieldValue);
            } else {
                Object existingLayerValue = layerMap.get(segment);
                if (existingLayerValue == null) {
                    Map<String, Object> nextLayerMap = new TreeMap<>();
                    layerMap.put(segment, nextLayerMap);
                    layerMap = nextLayerMap;
                } else {
                    if (existingLayerValue instanceof Map) {
                        layerMap = (Map<String, Object>) existingLayerValue;
                    } else {
                        // This implies an inconsistency - different additions
                        // imply the same path leads to both an object and a
                        // value. For example:
                        // foo.bar = x
                        // foo.bar.baz = y
                        return;
                    }
                }
            }
        }
    }

    public Map<String, Object> getResultsMap() {
        return resultsMap;
    }

    /**
     * Mappings for a given hierarchical structure are more complex than the
     * basic results.
     */
    public Map<String, Object> getMappingsMap() {
        Map<String, Object> mappingsMap = new TreeMap<>();
        recurseMappingsLevel(resultsMap, mappingsMap);
        return mappingsMap;
    }

    @SuppressWarnings("unchecked")
    private void recurseMappingsLevel(Map<String, Object> resultsMap, Map<String, Object> mappingsMap) {
        for (Map.Entry<String, Object> entry : resultsMap.entrySet()) {
            Map<String, Object> typeMap = new TreeMap<>();

            String name = entry.getKey();
            Object value = entry.getValue();
            if (value instanceof Map) {
                Map<String, Object> propertiesMap = new TreeMap<>();
                recurseMappingsLevel((Map<String, Object>) value, propertiesMap);

                typeMap.put(ElasticsearchMappings.TYPE, ElasticsearchMappings.OBJECT);
                typeMap.put(ElasticsearchMappings.PROPERTIES, propertiesMap);
                mappingsMap.put(name, typeMap);
            } else {
                String fieldType = value.getClass().getSimpleName().toLowerCase(Locale.ROOT);
                if ("string".equals(fieldType)) {
                    fieldType = "keyword";
                }
                typeMap.put(ElasticsearchMappings.TYPE,
                        // Even though the add() method currently only supports
                        // strings, this way of getting the type would work for
                        // many Elasticsearch types, e.g. date, int, long,
                        // double and boolean
                        fieldType);
                mappingsMap.put(name, typeMap);
            }
        }
    }
}

ElasticsearchMappings.java

@@ -12,6 +12,7 @@ import org.elasticsearch.xpack.ml.job.config.Job;
 import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSizeStats;
 import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSnapshot;
 import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelState;
+import org.elasticsearch.xpack.ml.job.results.ReservedFieldNames;
 import org.elasticsearch.xpack.ml.notifications.AuditActivity;
 import org.elasticsearch.xpack.ml.notifications.AuditMessage;
 import org.elasticsearch.xpack.ml.job.process.autodetect.state.Quantiles;
@@ -226,13 +227,9 @@ public class ElasticsearchMappings
         addInfluencerFieldsToMapping(builder);
         addModelSizeStatsFieldsToMapping(builder);

-        if (termFieldNames != null) {
-            ElasticsearchDotNotationReverser reverser = new ElasticsearchDotNotationReverser();
-            for (String fieldName : termFieldNames) {
-                reverser.add(fieldName, "");
-            }
-            for (Map.Entry<String, Object> entry : reverser.getMappingsMap().entrySet()) {
-                builder.field(entry.getKey(), entry.getValue());
-            }
-        }
+        for (String fieldName : termFieldNames) {
+            if (ReservedFieldNames.isValidFieldName(fieldName)) {
+                builder.startObject(fieldName).field(TYPE, KEYWORD).endObject();
+            }
+        }
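
The new loop maps every non-reserved term field flat at the top level as a keyword, even when its name contains dots, instead of reversing dots into nested object mappings. A sketch of the difference, assuming invented term field names "instance" and "cpu.user" (the reserved-name guard and the TYPE/KEYWORD constants are simplified to literals):

    List<String> termFieldNames = Arrays.asList("instance", "cpu.user");
    XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
    for (String fieldName : termFieldNames) {
        // in the real code, ReservedFieldNames.isValidFieldName(fieldName) guards this
        builder.startObject(fieldName).field("type", "keyword").endObject();
    }
    builder.endObject();
    // builder.string() -> {"instance":{"type":"keyword"},"cpu.user":{"type":"keyword"}}
    // The removed reverser would instead have nested "cpu.user" as
    //   "cpu":{"type":"object","properties":{"user":{"type":"keyword"}}}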

AnomalyRecord.java

@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.job.results;
 import org.elasticsearch.action.support.ToXContentToBytes;
 import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -21,8 +22,12 @@ import org.elasticsearch.xpack.ml.utils.time.TimeUtils;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
+import java.util.Set;

 /**
  * Anomaly Record POJO.
@@ -78,7 +83,8 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable {
     public static final ParseField INITIAL_NORMALIZED_PROBABILITY = new ParseField("initial_normalized_probability");

     public static final ConstructingObjectParser<AnomalyRecord, Void> PARSER =
-            new ConstructingObjectParser<>(RESULT_TYPE_VALUE, a -> new AnomalyRecord((String) a[0], (Date) a[1], (long) a[2], (int) a[3]));
+            new ConstructingObjectParser<>(RESULT_TYPE_VALUE, true,
+                    a -> new AnomalyRecord((String) a[0], (Date) a[1], (long) a[2], (int) a[3]));

     static {
         PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
@@ -144,7 +150,7 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable {
     private final Date timestamp;
     private final long bucketSpan;

-    private List<Influence> influencers;
+    private List<Influence> influences;

     public AnomalyRecord(String jobId, Date timestamp, long bucketSpan, int sequenceNum) {
         this.jobId = jobId;
@@ -185,7 +191,7 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable {
         timestamp = new Date(in.readLong());
         bucketSpan = in.readLong();
         if (in.readBoolean()) {
-            influencers = in.readList(Influence::new);
+            influences = in.readList(Influence::new);
         }
     }
@@ -226,10 +232,10 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable {
         out.writeDouble(initialNormalizedProbability);
         out.writeLong(timestamp.getTime());
         out.writeLong(bucketSpan);
-        boolean hasInfluencers = influencers != null;
+        boolean hasInfluencers = influences != null;
         out.writeBoolean(hasInfluencers);
         if (hasInfluencers) {
-            out.writeList(influencers);
+            out.writeList(influences);
         }
     }
@@ -286,13 +292,45 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable {
         if (causes != null) {
             builder.field(CAUSES.getPreferredName(), causes);
         }
-        if (influencers != null) {
-            builder.field(INFLUENCERS.getPreferredName(), influencers);
+        if (influences != null) {
+            builder.field(INFLUENCERS.getPreferredName(), influences);
         }
+
+        Map<String, Set<String>> inputFields = inputFieldMap();
+        for (String fieldName : inputFields.keySet()) {
+            builder.field(fieldName, inputFields.get(fieldName));
+        }
+
         builder.endObject();
         return builder;
     }

+    private Map<String, Set<String>> inputFieldMap() {
+        Map<String, Set<String>> result = new HashMap<>();
+        addInputFieldsToMap(result, byFieldName, byFieldValue);
+        addInputFieldsToMap(result, overFieldName, overFieldValue);
+        addInputFieldsToMap(result, partitionFieldName, partitionFieldValue);
+
+        if (influences != null) {
+            for (Influence inf : influences) {
+                String fieldName = inf.getInfluencerFieldName();
+                for (String fieldValue : inf.getInfluencerFieldValues()) {
+                    addInputFieldsToMap(result, fieldName, fieldValue);
+                }
+            }
+        }
+        return result;
+    }
+
+    private void addInputFieldsToMap(Map<String, Set<String>> inputFields, String fieldName, String fieldValue) {
+        if (!Strings.isNullOrEmpty(fieldName) && fieldValue != null) {
+            if (ReservedFieldNames.isValidFieldName(fieldName)) {
+                inputFields.computeIfAbsent(fieldName, k -> new HashSet<String>()).add(fieldValue);
+            }
+        }
+    }
+
     public String getJobId() {
         return this.jobId;
     }
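
inputFieldMap() folds the by/over/partition fields and all influencer values into a single map of sets, so the same field name arriving from several sources merges its values rather than producing duplicate keys. A standalone sketch of that merge (field name and values invented):

    Map<String, Set<String>> inputFields = new HashMap<>();
    // by_field_value for "instance"
    inputFields.computeIfAbsent("instance", k -> new HashSet<>()).add("host-1");
    // an influencer value for the same field lands in the same set
    inputFields.computeIfAbsent("instance", k -> new HashSet<>()).add("host-2");
    // inputFields is now {instance=[host-1, host-2]} (set order may vary),
    // serialised by toXContent as "instance" : [ "host-1", "host-2" ]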
@@ -475,11 +513,11 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable {
     }

     public List<Influence> getInfluencers() {
-        return influencers;
+        return influences;
     }

     public void setInfluencers(List<Influence> influencers) {
-        this.influencers = influencers;
+        this.influences = influencers;
     }
@@ -489,7 +527,7 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable {
                 normalizedProbability, initialNormalizedProbability, typical, actual,
                 function, functionDescription, fieldName, byFieldName, byFieldValue, correlatedByFieldValue,
                 partitionFieldName, partitionFieldValue, overFieldName, overFieldValue,
-                timestamp, isInterim, causes, influencers, jobId);
+                timestamp, isInterim, causes, influences, jobId);
     }
@@ -528,6 +566,6 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable {
                 && Objects.equals(this.timestamp, that.timestamp)
                 && Objects.equals(this.isInterim, that.isInterim)
                 && Objects.equals(this.causes, that.causes)
-                && Objects.equals(this.influencers, that.influencers);
+                && Objects.equals(this.influences, that.influences);
     }
 }

Influencer.java

@@ -45,7 +45,7 @@ public class Influencer extends ToXContentToBytes implements Writeable {
     public static final ParseField RESULTS_FIELD = new ParseField("influencers");

     public static final ConstructingObjectParser<Influencer, Void> PARSER = new ConstructingObjectParser<>(
-            RESULT_TYPE_FIELD.getPreferredName(), a -> new Influencer((String) a[0], (String) a[1], (String) a[2],
+            RESULT_TYPE_FIELD.getPreferredName(), true, a -> new Influencer((String) a[0], (String) a[1], (String) a[2],
                     (Date) a[3], (long) a[4], (int) a[5]));

     static {
@@ -123,6 +123,9 @@ public class Influencer extends ToXContentToBytes implements Writeable {
         builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE);
         builder.field(INFLUENCER_FIELD_NAME.getPreferredName(), influenceField);
         builder.field(INFLUENCER_FIELD_VALUE.getPreferredName(), influenceValue);
+        if (ReservedFieldNames.isValidFieldName(influenceField)) {
+            builder.field(influenceField, influenceValue);
+        }
         builder.field(ANOMALY_SCORE.getPreferredName(), anomalyScore);
         builder.field(INITIAL_ANOMALY_SCORE.getPreferredName(), initialAnomalyScore);
         builder.field(PROBABILITY.getPreferredName(), probability);
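
An influencer result likewise now carries its value directly under the influencer field's own name, provided that name is not reserved. A sketch with invented values:

    Influencer influencer = new Influencer("my-job", "instance", "host-1", new Date(), 300L, 0);
    // toXContent() now writes, in addition to the name/value metadata fields:
    //   "influencer_field_name" : "instance",
    //   "influencer_field_value" : "host-1",
    //   "instance" : "host-1"
    // A reserved name such as anomaly_score would not get the extra field.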

ReservedFieldNames.java

@@ -15,6 +15,7 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.state.Quantiles;
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
+import java.util.regex.Pattern;

 /**
@@ -25,15 +26,7 @@ import java.util.Set;
  * contains raw data and in others it contains some aspect of our output.
  */
 public final class ReservedFieldNames {
-    /**
-     * jobId isn't in this package, so redefine.
-     */
-    private static final String JOB_ID_NAME = Job.ID.getPreferredName();
-
-    /**
-     * @timestamp isn't in this package, so redefine.
-     */
-    private static final String ES_TIMESTAMP = "timestamp";
+    private static final Pattern DOT_PATTERN = Pattern.compile("\\.");

     /**
      * This array should be updated to contain all the field names that appear
@@ -45,6 +38,8 @@
     private static final String[] RESERVED_FIELD_NAME_ARRAY = {
             ElasticsearchMappings.ALL_FIELD_VALUES,

+            Job.ID.getPreferredName(),
+
             AnomalyCause.PROBABILITY.getPreferredName(),
             AnomalyCause.OVER_FIELD_NAME.getPreferredName(),
             AnomalyCause.OVER_FIELD_VALUE.getPreferredName(),
@@ -93,6 +88,7 @@
             Bucket.INITIAL_ANOMALY_SCORE.getPreferredName(),
             Bucket.PROCESSING_TIME_MS.getPreferredName(),
             Bucket.PARTITION_SCORES.getPreferredName(),
+            Bucket.TIMESTAMP.getPreferredName(),
             BucketInfluencer.INITIAL_ANOMALY_SCORE.getPreferredName(), BucketInfluencer.ANOMALY_SCORE.getPreferredName(),
             BucketInfluencer.RAW_ANOMALY_SCORE.getPreferredName(), BucketInfluencer.PROBABILITY.getPreferredName(),
@@ -151,12 +147,24 @@
             Quantiles.QUANTILE_STATE.getPreferredName(),

-            Result.RESULT_TYPE.getPreferredName(),
-
-            JOB_ID_NAME,
-            ES_TIMESTAMP
+            Result.RESULT_TYPE.getPreferredName()
     };

+    /**
+     * Test whether fieldName is a valid field name, i.e. not one of the
+     * reserved names. A fieldName containing dots represents nested fields,
+     * in which case only the segment before the first dot is checked against
+     * the reserved names.
+     *
+     * @param fieldName Document field name. This may contain dots '.'
+     * @return True if fieldName (or, for dotted names, its top-level segment)
+     * is not a reserved name.
+     */
+    public static boolean isValidFieldName(String fieldName) {
+        String[] segments = DOT_PATTERN.split(fieldName);
+        return !RESERVED_FIELD_NAMES.contains(segments[0]);
+    }
+
     /**
      * A set of all reserved field names in our results. Fields from the raw
      * data with these names are not added to any result.

AutodetectResultProcessorIT.java

@@ -29,13 +29,11 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.Renormalizer;
 import org.elasticsearch.xpack.ml.job.process.normalizer.noop.NoOpRenormalizer;
 import org.elasticsearch.xpack.ml.job.process.autodetect.state.Quantiles;
 import org.elasticsearch.xpack.ml.job.results.AnomalyRecord;
-import org.elasticsearch.xpack.ml.job.results.AnomalyRecordTests;
 import org.elasticsearch.xpack.ml.job.results.Bucket;
 import org.elasticsearch.xpack.ml.job.results.BucketTests;
 import org.elasticsearch.xpack.ml.job.results.CategoryDefinition;
 import org.elasticsearch.xpack.ml.job.results.CategoryDefinitionTests;
 import org.elasticsearch.xpack.ml.job.results.Influencer;
-import org.elasticsearch.xpack.ml.job.results.InfluencerTests;
 import org.elasticsearch.xpack.ml.job.results.ModelDebugOutput;
 import org.elasticsearch.xpack.ml.job.results.ModelDebugOutputTests;
 import org.junit.Before;
@@ -121,7 +119,7 @@ public class AutodetectResultProcessorIT extends ESSingleNodeTestCase {
         bucket.setRecords(Collections.emptyList());
         assertEquals(bucket, persistedBucket.results().get(0));

-        QueryPage<AnomalyRecord> persistedRecords = getRecords(new RecordsQueryBuilder().includeInterim(true).build());
+        QueryPage<AnomalyRecord> persistedRecords = getRecords(new RecordsQueryBuilder().build());
         assertResultsAreSame(records, persistedRecords);

         QueryPage<Influencer> persistedInfluencers = getInfluencers();
@@ -282,9 +280,12 @@
     }

     private void createJob() {
-        Detector detector = new Detector.Builder("avg", "metric_field").build();
+        Detector.Builder detectorBuilder = new Detector.Builder("avg", "metric_field");
+        detectorBuilder.setByFieldName("by_instance");
         Job.Builder jobBuilder = new Job.Builder(JOB_ID);
-        jobBuilder.setAnalysisConfig(new AnalysisConfig.Builder(Collections.singletonList(detector)));
+        AnalysisConfig.Builder analysisConfBuilder = new AnalysisConfig.Builder(Collections.singletonList(detectorBuilder.build()));
+        analysisConfBuilder.setInfluencers(Collections.singletonList("influence_field"));
+        jobBuilder.setAnalysisConfig(analysisConfBuilder);

         jobProvider.createJobResultIndex(jobBuilder.build(), new ActionListener<Boolean>() {
             @Override
@@ -307,9 +308,11 @@
         List<AnomalyRecord> records = new ArrayList<>();
         int count = randomIntBetween(0, 100);
-        AnomalyRecordTests anomalyRecordGenerator = new AnomalyRecordTests();
+        Date now = new Date(randomNonNegativeLong());
         for (int i=0; i<count; i++) {
-            AnomalyRecord r = anomalyRecordGenerator.createTestInstance(JOB_ID, i);
+            AnomalyRecord r = new AnomalyRecord(JOB_ID, now, 3600L, i);
+            r.setByFieldName("by_instance");
+            r.setByFieldValue(randomAsciiOfLength(8));
             r.setInterim(isInterim);
             records.add(r);
         }
@@ -320,9 +323,9 @@
         List<Influencer> influencers = new ArrayList<>();
         int count = randomIntBetween(0, 100);
-        InfluencerTests influencerGenerator = new InfluencerTests();
+        Date now = new Date();
         for (int i=0; i<count; i++) {
-            Influencer influencer = influencerGenerator.createTestInstance(JOB_ID);
+            Influencer influencer = new Influencer(JOB_ID, "influence_field", randomAsciiOfLength(10), now, 3600L, i);
             influencer.setInterim(isInterim);
             influencers.add(influencer);
         }

ElasticsearchDotNotationReverserTests.java (deleted)

@@ -1,55 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.job.persistence;

import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.test.ESTestCase;

public class ElasticsearchDotNotationReverserTests extends ESTestCase {

    public void testResultsMap() throws Exception {
        ElasticsearchDotNotationReverser reverser = createReverser();

        String expected = "{\"complex\":{\"nested\":{\"structure\":{\"first\":\"x\"," +
                "\"second\":\"y\"},\"value\":\"z\"}},\"cpu\":{\"system\":\"5\"," +
                "\"user\":\"10\",\"wait\":\"1\"},\"simple\":\"simon\"}";
        String actual = XContentFactory.jsonBuilder().map(reverser.getResultsMap()).string();
        assertEquals(expected, actual);
    }

    public void testMappingsMap() throws Exception {
        ElasticsearchDotNotationReverser reverser = createReverser();

        String expected = "{\"complex\":{\"properties\":{\"nested\":{\"properties\":" +
                "{\"structure\":{\"properties\":{\"first\":{\"type\":\"keyword\"}," +
                "\"second\":{\"type\":\"keyword\"}},\"type\":\"object\"}," +
                "\"value\":{\"type\":\"keyword\"}},\"type\":\"object\"}}," +
                "\"type\":\"object\"},\"cpu\":{\"properties\":{\"system\":" +
                "{\"type\":\"keyword\"},\"user\":{\"type\":\"keyword\"}," +
                "\"wait\":{\"type\":\"keyword\"}},\"type\":\"object\"}," +
                "\"simple\":{\"type\":\"keyword\"}}";
        String actual = XContentFactory.jsonBuilder().map(reverser.getMappingsMap()).string();
        assertEquals(expected, actual);
    }

    private ElasticsearchDotNotationReverser createReverser() {
        ElasticsearchDotNotationReverser reverser = new ElasticsearchDotNotationReverser();
        // This should get ignored as it's a reserved field name
        reverser.add("bucket_span", "3600");
        reverser.add("simple", "simon");
        reverser.add("cpu.user", "10");
        reverser.add("cpu.system", "5");
        reverser.add("cpu.wait", "1");
        // This should get ignored as one of its segments is a reserved field name
        reverser.add("foo.bucket_span", "3600");
        reverser.add("complex.nested.structure.first", "x");
        reverser.add("complex.nested.structure.second", "y");
        reverser.add("complex.nested.value", "z");
        return reverser;
    }
}

ElasticsearchMappingsTests.java

@@ -6,6 +6,7 @@
 package org.elasticsearch.xpack.ml.job.persistence;

 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xpack.ml.job.process.autodetect.state.CategorizerState;
 import org.elasticsearch.xpack.ml.job.process.autodetect.state.DataCounts;
@@ -13,6 +14,7 @@ import org.elasticsearch.xpack.ml.job.config.Job;
 import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSizeStats;
 import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSnapshot;
 import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelState;
+import org.elasticsearch.xpack.ml.job.results.AnomalyRecord;
 import org.elasticsearch.xpack.ml.notifications.AuditActivity;
 import org.elasticsearch.xpack.ml.notifications.AuditMessage;
 import org.elasticsearch.xpack.ml.job.metadata.Allocation;
@@ -32,8 +34,10 @@ import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
@@ -169,4 +173,26 @@
         }
     }

+    @SuppressWarnings("unchecked")
+    public void testResultMapping() throws IOException {
+
+        XContentBuilder builder = ElasticsearchMappings.resultsMapping(
+                Arrays.asList("instance", AnomalyRecord.ANOMALY_SCORE.getPreferredName()));
+
+        XContentParser parser = createParser(builder);
+        Map<String, Object> type = (Map<String, Object>) parser.map().get(Result.TYPE.getPreferredName());
+        Map<String, Object> properties = (Map<String, Object>) type.get(ElasticsearchMappings.PROPERTIES);
+
+        // check a keyword mapping for the 'instance' field was created
+        Map<String, Object> instanceMapping = (Map<String, Object>) properties.get("instance");
+        assertNotNull(instanceMapping);
+        String dataType = (String) instanceMapping.get(ElasticsearchMappings.TYPE);
+        assertEquals(ElasticsearchMappings.KEYWORD, dataType);
+
+        // check anomaly score wasn't overwritten
+        Map<String, Object> anomalyScoreMapping = (Map<String, Object>) properties.get(AnomalyRecord.ANOMALY_SCORE.getPreferredName());
+        assertNotNull(anomalyScoreMapping);
+        dataType = (String) anomalyScoreMapping.get(ElasticsearchMappings.TYPE);
+        assertEquals(ElasticsearchMappings.DOUBLE, dataType);
+    }
 }

AnomalyRecordTests.java

@@ -6,13 +6,18 @@
 package org.elasticsearch.xpack.ml.job.results;

 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.xpack.ml.support.AbstractSerializingTestCase;

+import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Date;
 import java.util.List;
+import java.util.Map;

 public class AnomalyRecordTests extends AbstractSerializingTestCase<AnomalyRecord> {
@@ -79,4 +84,47 @@ public class AnomalyRecordTests extends AbstractSerializingTestCase<AnomalyRecord> {
     protected AnomalyRecord parseInstance(XContentParser parser) {
         return AnomalyRecord.PARSER.apply(parser, null);
     }
+
+    @SuppressWarnings("unchecked")
+    public void testToXContentIncludesInputFields() throws IOException {
+        AnomalyRecord record = createTestInstance();
+        record.setByFieldName("byfn");
+        record.setByFieldValue("byfv");
+        record.setOverFieldName("overfn");
+        record.setOverFieldValue("overfv");
+        record.setPartitionFieldName("partfn");
+        record.setPartitionFieldValue("partfv");
+
+        Influence influence1 = new Influence("inffn", Arrays.asList("inffv1", "inffv2"));
+        Influence influence2 = new Influence("inffn", Arrays.asList("inffv1", "inffv2"));
+        record.setInfluencers(Arrays.asList(influence1, influence2));
+
+        XContentBuilder builder = toXContent(record, XContentType.JSON);
+        XContentParser parser = createParser(builder);
+        Map<String, Object> map = parser.map();
+
+        List<String> serialisedByFieldValues = (List<String>) map.get(record.getByFieldName());
+        assertEquals(Collections.singletonList(record.getByFieldValue()), serialisedByFieldValues);
+
+        List<String> serialisedOverFieldValues = (List<String>) map.get(record.getOverFieldName());
+        assertEquals(Collections.singletonList(record.getOverFieldValue()), serialisedOverFieldValues);
+
+        List<String> serialisedPartFieldValues = (List<String>) map.get(record.getPartitionFieldName());
+        assertEquals(Collections.singletonList(record.getPartitionFieldValue()), serialisedPartFieldValues);
+
+        List<String> serialisedInfFieldValues1 = (List<String>) map.get(influence1.getInfluencerFieldName());
+        assertEquals(influence1.getInfluencerFieldValues(), serialisedInfFieldValues1);
+
+        List<String> serialisedInfFieldValues2 = (List<String>) map.get(influence2.getInfluencerFieldName());
+        assertEquals(influence2.getInfluencerFieldValues(), serialisedInfFieldValues2);
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testToXContentDoesNotIncludesReservedWordInputFields() throws IOException {
+        AnomalyRecord record = createTestInstance();
+        record.setByFieldName(AnomalyRecord.BUCKET_SPAN.getPreferredName());
+        record.setByFieldValue("bar");
+
+        XContentBuilder builder = toXContent(record, XContentType.JSON);
+        XContentParser parser = createParser(builder);
+        Object value = parser.map().get(AnomalyRecord.BUCKET_SPAN.getPreferredName());
+        assertNotEquals("bar", value);
+        assertEquals((Long) record.getBucketSpan(), (Long) value);
+    }
 }

InfluencerTests.java

@@ -6,9 +6,12 @@
 package org.elasticsearch.xpack.ml.job.results;

 import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.xpack.ml.support.AbstractSerializingTestCase;

+import java.io.IOException;
 import java.util.Date;

 public class InfluencerTests extends AbstractSerializingTestCase<Influencer> {
@@ -37,4 +40,22 @@ public class InfluencerTests extends AbstractSerializingTestCase<Influencer> {
         return Influencer.PARSER.apply(parser, null);
     }
+
+    public void testToXContentIncludesNameValueField() throws IOException {
+        Influencer influencer = createTestInstance("foo");
+        XContentBuilder builder = toXContent(influencer, XContentType.JSON);
+        XContentParser parser = createParser(builder);
+        String serialisedFieldName = (String) parser.map().get(influencer.getInfluencerFieldName());
+        assertNotNull(serialisedFieldName);
+        assertEquals(influencer.getInfluencerFieldValue(), serialisedFieldName);
+    }
+
+    public void testToXContentDoesNotIncludeNameValueFieldWhenReservedWord() throws IOException {
+        Influencer influencer = new Influencer("foo", AnomalyRecord.ANOMALY_SCORE.getPreferredName(), "bar", new Date(), 300L, 0);
+        XContentBuilder builder = toXContent(influencer, XContentType.JSON);
+        XContentParser parser = createParser(builder);
+        Object serialisedFieldValue = parser.map().get(AnomalyRecord.ANOMALY_SCORE.getPreferredName());
+        assertNotEquals("bar", serialisedFieldValue);
+        assertEquals(0.0, (Double) serialisedFieldValue, 0.0001);
+    }
 }

ReservedFieldNamesTests.java (new)

@@ -0,0 +1,19 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.job.results;

import org.elasticsearch.test.ESTestCase;

public class ReservedFieldNamesTests extends ESTestCase {

    public void testIsValidFieldName() throws Exception {
        assertTrue(ReservedFieldNames.isValidFieldName("host"));
        assertTrue(ReservedFieldNames.isValidFieldName("host.actual"));
        assertFalse(ReservedFieldNames.isValidFieldName("actual.host"));
        assertFalse(ReservedFieldNames.isValidFieldName(AnomalyRecord.BUCKET_SPAN.getPreferredName()));
    }
}