From 4eab74ce295eddb24d00d1be634baaad1f486bba Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 30 Jan 2017 13:43:15 +0000 Subject: [PATCH] Store input fields for anomaly records and influencers (elastic/elasticsearch#799) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Store input fields for anomaly records and influencers * Address review comments * Remove DotNotationReverser * Remove duplicated constants * Can’t use the same date for all records as they will have equivalent Ids Original commit: elastic/x-pack-elasticsearch@40796b5efc8d2854fdb7c870520bf65e75834aca --- .../ElasticsearchDotNotationReverser.java | 137 ------------------ .../persistence/ElasticsearchMappings.java | 11 +- .../xpack/ml/job/results/AnomalyRecord.java | 60 ++++++-- .../xpack/ml/job/results/Influencer.java | 5 +- .../ml/job/results/ReservedFieldNames.java | 34 +++-- .../AutodetectResultProcessorIT.java | 21 +-- ...ElasticsearchDotNotationReverserTests.java | 55 ------- .../ElasticsearchMappingsTests.java | 26 ++++ .../ml/job/results/AnomalyRecordTests.java | 48 ++++++ .../xpack/ml/job/results/InfluencerTests.java | 21 +++ .../job/results/ReservedFieldNamesTests.java | 19 +++ 11 files changed, 204 insertions(+), 233 deletions(-) delete mode 100644 elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ElasticsearchDotNotationReverser.java delete mode 100644 elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/persistence/ElasticsearchDotNotationReverserTests.java create mode 100644 elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNamesTests.java diff --git a/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ElasticsearchDotNotationReverser.java b/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ElasticsearchDotNotationReverser.java deleted file mode 100644 index 1702d87f70b..00000000000 --- a/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ElasticsearchDotNotationReverser.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.job.persistence; - -import java.util.Locale; -import java.util.Map; -import java.util.TreeMap; -import java.util.regex.Pattern; - -import org.elasticsearch.xpack.ml.job.results.ReservedFieldNames; - -/** - * Interprets field names containing dots as nested JSON structures. - * This matches what Elasticsearch does. - */ -class ElasticsearchDotNotationReverser { - private static final char DOT = '.'; - private static final Pattern DOT_PATTERN = Pattern.compile("\\."); - - private final Map resultsMap; - - public ElasticsearchDotNotationReverser() { - resultsMap = new TreeMap<>(); - } - - // TODO - could handle values of all types Elasticsearch does, e.g. date, - // long, int, double, etc. However, at the moment field values in our - // results are only strings, so it's not "minimum viable product" right - // now. Hence this method only takes fieldValue as a String and there are - // no overloads. - - /** - * Given a field name and value, convert it to a map representation of the - * (potentially nested) JSON structure Elasticsearch would use to store it. 
- * For example: - * foo = x goes to { "foo" : "x" } and - * foo.bar = y goes to { "foo" : { "bar" : "y" } } - */ - @SuppressWarnings("unchecked") - public void add(String fieldName, String fieldValue) { - if (fieldName == null || fieldValue == null) { - return; - } - - // Minimise processing in the simple case of no dots in the field name. - if (fieldName.indexOf(DOT) == -1) { - if (ReservedFieldNames.RESERVED_FIELD_NAMES.contains(fieldName)) { - return; - } - resultsMap.put(fieldName, fieldValue); - return; - } - - String[] segments = DOT_PATTERN.split(fieldName); - - // If any segment created by the split is a reserved word then ignore - // the whole field. - for (String segment : segments) { - if (ReservedFieldNames.RESERVED_FIELD_NAMES.contains(segment)) { - return; - } - } - - Map layerMap = resultsMap; - for (int i = 0; i < segments.length; ++i) { - String segment = segments[i]; - if (i == segments.length - 1) { - layerMap.put(segment, fieldValue); - } else { - Object existingLayerValue = layerMap.get(segment); - if (existingLayerValue == null) { - Map nextLayerMap = new TreeMap<>(); - layerMap.put(segment, nextLayerMap); - layerMap = nextLayerMap; - } else { - if (existingLayerValue instanceof Map) { - layerMap = (Map) existingLayerValue; - } else { - // This implies an inconsistency - different additions - // imply the same path leads to both an object and a - // value. For example: - // foo.bar = x - // foo.bar.baz = y - return; - } - } - } - } - } - - public Map getResultsMap() { - return resultsMap; - } - - /** - * Mappings for a given hierarchical structure are more complex than the - * basic results. - */ - public Map getMappingsMap() { - Map mappingsMap = new TreeMap<>(); - recurseMappingsLevel(resultsMap, mappingsMap); - return mappingsMap; - } - - @SuppressWarnings("unchecked") - private void recurseMappingsLevel(Map resultsMap, Map mappingsMap) { - for (Map.Entry entry : resultsMap.entrySet()) { - Map typeMap = new TreeMap<>(); - - String name = entry.getKey(); - Object value = entry.getValue(); - if (value instanceof Map) { - Map propertiesMap = new TreeMap<>(); - recurseMappingsLevel((Map) value, propertiesMap); - - typeMap.put(ElasticsearchMappings.TYPE, ElasticsearchMappings.OBJECT); - typeMap.put(ElasticsearchMappings.PROPERTIES, propertiesMap); - mappingsMap.put(name, typeMap); - } else { - String fieldType = value.getClass().getSimpleName().toLowerCase(Locale.ROOT); - if ("string".equals(fieldType)) { - fieldType = "keyword"; - } - typeMap.put(ElasticsearchMappings.TYPE, - // Even though the add() method currently only supports - // strings, this way of getting the type would work for - // many Elasticsearch types, e.g. 
date, int, long, - // double and boolean - fieldType); - mappingsMap.put(name, typeMap); - } - } - } -} diff --git a/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ElasticsearchMappings.java b/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ElasticsearchMappings.java index 578f382de0b..f8400dd44f8 100644 --- a/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ElasticsearchMappings.java +++ b/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ElasticsearchMappings.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.ml.job.config.Job; import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelState; +import org.elasticsearch.xpack.ml.job.results.ReservedFieldNames; import org.elasticsearch.xpack.ml.notifications.AuditActivity; import org.elasticsearch.xpack.ml.notifications.AuditMessage; import org.elasticsearch.xpack.ml.job.process.autodetect.state.Quantiles; @@ -226,13 +227,9 @@ public class ElasticsearchMappings { addInfluencerFieldsToMapping(builder); addModelSizeStatsFieldsToMapping(builder); - if (termFieldNames != null) { - ElasticsearchDotNotationReverser reverser = new ElasticsearchDotNotationReverser(); - for (String fieldName : termFieldNames) { - reverser.add(fieldName, ""); - } - for (Map.Entry entry : reverser.getMappingsMap().entrySet()) { - builder.field(entry.getKey(), entry.getValue()); + for (String fieldName : termFieldNames) { + if (ReservedFieldNames.isValidFieldName(fieldName)) { + builder.startObject(fieldName).field(TYPE, KEYWORD).endObject(); } } diff --git a/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/results/AnomalyRecord.java b/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/results/AnomalyRecord.java index 936dbe4e6f9..59af044c852 100644 --- a/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/results/AnomalyRecord.java +++ b/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/results/AnomalyRecord.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.job.results; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -21,8 +22,12 @@ import org.elasticsearch.xpack.ml.utils.time.TimeUtils; import java.io.IOException; import java.util.ArrayList; import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; +import java.util.Set; /** * Anomaly Record POJO. 
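The ElasticsearchMappings hunk above replaces the deleted ElasticsearchDotNotationReverser: instead of expanding dotted term field names into nested object mappings, every analysis term field that passes ReservedFieldNames.isValidFieldName now gets a flat keyword mapping in the results mapping. A minimal, self-contained sketch of that behaviour follows; the field names ("instance", "host.name") and the reserved-name subset are invented for illustration, not taken from the patch.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

// Sketch of the new mapping step in ElasticsearchMappings: each analysis
// term field becomes a flat "keyword" mapping unless its name is reserved
// for one of our own result fields. The deleted reverser instead built
// nested object mappings for dotted names.
public class TermFieldMappingSketch {

    // Illustrative stand-in for ReservedFieldNames.RESERVED_FIELD_NAMES.
    private static final Set<String> RESERVED = Set.of("bucket_span", "anomaly_score", "timestamp");

    public static void main(String[] args) {
        Map<String, Object> properties = new LinkedHashMap<>();
        for (String termField : new String[] {"instance", "host.name", "bucket_span"}) {
            // The real guard is ReservedFieldNames.isValidFieldName, which for a
            // dotted name only checks the segment before the first dot.
            if (!RESERVED.contains(termField)) {
                properties.put(termField, Map.of("type", "keyword"));
            }
        }
        // Prints: {instance={type=keyword}, host.name={type=keyword}}
        // "bucket_span" is skipped so raw data cannot overwrite a result field.
        System.out.println(properties);
    }
}

In the real code this is written through the same XContentBuilder that holds the rest of the results mapping, so the term fields end up alongside anomaly_score, probability and the other reserved fields.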
@@ -78,7 +83,8 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable { public static final ParseField INITIAL_NORMALIZED_PROBABILITY = new ParseField("initial_normalized_probability"); public static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>(RESULT_TYPE_VALUE, a -> new AnomalyRecord((String) a[0], (Date) a[1], (long) a[2], (int) a[3])); + new ConstructingObjectParser<>(RESULT_TYPE_VALUE, true, + a -> new AnomalyRecord((String) a[0], (Date) a[1], (long) a[2], (int) a[3])); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); @@ -144,7 +150,7 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable { private final Date timestamp; private final long bucketSpan; - private List influencers; + private List influences; public AnomalyRecord(String jobId, Date timestamp, long bucketSpan, int sequenceNum) { this.jobId = jobId; @@ -185,7 +191,7 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable { timestamp = new Date(in.readLong()); bucketSpan = in.readLong(); if (in.readBoolean()) { - influencers = in.readList(Influence::new); + influences = in.readList(Influence::new); } } @@ -226,10 +232,10 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable { out.writeDouble(initialNormalizedProbability); out.writeLong(timestamp.getTime()); out.writeLong(bucketSpan); - boolean hasInfluencers = influencers != null; + boolean hasInfluencers = influences != null; out.writeBoolean(hasInfluencers); if (hasInfluencers) { - out.writeList(influencers); + out.writeList(influences); } } @@ -286,13 +292,45 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable { if (causes != null) { builder.field(CAUSES.getPreferredName(), causes); } - if (influencers != null) { - builder.field(INFLUENCERS.getPreferredName(), influencers); + if (influences != null) { + builder.field(INFLUENCERS.getPreferredName(), influences); } + + Map> inputFields = inputFieldMap(); + for (String fieldName : inputFields.keySet()) { + builder.field(fieldName, inputFields.get(fieldName)); + } + builder.endObject(); return builder; } + private Map> inputFieldMap() { + Map> result = new HashMap<>(); + + addInputFieldsToMap(result, byFieldName, byFieldValue); + addInputFieldsToMap(result, overFieldName, overFieldValue); + addInputFieldsToMap(result, partitionFieldName, partitionFieldValue); + + if (influences != null) { + for (Influence inf : influences) { + String fieldName = inf.getInfluencerFieldName(); + for (String fieldValue : inf.getInfluencerFieldValues()) { + addInputFieldsToMap(result, fieldName, fieldValue); + } + } + } + return result; + } + + private void addInputFieldsToMap(Map> inputFields, String fieldName, String fieldValue) { + if (!Strings.isNullOrEmpty(fieldName) && fieldValue != null) { + if (ReservedFieldNames.isValidFieldName(fieldName)) { + inputFields.computeIfAbsent(fieldName, k -> new HashSet()).add(fieldValue); + } + } + } + public String getJobId() { return this.jobId; } @@ -475,11 +513,11 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable { } public List getInfluencers() { - return influencers; + return influences; } public void setInfluencers(List influencers) { - this.influencers = influencers; + this.influences = influencers; } @@ -489,7 +527,7 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable { normalizedProbability, initialNormalizedProbability, typical, actual, function, functionDescription, 
fieldName, byFieldName, byFieldValue, correlatedByFieldValue, partitionFieldName, partitionFieldValue, overFieldName, overFieldValue, - timestamp, isInterim, causes, influencers, jobId); + timestamp, isInterim, causes, influences, jobId); } @@ -528,6 +566,6 @@ public class AnomalyRecord extends ToXContentToBytes implements Writeable { && Objects.equals(this.timestamp, that.timestamp) && Objects.equals(this.isInterim, that.isInterim) && Objects.equals(this.causes, that.causes) - && Objects.equals(this.influencers, that.influencers); + && Objects.equals(this.influences, that.influences); } } diff --git a/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/results/Influencer.java b/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/results/Influencer.java index 9c27f0335c1..84355d44ddd 100644 --- a/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/results/Influencer.java +++ b/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/results/Influencer.java @@ -45,7 +45,7 @@ public class Influencer extends ToXContentToBytes implements Writeable { public static final ParseField RESULTS_FIELD = new ParseField("influencers"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - RESULT_TYPE_FIELD.getPreferredName(), a -> new Influencer((String) a[0], (String) a[1], (String) a[2], + RESULT_TYPE_FIELD.getPreferredName(), true, a -> new Influencer((String) a[0], (String) a[1], (String) a[2], (Date) a[3], (long) a[4], (int) a[5])); static { @@ -123,6 +123,9 @@ public class Influencer extends ToXContentToBytes implements Writeable { builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); builder.field(INFLUENCER_FIELD_NAME.getPreferredName(), influenceField); builder.field(INFLUENCER_FIELD_VALUE.getPreferredName(), influenceValue); + if (ReservedFieldNames.isValidFieldName(influenceField)) { + builder.field(influenceField, influenceValue); + } builder.field(ANOMALY_SCORE.getPreferredName(), anomalyScore); builder.field(INITIAL_ANOMALY_SCORE.getPreferredName(), initialAnomalyScore); builder.field(PROBABILITY.getPreferredName(), probability); diff --git a/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNames.java b/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNames.java index 03bf05dfa82..ed0e58cca29 100644 --- a/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNames.java +++ b/elasticsearch/src/main/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNames.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.state.Quantiles; import java.util.Arrays; import java.util.HashSet; import java.util.Set; +import java.util.regex.Pattern; /** @@ -25,15 +26,7 @@ import java.util.Set; * contains raw data and in others it contains some aspect of our output. */ public final class ReservedFieldNames { - /** - * jobId isn't in this package, so redefine. - */ - private static final String JOB_ID_NAME = Job.ID.getPreferredName(); - - /** - * @timestamp isn't in this package, so redefine. 
- */ - private static final String ES_TIMESTAMP = "timestamp"; + private static final Pattern DOT_PATTERN = Pattern.compile("\\."); /** * This array should be updated to contain all the field names that appear @@ -45,6 +38,8 @@ public final class ReservedFieldNames { private static final String[] RESERVED_FIELD_NAME_ARRAY = { ElasticsearchMappings.ALL_FIELD_VALUES, + Job.ID.getPreferredName(), + AnomalyCause.PROBABILITY.getPreferredName(), AnomalyCause.OVER_FIELD_NAME.getPreferredName(), AnomalyCause.OVER_FIELD_VALUE.getPreferredName(), @@ -93,6 +88,7 @@ public final class ReservedFieldNames { Bucket.INITIAL_ANOMALY_SCORE.getPreferredName(), Bucket.PROCESSING_TIME_MS.getPreferredName(), Bucket.PARTITION_SCORES.getPreferredName(), + Bucket.TIMESTAMP.getPreferredName(), BucketInfluencer.INITIAL_ANOMALY_SCORE.getPreferredName(), BucketInfluencer.ANOMALY_SCORE.getPreferredName(), BucketInfluencer.RAW_ANOMALY_SCORE.getPreferredName(), BucketInfluencer.PROBABILITY.getPreferredName(), @@ -151,12 +147,24 @@ public final class ReservedFieldNames { Quantiles.QUANTILE_STATE.getPreferredName(), - Result.RESULT_TYPE.getPreferredName(), - - JOB_ID_NAME, - ES_TIMESTAMP + Result.RESULT_TYPE.getPreferredName() }; + /** + * Test if fieldName is one of the reserved names or if it contains dots then + * that the segment before the first dot is not a reserved name. A fieldName + * containing dots represents nested fields in which case we only care about + * the top level. + * + * @param fieldName Document field name. This may contain dots '.' + * @return True if fieldName is not a reserved name or the top level segment + * is not a reserved name. + */ + public static boolean isValidFieldName(String fieldName) { + String[] segments = DOT_PATTERN.split(fieldName); + return !RESERVED_FIELD_NAMES.contains(segments[0]); + } + /** * A set of all reserved field names in our results. Fields from the raw * data with these names are not added to any result. 
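The inputFieldMap()/addInputFieldsToMap() pair added to AnomalyRecord above collects every input field that contributed to a record (by, over and partition fields plus each influence) into a fieldName -> set-of-values map, and toXContent then writes each entry as a top-level array; Influencer does the simpler equivalent by writing its single influencer name/value pair directly (as a plain value, not an array) when the name is not reserved. A rough standalone sketch of the collection step, with the field names and values invented for illustration:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Sketch of AnomalyRecord.inputFieldMap(): gather the input field name/value
// pairs into fieldName -> set of values, dropping empty names, null values
// and reserved result field names. The set deduplicates a value that appears
// both as a by/over/partition field and as an influence.
public class InputFieldMapSketch {

    // Illustrative stand-in for the ReservedFieldNames.isValidFieldName check.
    private static final Set<String> RESERVED = Set.of("bucket_span", "probability");

    static void add(Map<String, Set<String>> fields, String name, String value) {
        if (name != null && !name.isEmpty() && value != null && !RESERVED.contains(name)) {
            fields.computeIfAbsent(name, k -> new HashSet<>()).add(value);
        }
    }

    public static void main(String[] args) {
        Map<String, Set<String>> fields = new HashMap<>();
        add(fields, "instance", "host-1");   // byFieldName / byFieldValue
        add(fields, "user", "bob");          // an influence name and value
        add(fields, "instance", "host-1");   // same pair seen again as an influence
        add(fields, "bucket_span", "300");   // reserved name, ignored
        // Prints something like: {instance=[host-1], user=[bob]}
        System.out.println(fields);
    }
}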
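The isValidFieldName guard those methods rely on only inspects the segment before the first dot because that segment is the top-level field Elasticsearch would create in the results mapping, which is where a clash with a reserved result field could occur. The behaviour, mirrored outside the package ("actual" really is reserved via AnomalyRecord.ACTUAL; the other reserved names here are an illustrative subset):

import java.util.Set;
import java.util.regex.Pattern;

// Mirror of ReservedFieldNames.isValidFieldName: "actual.host" would create
// a top-level "actual" object that clashes with the reserved "actual" result
// field, so it is rejected; "host.actual" only creates a top-level "host"
// object and is accepted.
public class IsValidFieldNameSketch {

    private static final Pattern DOT_PATTERN = Pattern.compile("\\.");
    // Illustrative subset of RESERVED_FIELD_NAMES.
    private static final Set<String> RESERVED_FIELD_NAMES = Set.of("actual", "typical", "bucket_span");

    static boolean isValidFieldName(String fieldName) {
        String[] segments = DOT_PATTERN.split(fieldName);
        return !RESERVED_FIELD_NAMES.contains(segments[0]);
    }

    public static void main(String[] args) {
        System.out.println(isValidFieldName("host"));        // true
        System.out.println(isValidFieldName("host.actual")); // true  - top level is "host"
        System.out.println(isValidFieldName("actual.host")); // false - top level is "actual"
    }
}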
diff --git a/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java b/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java index c361daa3891..8b207120a46 100644 --- a/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java +++ b/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java @@ -29,13 +29,11 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.Renormalizer; import org.elasticsearch.xpack.ml.job.process.normalizer.noop.NoOpRenormalizer; import org.elasticsearch.xpack.ml.job.process.autodetect.state.Quantiles; import org.elasticsearch.xpack.ml.job.results.AnomalyRecord; -import org.elasticsearch.xpack.ml.job.results.AnomalyRecordTests; import org.elasticsearch.xpack.ml.job.results.Bucket; import org.elasticsearch.xpack.ml.job.results.BucketTests; import org.elasticsearch.xpack.ml.job.results.CategoryDefinition; import org.elasticsearch.xpack.ml.job.results.CategoryDefinitionTests; import org.elasticsearch.xpack.ml.job.results.Influencer; -import org.elasticsearch.xpack.ml.job.results.InfluencerTests; import org.elasticsearch.xpack.ml.job.results.ModelDebugOutput; import org.elasticsearch.xpack.ml.job.results.ModelDebugOutputTests; import org.junit.Before; @@ -121,7 +119,7 @@ public class AutodetectResultProcessorIT extends ESSingleNodeTestCase { bucket.setRecords(Collections.emptyList()); assertEquals(bucket, persistedBucket.results().get(0)); - QueryPage persistedRecords = getRecords(new RecordsQueryBuilder().includeInterim(true).build()); + QueryPage persistedRecords = getRecords(new RecordsQueryBuilder().build()); assertResultsAreSame(records, persistedRecords); QueryPage persistedInfluencers = getInfluencers(); @@ -282,9 +280,12 @@ public class AutodetectResultProcessorIT extends ESSingleNodeTestCase { } private void createJob() { - Detector detector = new Detector.Builder("avg", "metric_field").build(); + Detector.Builder detectorBuilder = new Detector.Builder("avg", "metric_field"); + detectorBuilder.setByFieldName("by_instance"); Job.Builder jobBuilder = new Job.Builder(JOB_ID); - jobBuilder.setAnalysisConfig(new AnalysisConfig.Builder(Collections.singletonList(detector))); + AnalysisConfig.Builder analysisConfBuilder = new AnalysisConfig.Builder(Collections.singletonList(detectorBuilder.build())); + analysisConfBuilder.setInfluencers(Collections.singletonList("influence_field")); + jobBuilder.setAnalysisConfig(analysisConfBuilder); jobProvider.createJobResultIndex(jobBuilder.build(), new ActionListener() { @Override @@ -307,9 +308,11 @@ public class AutodetectResultProcessorIT extends ESSingleNodeTestCase { List records = new ArrayList<>(); int count = randomIntBetween(0, 100); - AnomalyRecordTests anomalyRecordGenerator = new AnomalyRecordTests(); + Date now = new Date(randomNonNegativeLong()); for (int i=0; i influencers = new ArrayList<>(); int count = randomIntBetween(0, 100); - InfluencerTests influencerGenerator = new InfluencerTests(); + Date now = new Date(); for (int i=0; i type = (Map) parser.map().get(Result.TYPE.getPreferredName()); + Map properties = (Map) type.get(ElasticsearchMappings.PROPERTIES); + + // check a keyword mapping for the 'instance' field was created + Map instanceMapping = (Map) properties.get("instance"); + assertNotNull(instanceMapping); + String dataType = (String)instanceMapping.get(ElasticsearchMappings.TYPE); + assertEquals(ElasticsearchMappings.KEYWORD, 
dataType); + + // check anomaly score wasn't overwritten + Map anomalyScoreMapping = (Map) properties.get(AnomalyRecord.ANOMALY_SCORE.getPreferredName()); + assertNotNull(anomalyScoreMapping); + dataType = (String)anomalyScoreMapping.get(ElasticsearchMappings.TYPE); + assertEquals(ElasticsearchMappings.DOUBLE, dataType); + } + } diff --git a/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/results/AnomalyRecordTests.java b/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/results/AnomalyRecordTests.java index 79bfb5719cb..448df6e592c 100644 --- a/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/results/AnomalyRecordTests.java +++ b/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/results/AnomalyRecordTests.java @@ -6,13 +6,18 @@ package org.elasticsearch.xpack.ml.job.results; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.ml.support.AbstractSerializingTestCase; +import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.List; +import java.util.Map; public class AnomalyRecordTests extends AbstractSerializingTestCase { @@ -79,4 +84,47 @@ public class AnomalyRecordTests extends AbstractSerializingTestCase map = parser.map(); + List serialisedByFieldValues = (List) map.get(record.getByFieldName()); + assertEquals(Collections.singletonList(record.getByFieldValue()), serialisedByFieldValues); + List serialisedOverFieldValues = (List) map.get(record.getOverFieldName()); + assertEquals(Collections.singletonList(record.getOverFieldValue()), serialisedOverFieldValues); + List serialisedPartFieldValues = (List) map.get(record.getPartitionFieldName()); + assertEquals(Collections.singletonList(record.getPartitionFieldValue()), serialisedPartFieldValues); + + List serialisedInfFieldValues1 = (List) map.get(influence1.getInfluencerFieldName()); + assertEquals(influence1.getInfluencerFieldValues(), serialisedInfFieldValues1); + List serialisedInfFieldValues2 = (List) map.get(influence2.getInfluencerFieldName()); + assertEquals(influence2.getInfluencerFieldValues(), serialisedInfFieldValues2); + } + + @SuppressWarnings("unchecked") + public void testToXContentDoesNotIncludesReservedWordInputFields() throws IOException { + AnomalyRecord record = createTestInstance(); + record.setByFieldName(AnomalyRecord.BUCKET_SPAN.getPreferredName()); + record.setByFieldValue("bar"); + + XContentBuilder builder = toXContent(record, XContentType.JSON); + XContentParser parser = createParser(builder); + Object value = parser.map().get(AnomalyRecord.BUCKET_SPAN.getPreferredName()); + assertNotEquals("bar", value); + assertEquals((Long)record.getBucketSpan(), (Long)value); + } } diff --git a/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/results/InfluencerTests.java b/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/results/InfluencerTests.java index 4c30243c347..897516a7089 100644 --- a/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/results/InfluencerTests.java +++ b/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/results/InfluencerTests.java @@ -6,9 +6,12 @@ package org.elasticsearch.xpack.ml.job.results; import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentBuilder; import 
org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.ml.support.AbstractSerializingTestCase; +import java.io.IOException; import java.util.Date; public class InfluencerTests extends AbstractSerializingTestCase { @@ -37,4 +40,22 @@ public class InfluencerTests extends AbstractSerializingTestCase { return Influencer.PARSER.apply(parser, null); } + public void testToXContentIncludesNameValueField() throws IOException { + Influencer influencer = createTestInstance("foo"); + XContentBuilder builder = toXContent(influencer, XContentType.JSON); + XContentParser parser = createParser(builder); + String serialisedFieldName = (String) parser.map().get(influencer.getInfluencerFieldName()); + assertNotNull(serialisedFieldName); + assertEquals(influencer.getInfluencerFieldValue(), serialisedFieldName); + } + + public void testToXContentDoesNotIncludeNameValueFieldWhenReservedWord() throws IOException { + Influencer influencer = new Influencer("foo", AnomalyRecord.ANOMALY_SCORE.getPreferredName(), "bar", new Date(), 300L, 0); + XContentBuilder builder = toXContent(influencer, XContentType.JSON); + XContentParser parser = createParser(builder); + Object serialisedFieldValue = parser.map().get(AnomalyRecord.ANOMALY_SCORE.getPreferredName()); + assertNotEquals("bar", serialisedFieldValue); + assertEquals(0.0, (Double)serialisedFieldValue, 0.0001); + } + } diff --git a/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNamesTests.java b/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNamesTests.java new file mode 100644 index 00000000000..d421896e482 --- /dev/null +++ b/elasticsearch/src/test/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNamesTests.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.job.results; + +import org.elasticsearch.test.ESTestCase; + +public class ReservedFieldNamesTests extends ESTestCase { + + public void testIsValidFieldName() throws Exception { + assertTrue(ReservedFieldNames.isValidFieldName("host")); + assertTrue(ReservedFieldNames.isValidFieldName("host.actual")); + assertFalse(ReservedFieldNames.isValidFieldName("actual.host")); + assertFalse(ReservedFieldNames.isValidFieldName(AnomalyRecord.BUCKET_SPAN.getPreferredName())); + } + +} \ No newline at end of file
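Taken together, the tests pin down the new document shape: a record keeps its by_field_name / by_field_value style metadata and additionally carries each non-reserved input field as a top-level array of its values, which is what makes the raw field directly searchable and aggregatable. A hypothetical persisted record, trimmed to the fields relevant here, with the job id and field values invented for illustration:

import java.util.List;
import java.util.Map;

// Hypothetical shape of a record document after this change. "instance" and
// "user" stand for the detector's by field and an influencer field; their
// top-level arrays are the newly stored input fields, everything else was
// already persisted before this patch.
public class RecordDocumentShapeSketch {
    public static void main(String[] args) {
        Map<String, Object> record = Map.<String, Object>ofEntries(
            Map.entry("job_id", "my_job"),
            Map.entry("result_type", "record"),
            Map.entry("timestamp", 1485777600000L),
            Map.entry("bucket_span", 300),
            Map.entry("by_field_name", "instance"),
            Map.entry("by_field_value", "host-1"),
            Map.entry("influencers", List.of(
                Map.of("influencer_field_name", "user", "influencer_field_values", List.of("bob")))),
            Map.entry("instance", List.of("host-1")),   // new: input by field as an array
            Map.entry("user", List.of("bob")));         // new: input influence field as an array
        System.out.println(record);
    }
}

With that shape in place, a term query or terms aggregation on "instance" or "user" against the results index works directly, without reverse-engineering the value out of by_field_value or the influencers array.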