[ML] Ignore unknown fields when parsing ML cluster state (elastic/x-pack-elasticsearch#1924)
ML has two types of custom cluster state:

1. jobs
2. datafeeds

These need to be parsed from JSON in two situations:

1. Create/update of the job/datafeed
2. Restoring cluster state on startup

Previously we used exactly the same parser in both situations, but this severely limited our ability to add new features, because the parser was very strict. Strictness is good when accepting create/update requests from users, but when restoring cluster state from disk it meant we could not add new fields, as that would prevent reloading in mixed version clusters.

This commit introduces a second parser, tolerant of unknown fields, for each object that is stored in cluster state. We then use this more tolerant parser when parsing cluster state, but still use the strict parser when parsing REST requests.

relates elastic/x-pack-elasticsearch#1732

Original commit: elastic/x-pack-elasticsearch@754e51d1ec
parent adc6fd5a0f
commit 984d2ca2ba
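The mechanism behind the whole diff is the boolean "ignore unknown fields" argument that Elasticsearch's ObjectParser and ConstructingObjectParser constructors accept. A minimal sketch of the two-parser pattern follows; ExampleConfig and its Builder are hypothetical stand-ins for the ML config classes changed below:

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ObjectParser;

// Hypothetical config class illustrating the lenient/strict parser pair.
public class ExampleConfig {

    public static final ParseField NAME = new ParseField("name");

    // Lenient parser: unknown fields are silently skipped, so cluster state
    // written by a newer node can still be read by an older one.
    public static final ObjectParser<Builder, Void> METADATA_PARSER =
            new ObjectParser<>("example_config", true, Builder::new);

    // Strict parser: unknown fields throw, so typos in user-supplied
    // create/update requests are still rejected.
    public static final ObjectParser<Builder, Void> CONFIG_PARSER =
            new ObjectParser<>("example_config", false, Builder::new);

    static {
        // Both parsers must declare exactly the same fields.
        METADATA_PARSER.declareString(Builder::setName, NAME);
        CONFIG_PARSER.declareString(Builder::setName, NAME);
    }

    public static class Builder {
        private String name;

        public void setName(String name) {
            this.name = name;
        }
    }
}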
@@ -233,7 +233,7 @@ public class MachineLearning implements ActionPlugin {
         return Arrays.asList(
                 // Custom metadata
                 new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField("ml"),
-                        parser -> MlMetadata.ML_METADATA_PARSER.parse(parser, null).build()),
+                        parser -> MlMetadata.METADATA_PARSER.parse(parser, null).build()),
                 new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(PersistentTasksCustomMetaData.TYPE),
                         PersistentTasksCustomMetaData::fromXContent),
@@ -53,11 +53,13 @@ public class MlMetadata implements MetaData.Custom {

     public static final String TYPE = "ml";
     public static final MlMetadata EMPTY_METADATA = new MlMetadata(Collections.emptySortedMap(), Collections.emptySortedMap());
-    public static final ObjectParser<Builder, Void> ML_METADATA_PARSER = new ObjectParser<>("ml_metadata", Builder::new);
+    // This parser follows the pattern that metadata is parsed leniently (to allow for enhancements)
+    public static final ObjectParser<Builder, Void> METADATA_PARSER = new ObjectParser<>("ml_metadata", true, Builder::new);

     static {
-        ML_METADATA_PARSER.declareObjectArray(Builder::putJobs, (p, c) -> Job.PARSER.apply(p, c).build(), JOBS_FIELD);
-        ML_METADATA_PARSER.declareObjectArray(Builder::putDatafeeds, (p, c) -> DatafeedConfig.PARSER.apply(p, c).build(), DATAFEEDS_FIELD);
+        METADATA_PARSER.declareObjectArray(Builder::putJobs, (p, c) -> Job.METADATA_PARSER.apply(p, c).build(), JOBS_FIELD);
+        METADATA_PARSER.declareObjectArray(Builder::putDatafeeds,
+                (p, c) -> DatafeedConfig.METADATA_PARSER.apply(p, c).build(), DATAFEEDS_FIELD);
     }

     private final SortedMap<String, Job> jobs;
@@ -0,0 +1,19 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ml;
+
+/**
+ * In order to allow enhancements that require additions to the ML custom cluster state to be made in minor versions,
+ * when we parse our metadata from persisted cluster state we ignore unknown fields. However, we don't want to be
+ * lenient when parsing config as this would mean user mistakes could go undetected. Therefore, for all JSON objects
+ * that are used in both custom cluster state and config we have two parsers, one tolerant of unknown fields (for
+ * parsing cluster state) and one strict (for parsing config). This class enumerates the two options.
+ */
+public enum MlParserType {
+
+    METADATA, CONFIG;
+
+}
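Choosing between the two flavours then becomes a lookup in each config class's PARSERS map, keyed by this enum. A sketch of the two call sites — the wrapper method names here are hypothetical, while Job.PARSERS, the enum constants and the apply() call come from the diff:

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.ml.MlParserType;
import org.elasticsearch.xpack.ml.job.config.Job;

public class ParserSelection {

    // REST layer: strict, so user mistakes fail fast.
    static Job.Builder fromRequest(XContentParser parser) {
        return Job.PARSERS.get(MlParserType.CONFIG).apply(parser, null);
    }

    // Cluster-state restore: lenient, so fields added in later minor
    // versions do not prevent the state from loading.
    static Job.Builder fromClusterState(XContentParser parser) {
        return Job.PARSERS.get(MlParserType.METADATA).apply(parser, null);
    }
}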
@@ -74,7 +74,7 @@ public class PutDatafeedAction extends Action<PutDatafeedAction.Request, PutData
     public static class Request extends AcknowledgedRequest<Request> implements ToXContent {

         public static Request parseRequest(String datafeedId, XContentParser parser) {
-            DatafeedConfig.Builder datafeed = DatafeedConfig.PARSER.apply(parser, null);
+            DatafeedConfig.Builder datafeed = DatafeedConfig.CONFIG_PARSER.apply(parser, null);
             datafeed.setId(datafeedId);
             return new Request(datafeed.build());
         }
@@ -64,7 +64,7 @@ public class PutJobAction extends Action<PutJobAction.Request, PutJobAction.Resp
     public static class Request extends AcknowledgedRequest<Request> implements ToXContent {

         public static Request parseRequest(String jobId, XContentParser parser) {
-            Job.Builder jobBuilder = Job.PARSER.apply(parser, null);
+            Job.Builder jobBuilder = Job.CONFIG_PARSER.apply(parser, null);
             if (jobBuilder.getId() == null) {
                 jobBuilder.setId(jobId);
             } else if (!Strings.isNullOrEmpty(jobId) && !jobId.equals(jobBuilder.getId())) {
@@ -62,7 +62,7 @@ extends Action<ValidateDetectorAction.Request, ValidateDetectorAction.Response,
         private Detector detector;

         public static Request parseRequest(XContentParser parser) {
-            Detector detector = Detector.PARSER.apply(parser, null).build();
+            Detector detector = Detector.CONFIG_PARSER.apply(parser, null).build();
             return new Request(detector);
         }

@@ -63,7 +63,7 @@ extends Action<ValidateJobConfigAction.Request, ValidateJobConfigAction.Response
         private Job job;

         public static Request parseRequest(XContentParser parser) {
-            Job.Builder job = Job.PARSER.apply(parser, null);
+            Job.Builder job = Job.CONFIG_PARSER.apply(parser, null);
             // When jobs are PUT their ID must be supplied in the URL - assume this will
             // be valid unless an invalid job ID is specified in the JSON to be validated
             job.setId(job.getId() != null ? job.getId() : "ok");
@@ -16,10 +16,13 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.ml.MlParserType;
 import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

 import java.io.IOException;
+import java.util.EnumMap;
 import java.util.Locale;
+import java.util.Map;
 import java.util.Objects;

 /**
@@ -30,22 +33,33 @@ public class ChunkingConfig implements ToXContentObject, Writeable {
     public static final ParseField MODE_FIELD = new ParseField("mode");
     public static final ParseField TIME_SPAN_FIELD = new ParseField("time_span");

-    public static final ConstructingObjectParser<ChunkingConfig, Void> PARSER = new ConstructingObjectParser<>(
-            "chunking_config", a -> new ChunkingConfig((Mode) a[0], (TimeValue) a[1]));
+    // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
+    public static final ConstructingObjectParser<ChunkingConfig, Void> METADATA_PARSER = new ConstructingObjectParser<>(
+            "chunking_config", true, a -> new ChunkingConfig((Mode) a[0], (TimeValue) a[1]));
+    public static final ConstructingObjectParser<ChunkingConfig, Void> CONFIG_PARSER = new ConstructingObjectParser<>(
+            "chunking_config", false, a -> new ChunkingConfig((Mode) a[0], (TimeValue) a[1]));
+    public static final Map<MlParserType, ConstructingObjectParser<ChunkingConfig, Void>> PARSERS =
+            new EnumMap<>(MlParserType.class);

     static {
-        PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> {
-            if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
-                return Mode.fromString(p.text());
-            }
-            throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
-        }, MODE_FIELD, ValueType.STRING);
-        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
-            if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
-                return TimeValue.parseTimeValue(p.text(), TIME_SPAN_FIELD.getPreferredName());
-            }
-            throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
-        }, TIME_SPAN_FIELD, ValueType.STRING);
+        PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
+        PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
+        for (MlParserType parserType : MlParserType.values()) {
+            ConstructingObjectParser<ChunkingConfig, Void> parser = PARSERS.get(parserType);
+            assert parser != null;
+            parser.declareField(ConstructingObjectParser.constructorArg(), p -> {
+                if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
+                    return Mode.fromString(p.text());
+                }
+                throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
+            }, MODE_FIELD, ValueType.STRING);
+            parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
+                if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
+                    return TimeValue.parseTimeValue(p.text(), TIME_SPAN_FIELD.getPreferredName());
+                }
+                throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
+            }, TIME_SPAN_FIELD, ValueType.STRING);
+        }
     }

     private final Mode mode;
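The shape of this ChunkingConfig change repeats in every config class below: the lenient and strict parsers are registered in an EnumMap keyed by MlParserType, and all field declarations move into a single loop over MlParserType.values(), so the two parsers cannot drift apart as fields are added later.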
@@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.xpack.ml.MlParserType;
 import org.elasticsearch.xpack.ml.job.config.Job;
 import org.elasticsearch.xpack.ml.job.messages.Messages;
 import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

@@ -37,7 +38,9 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.EnumMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.TimeUnit;

@@ -74,35 +77,42 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
     public static final ParseField SOURCE = new ParseField("_source");
     public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config");

-    public static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>("datafeed_config", Builder::new);
+    // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
+    public static final ObjectParser<Builder, Void> METADATA_PARSER = new ObjectParser<>("datafeed_config", true, Builder::new);
+    public static final ObjectParser<Builder, Void> CONFIG_PARSER = new ObjectParser<>("datafeed_config", false, Builder::new);
+    public static final Map<MlParserType, ObjectParser<Builder, Void>> PARSERS = new EnumMap<>(MlParserType.class);

     static {
-        PARSER.declareString(Builder::setId, ID);
-        PARSER.declareString(Builder::setJobId, Job.ID);
-        PARSER.declareStringArray(Builder::setIndices, INDEXES);
-        PARSER.declareStringArray(Builder::setIndices, INDICES);
-        PARSER.declareStringArray(Builder::setTypes, TYPES);
-        PARSER.declareString((builder, val) ->
-                builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY);
-        PARSER.declareString((builder, val) ->
-                builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY);
-        PARSER.declareObject(Builder::setQuery,
-                (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY);
-        PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p),
-                AGGREGATIONS);
-        PARSER.declareObject(Builder::setAggregations,(p, c) -> AggregatorFactories.parseAggregators(p), AGGS);
-        PARSER.declareObject(Builder::setScriptFields, (p, c) -> {
+        PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
+        PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
+        for (MlParserType parserType : MlParserType.values()) {
+            ObjectParser<Builder, Void> parser = PARSERS.get(parserType);
+            assert parser != null;
+            parser.declareString(Builder::setId, ID);
+            parser.declareString(Builder::setJobId, Job.ID);
+            parser.declareStringArray(Builder::setIndices, INDEXES);
+            parser.declareStringArray(Builder::setIndices, INDICES);
+            parser.declareStringArray(Builder::setTypes, TYPES);
+            parser.declareString((builder, val) ->
+                    builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY);
+            parser.declareString((builder, val) ->
+                    builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY);
+            parser.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY);
+            parser.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS);
+            parser.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS);
+            parser.declareObject(Builder::setScriptFields, (p, c) -> {
                 List<SearchSourceBuilder.ScriptField> parsedScriptFields = new ArrayList<>();
                 while (p.nextToken() != XContentParser.Token.END_OBJECT) {
                     parsedScriptFields.add(new SearchSourceBuilder.ScriptField(p));
                 }
-            parsedScriptFields.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName));
-            return parsedScriptFields;
-        }, SCRIPT_FIELDS);
-        PARSER.declareInt(Builder::setScrollSize, SCROLL_SIZE);
-        // TODO this is to read former _source field. Remove in v7.0.0
-        PARSER.declareBoolean((builder, value) -> {}, SOURCE);
-        PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, CHUNKING_CONFIG);
+                parsedScriptFields.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName));
+                return parsedScriptFields;
+            }, SCRIPT_FIELDS);
+            parser.declareInt(Builder::setScrollSize, SCROLL_SIZE);
+            // TODO this is to read former _source field. Remove in v7.0.0
+            parser.declareBoolean((builder, value) -> {}, SOURCE);
+            parser.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSERS.get(parserType), CHUNKING_CONFIG);
+        }
     }

     private final String id;
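Note how strictness propagates to nested objects: inside the loop the datafeed's chunking config is declared via ChunkingConfig.PARSERS.get(parserType), so a lenient top-level parse uses the lenient ChunkingConfig parser all the way down. AnalysisConfig, Detector, DetectionRule and Job are wired the same way in the hunks that follow.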
@@ -63,7 +63,7 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
             return parsedScriptFields;
         }, DatafeedConfig.SCRIPT_FIELDS);
         PARSER.declareInt(Builder::setScrollSize, DatafeedConfig.SCROLL_SIZE);
-        PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, DatafeedConfig.CHUNKING_CONFIG);
+        PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.CONFIG_PARSER, DatafeedConfig.CHUNKING_CONFIG);
     }

     private final String id;
@@ -14,6 +14,7 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.xpack.ml.MlParserType;
 import org.elasticsearch.xpack.ml.job.messages.Messages;
 import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
 import org.elasticsearch.xpack.ml.utils.time.TimeUtils;

@@ -21,8 +22,10 @@ import org.elasticsearch.xpack.ml.utils.time.TimeUtils;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.EnumMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.SortedSet;

@@ -70,27 +73,42 @@ public class AnalysisConfig implements ToXContentObject, Writeable {

     public static final long DEFAULT_RESULT_FINALIZATION_WINDOW = 2L;

+    // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
     @SuppressWarnings("unchecked")
-    public static final ConstructingObjectParser<AnalysisConfig.Builder, Void> PARSER =
-            new ConstructingObjectParser<>(ANALYSIS_CONFIG.getPreferredName(), a -> new AnalysisConfig.Builder((List<Detector>) a[0]));
+    public static final ConstructingObjectParser<AnalysisConfig.Builder, Void> METADATA_PARSER =
+            new ConstructingObjectParser<>(ANALYSIS_CONFIG.getPreferredName(), true,
+                    a -> new AnalysisConfig.Builder((List<Detector>) a[0]));
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<AnalysisConfig.Builder, Void> CONFIG_PARSER =
+            new ConstructingObjectParser<>(ANALYSIS_CONFIG.getPreferredName(), false,
+                    a -> new AnalysisConfig.Builder((List<Detector>) a[0]));
+    public static final Map<MlParserType, ConstructingObjectParser<Builder, Void>> PARSERS =
+            new EnumMap<>(MlParserType.class);

     static {
-        PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) -> Detector.PARSER.apply(p, c).build(), DETECTORS);
-        PARSER.declareString((builder, val) ->
-                builder.setBucketSpan(TimeValue.parseTimeValue(val, BUCKET_SPAN.getPreferredName())), BUCKET_SPAN);
-        PARSER.declareString(Builder::setCategorizationFieldName, CATEGORIZATION_FIELD_NAME);
-        PARSER.declareStringArray(Builder::setCategorizationFilters, CATEGORIZATION_FILTERS);
-        PARSER.declareString((builder, val) ->
-                builder.setLatency(TimeValue.parseTimeValue(val, LATENCY.getPreferredName())), LATENCY);
-        PARSER.declareString(Builder::setSummaryCountFieldName, SUMMARY_COUNT_FIELD_NAME);
-        PARSER.declareStringArray(Builder::setInfluencers, INFLUENCERS);
-        PARSER.declareBoolean(Builder::setOverlappingBuckets, OVERLAPPING_BUCKETS);
-        PARSER.declareLong(Builder::setResultFinalizationWindow, RESULT_FINALIZATION_WINDOW);
-        PARSER.declareBoolean(Builder::setMultivariateByFields, MULTIVARIATE_BY_FIELDS);
-        PARSER.declareStringArray((builder, values) -> builder.setMultipleBucketSpans(
-                values.stream().map(v -> TimeValue.parseTimeValue(v, MULTIPLE_BUCKET_SPANS.getPreferredName()))
-                        .collect(Collectors.toList())), MULTIPLE_BUCKET_SPANS);
-        PARSER.declareBoolean(Builder::setUsePerPartitionNormalization, USER_PER_PARTITION_NORMALIZATION);
+        PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
+        PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
+        for (MlParserType parserType : MlParserType.values()) {
+            ConstructingObjectParser<AnalysisConfig.Builder, Void> parser = PARSERS.get(parserType);
+            assert parser != null;
+            parser.declareObjectArray(ConstructingObjectParser.constructorArg(),
+                    (p, c) -> Detector.PARSERS.get(parserType).apply(p, c).build(), DETECTORS);
+            parser.declareString((builder, val) ->
+                    builder.setBucketSpan(TimeValue.parseTimeValue(val, BUCKET_SPAN.getPreferredName())), BUCKET_SPAN);
+            parser.declareString(Builder::setCategorizationFieldName, CATEGORIZATION_FIELD_NAME);
+            parser.declareStringArray(Builder::setCategorizationFilters, CATEGORIZATION_FILTERS);
+            parser.declareString((builder, val) ->
+                    builder.setLatency(TimeValue.parseTimeValue(val, LATENCY.getPreferredName())), LATENCY);
+            parser.declareString(Builder::setSummaryCountFieldName, SUMMARY_COUNT_FIELD_NAME);
+            parser.declareStringArray(Builder::setInfluencers, INFLUENCERS);
+            parser.declareBoolean(Builder::setOverlappingBuckets, OVERLAPPING_BUCKETS);
+            parser.declareLong(Builder::setResultFinalizationWindow, RESULT_FINALIZATION_WINDOW);
+            parser.declareBoolean(Builder::setMultivariateByFields, MULTIVARIATE_BY_FIELDS);
+            parser.declareStringArray((builder, values) -> builder.setMultipleBucketSpans(
+                    values.stream().map(v -> TimeValue.parseTimeValue(v, MULTIPLE_BUCKET_SPANS.getPreferredName()))
+                            .collect(Collectors.toList())), MULTIPLE_BUCKET_SPANS);
+            parser.declareBoolean(Builder::setUsePerPartitionNormalization, USER_PER_PARTITION_NORMALIZATION);
+        }
     }

     /**
@@ -16,10 +16,13 @@ import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.ml.MlParserType;
 import org.elasticsearch.xpack.ml.job.messages.Messages;
 import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

 import java.io.IOException;
+import java.util.EnumMap;
+import java.util.Map;
 import java.util.Objects;

 /**
@@ -34,19 +37,30 @@ public class AnalysisLimits implements ToXContentObject, Writeable {
     public static final ParseField MODEL_MEMORY_LIMIT = new ParseField("model_memory_limit");
     public static final ParseField CATEGORIZATION_EXAMPLES_LIMIT = new ParseField("categorization_examples_limit");

-    public static final ConstructingObjectParser<AnalysisLimits, Void> PARSER = new ConstructingObjectParser<>(
-            "analysis_limits", a -> new AnalysisLimits((Long) a[0], (Long) a[1]));
+    // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
+    public static final ConstructingObjectParser<AnalysisLimits, Void> METADATA_PARSER = new ConstructingObjectParser<>(
+            "analysis_limits", true, a -> new AnalysisLimits((Long) a[0], (Long) a[1]));
+    public static final ConstructingObjectParser<AnalysisLimits, Void> CONFIG_PARSER = new ConstructingObjectParser<>(
+            "analysis_limits", false, a -> new AnalysisLimits((Long) a[0], (Long) a[1]));
+    public static final Map<MlParserType, ConstructingObjectParser<AnalysisLimits, Void>> PARSERS =
+            new EnumMap<>(MlParserType.class);

     static {
-        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
-            if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
-                return ByteSizeValue.parseBytesSizeValue(p.text(), MODEL_MEMORY_LIMIT.getPreferredName()).getMb();
-            } else if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) {
-                return p.longValue();
-            }
-            throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
-        }, MODEL_MEMORY_LIMIT, ObjectParser.ValueType.VALUE);
-        PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), CATEGORIZATION_EXAMPLES_LIMIT);
+        PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
+        PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
+        for (MlParserType parserType : MlParserType.values()) {
+            ConstructingObjectParser<AnalysisLimits, Void> parser = PARSERS.get(parserType);
+            assert parser != null;
+            parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
+                if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
+                    return ByteSizeValue.parseBytesSizeValue(p.text(), MODEL_MEMORY_LIMIT.getPreferredName()).getMb();
+                } else if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) {
+                    return p.longValue();
+                }
+                throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
+            }, MODEL_MEMORY_LIMIT, ObjectParser.ValueType.VALUE);
+            parser.declareLong(ConstructingObjectParser.optionalConstructorArg(), CATEGORIZATION_EXAMPLES_LIMIT);
+        }
     }

     /**
@@ -14,13 +14,16 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.ml.MlParserType;
 import org.elasticsearch.xpack.ml.job.messages.Messages;
 import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
 import org.elasticsearch.xpack.ml.utils.time.DateTimeFormatterTimestampConverter;

 import java.io.IOException;
 import java.time.ZoneOffset;
+import java.util.EnumMap;
 import java.util.Locale;
+import java.util.Map;
 import java.util.Objects;

 /**
@@ -122,14 +125,25 @@ public class DataDescription implements ToXContentObject, Writeable {
     private final Character fieldDelimiter;
     private final Character quoteCharacter;

-    public static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>(DATA_DESCRIPTION_FIELD.getPreferredName(), Builder::new);
+    // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
+    public static final ObjectParser<Builder, Void> METADATA_PARSER =
+            new ObjectParser<>(DATA_DESCRIPTION_FIELD.getPreferredName(), true, Builder::new);
+    public static final ObjectParser<Builder, Void> CONFIG_PARSER =
+            new ObjectParser<>(DATA_DESCRIPTION_FIELD.getPreferredName(), false, Builder::new);
+    public static final Map<MlParserType, ObjectParser<Builder, Void>> PARSERS = new EnumMap<>(MlParserType.class);

     static {
-        PARSER.declareString(Builder::setFormat, FORMAT_FIELD);
-        PARSER.declareString(Builder::setTimeField, TIME_FIELD_NAME_FIELD);
-        PARSER.declareString(Builder::setTimeFormat, TIME_FORMAT_FIELD);
-        PARSER.declareField(Builder::setFieldDelimiter, DataDescription::extractChar, FIELD_DELIMITER_FIELD, ValueType.STRING);
-        PARSER.declareField(Builder::setQuoteCharacter, DataDescription::extractChar, QUOTE_CHARACTER_FIELD, ValueType.STRING);
+        PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
+        PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
+        for (MlParserType parserType : MlParserType.values()) {
+            ObjectParser<Builder, Void> parser = PARSERS.get(parserType);
+            assert parser != null;
+            parser.declareString(Builder::setFormat, FORMAT_FIELD);
+            parser.declareString(Builder::setTimeField, TIME_FIELD_NAME_FIELD);
+            parser.declareString(Builder::setTimeFormat, TIME_FORMAT_FIELD);
+            parser.declareField(Builder::setFieldDelimiter, DataDescription::extractChar, FIELD_DELIMITER_FIELD, ValueType.STRING);
+            parser.declareField(Builder::setQuoteCharacter, DataDescription::extractChar, QUOTE_CHARACTER_FIELD, ValueType.STRING);
+        }
     }

     public DataDescription(DataFormat dataFormat, String timeFieldName, String timeFormat, Character fieldDelimiter,
@@ -15,13 +15,16 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.ml.MlParserType;
 import org.elasticsearch.xpack.ml.job.messages.Messages;
 import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.EnumMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.stream.Collectors;

@@ -35,25 +38,36 @@ public class DetectionRule implements ToXContentObject, Writeable {
     public static final ParseField CONDITIONS_CONNECTIVE_FIELD = new ParseField("conditions_connective");
     public static final ParseField RULE_CONDITIONS_FIELD = new ParseField("rule_conditions");

-    public static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>(DETECTION_RULE_FIELD.getPreferredName(), Builder::new);
+    // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
+    public static final ObjectParser<Builder, Void> METADATA_PARSER =
+            new ObjectParser<>(DETECTION_RULE_FIELD.getPreferredName(), true, Builder::new);
+    public static final ObjectParser<Builder, Void> CONFIG_PARSER =
+            new ObjectParser<>(DETECTION_RULE_FIELD.getPreferredName(), false, Builder::new);
+    public static final Map<MlParserType, ObjectParser<Builder, Void>> PARSERS = new EnumMap<>(MlParserType.class);

     static {
-        PARSER.declareField(Builder::setRuleAction, p -> {
-            if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
-                return RuleAction.fromString(p.text());
-            }
-            throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
-        }, RULE_ACTION_FIELD, ValueType.STRING);
-        PARSER.declareString(Builder::setTargetFieldName, TARGET_FIELD_NAME_FIELD);
-        PARSER.declareString(Builder::setTargetFieldValue, TARGET_FIELD_VALUE_FIELD);
-        PARSER.declareField(Builder::setConditionsConnective, p -> {
-            if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
-                return Connective.fromString(p.text());
-            }
-            throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
-        }, CONDITIONS_CONNECTIVE_FIELD, ValueType.STRING);
-        PARSER.declareObjectArray(Builder::setRuleConditions,
-                (parser, parseFieldMatcher) -> RuleCondition.PARSER.apply(parser, parseFieldMatcher), RULE_CONDITIONS_FIELD);
+        PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
+        PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
+        for (MlParserType parserType : MlParserType.values()) {
+            ObjectParser<Builder, Void> parser = PARSERS.get(parserType);
+            assert parser != null;
+            parser.declareField(Builder::setRuleAction, p -> {
+                if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
+                    return RuleAction.fromString(p.text());
+                }
+                throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
+            }, RULE_ACTION_FIELD, ValueType.STRING);
+            parser.declareString(Builder::setTargetFieldName, TARGET_FIELD_NAME_FIELD);
+            parser.declareString(Builder::setTargetFieldValue, TARGET_FIELD_VALUE_FIELD);
+            parser.declareField(Builder::setConditionsConnective, p -> {
+                if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
+                    return Connective.fromString(p.text());
+                }
+                throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
+            }, CONDITIONS_CONNECTIVE_FIELD, ValueType.STRING);
+            parser.declareObjectArray(Builder::setRuleConditions, (p, c) ->
+                    RuleCondition.PARSERS.get(parserType).apply(p, c), RULE_CONDITIONS_FIELD);
+        }
     }

     private final RuleAction ruleAction;
@@ -16,6 +16,7 @@ import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.ml.MlParserType;
 import org.elasticsearch.xpack.ml.job.messages.Messages;
 import org.elasticsearch.xpack.ml.job.process.autodetect.writer.RecordWriter;
 import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

@@ -25,9 +26,11 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.EnumMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.stream.Collectors;

@@ -83,25 +86,34 @@ public class Detector implements ToXContentObject, Writeable {
     public static final ParseField DETECTOR_RULES_FIELD = new ParseField("detector_rules");
     public static final ParseField DETECTOR_INDEX = new ParseField("detector_index");

-    public static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>("detector", Builder::new);
+    // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
+    public static final ObjectParser<Builder, Void> METADATA_PARSER = new ObjectParser<>("detector", true, Builder::new);
+    public static final ObjectParser<Builder, Void> CONFIG_PARSER = new ObjectParser<>("detector", false, Builder::new);
+    public static final Map<MlParserType, ObjectParser<Builder, Void>> PARSERS = new EnumMap<>(MlParserType.class);

     static {
-        PARSER.declareString(Builder::setDetectorDescription, DETECTOR_DESCRIPTION_FIELD);
-        PARSER.declareString(Builder::setFunction, FUNCTION_FIELD);
-        PARSER.declareString(Builder::setFieldName, FIELD_NAME_FIELD);
-        PARSER.declareString(Builder::setByFieldName, BY_FIELD_NAME_FIELD);
-        PARSER.declareString(Builder::setOverFieldName, OVER_FIELD_NAME_FIELD);
-        PARSER.declareString(Builder::setPartitionFieldName, PARTITION_FIELD_NAME_FIELD);
-        PARSER.declareBoolean(Builder::setUseNull, USE_NULL_FIELD);
-        PARSER.declareField(Builder::setExcludeFrequent, p -> {
-            if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
-                return ExcludeFrequent.forString(p.text());
-            }
-            throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
-        }, EXCLUDE_FREQUENT_FIELD, ObjectParser.ValueType.STRING);
-        PARSER.declareObjectArray(Builder::setDetectorRules,
-                (parser, parseFieldMatcher) -> DetectionRule.PARSER.apply(parser, parseFieldMatcher).build(), DETECTOR_RULES_FIELD);
-        PARSER.declareInt(Builder::setDetectorIndex, DETECTOR_INDEX);
+        PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
+        PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
+        for (MlParserType parserType : MlParserType.values()) {
+            ObjectParser<Builder, Void> parser = PARSERS.get(parserType);
+            assert parser != null;
+            parser.declareString(Builder::setDetectorDescription, DETECTOR_DESCRIPTION_FIELD);
+            parser.declareString(Builder::setFunction, FUNCTION_FIELD);
+            parser.declareString(Builder::setFieldName, FIELD_NAME_FIELD);
+            parser.declareString(Builder::setByFieldName, BY_FIELD_NAME_FIELD);
+            parser.declareString(Builder::setOverFieldName, OVER_FIELD_NAME_FIELD);
+            parser.declareString(Builder::setPartitionFieldName, PARTITION_FIELD_NAME_FIELD);
+            parser.declareBoolean(Builder::setUseNull, USE_NULL_FIELD);
+            parser.declareField(Builder::setExcludeFrequent, p -> {
+                if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
+                    return ExcludeFrequent.forString(p.text());
+                }
+                throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
+            }, EXCLUDE_FREQUENT_FIELD, ObjectParser.ValueType.STRING);
+            parser.declareObjectArray(Builder::setDetectorRules, (p, c) ->
+                    DetectionRule.PARSERS.get(parserType).apply(p, c).build(), DETECTOR_RULES_FIELD);
+            parser.declareInt(Builder::setDetectorIndex, DETECTOR_INDEX);
+        }
     }

     public static final String COUNT = "count";
@@ -20,6 +20,7 @@ import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
+import org.elasticsearch.xpack.ml.MlParserType;
 import org.elasticsearch.xpack.ml.job.messages.Messages;
 import org.elasticsearch.xpack.ml.job.persistence.AnomalyDetectorsIndex;
 import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

@@ -29,6 +30,7 @@ import org.elasticsearch.xpack.ml.utils.time.TimeUtils;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Date;
+import java.util.EnumMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;

@@ -77,55 +79,65 @@ public class Job extends AbstractDiffable<Job> implements Writeable, ToXContentO

     public static final String ALL = "_all";

-    public static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>("job_details", Builder::new);
+    // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
+    public static final ObjectParser<Builder, Void> METADATA_PARSER = new ObjectParser<>("job_details", true, Builder::new);
+    public static final ObjectParser<Builder, Void> CONFIG_PARSER = new ObjectParser<>("job_details", false, Builder::new);
+    public static final Map<MlParserType, ObjectParser<Builder, Void>> PARSERS = new EnumMap<>(MlParserType.class);

     public static final int MAX_JOB_ID_LENGTH = 64;
     public static final TimeValue MIN_BACKGROUND_PERSIST_INTERVAL = TimeValue.timeValueHours(1);

     static {
-        PARSER.declareString(Builder::setId, ID);
-        PARSER.declareString(Builder::setJobType, JOB_TYPE);
-        PARSER.declareString(Builder::setJobVersion, JOB_VERSION);
-        PARSER.declareStringOrNull(Builder::setDescription, DESCRIPTION);
-        PARSER.declareField(Builder::setCreateTime, p -> {
-            if (p.currentToken() == Token.VALUE_NUMBER) {
-                return new Date(p.longValue());
-            } else if (p.currentToken() == Token.VALUE_STRING) {
-                return new Date(TimeUtils.dateStringToEpoch(p.text()));
-            }
-            throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" + CREATE_TIME.getPreferredName() + "]");
-        }, CREATE_TIME, ValueType.VALUE);
-        PARSER.declareField(Builder::setFinishedTime, p -> {
-            if (p.currentToken() == Token.VALUE_NUMBER) {
-                return new Date(p.longValue());
-            } else if (p.currentToken() == Token.VALUE_STRING) {
-                return new Date(TimeUtils.dateStringToEpoch(p.text()));
-            }
-            throw new IllegalArgumentException(
-                    "unexpected token [" + p.currentToken() + "] for [" + FINISHED_TIME.getPreferredName() + "]");
-        }, FINISHED_TIME, ValueType.VALUE);
-        PARSER.declareField(Builder::setLastDataTime, p -> {
-            if (p.currentToken() == Token.VALUE_NUMBER) {
-                return new Date(p.longValue());
-            } else if (p.currentToken() == Token.VALUE_STRING) {
-                return new Date(TimeUtils.dateStringToEpoch(p.text()));
-            }
-            throw new IllegalArgumentException(
-                    "unexpected token [" + p.currentToken() + "] for [" + LAST_DATA_TIME.getPreferredName() + "]");
-        }, LAST_DATA_TIME, ValueType.VALUE);
-        PARSER.declareObject(Builder::setAnalysisConfig, AnalysisConfig.PARSER, ANALYSIS_CONFIG);
-        PARSER.declareObject(Builder::setAnalysisLimits, AnalysisLimits.PARSER, ANALYSIS_LIMITS);
-        PARSER.declareObject(Builder::setDataDescription, DataDescription.PARSER, DATA_DESCRIPTION);
-        PARSER.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.PARSER, MODEL_PLOT_CONFIG);
-        PARSER.declareLong(Builder::setRenormalizationWindowDays, RENORMALIZATION_WINDOW_DAYS);
-        PARSER.declareString((builder, val) -> builder.setBackgroundPersistInterval(
-                TimeValue.parseTimeValue(val, BACKGROUND_PERSIST_INTERVAL.getPreferredName())), BACKGROUND_PERSIST_INTERVAL);
-        PARSER.declareLong(Builder::setResultsRetentionDays, RESULTS_RETENTION_DAYS);
-        PARSER.declareLong(Builder::setModelSnapshotRetentionDays, MODEL_SNAPSHOT_RETENTION_DAYS);
-        PARSER.declareField(Builder::setCustomSettings, (p, c) -> p.map(), CUSTOM_SETTINGS, ValueType.OBJECT);
-        PARSER.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID);
-        PARSER.declareString(Builder::setResultsIndexName, RESULTS_INDEX_NAME);
-        PARSER.declareBoolean(Builder::setDeleted, DELETED);
+        PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
+        PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
+        for (MlParserType parserType : MlParserType.values()) {
+            ObjectParser<Builder, Void> parser = PARSERS.get(parserType);
+            assert parser != null;
+            parser.declareString(Builder::setId, ID);
+            parser.declareString(Builder::setJobType, JOB_TYPE);
+            parser.declareString(Builder::setJobVersion, JOB_VERSION);
+            parser.declareStringOrNull(Builder::setDescription, DESCRIPTION);
+            parser.declareField(Builder::setCreateTime, p -> {
+                if (p.currentToken() == Token.VALUE_NUMBER) {
+                    return new Date(p.longValue());
+                } else if (p.currentToken() == Token.VALUE_STRING) {
+                    return new Date(TimeUtils.dateStringToEpoch(p.text()));
+                }
+                throw new IllegalArgumentException("unexpected token [" + p.currentToken() +
+                        "] for [" + CREATE_TIME.getPreferredName() + "]");
+            }, CREATE_TIME, ValueType.VALUE);
+            parser.declareField(Builder::setFinishedTime, p -> {
+                if (p.currentToken() == Token.VALUE_NUMBER) {
+                    return new Date(p.longValue());
+                } else if (p.currentToken() == Token.VALUE_STRING) {
+                    return new Date(TimeUtils.dateStringToEpoch(p.text()));
+                }
+                throw new IllegalArgumentException(
+                        "unexpected token [" + p.currentToken() + "] for [" + FINISHED_TIME.getPreferredName() + "]");
+            }, FINISHED_TIME, ValueType.VALUE);
+            parser.declareField(Builder::setLastDataTime, p -> {
+                if (p.currentToken() == Token.VALUE_NUMBER) {
+                    return new Date(p.longValue());
+                } else if (p.currentToken() == Token.VALUE_STRING) {
+                    return new Date(TimeUtils.dateStringToEpoch(p.text()));
+                }
+                throw new IllegalArgumentException(
+                        "unexpected token [" + p.currentToken() + "] for [" + LAST_DATA_TIME.getPreferredName() + "]");
+            }, LAST_DATA_TIME, ValueType.VALUE);
+            parser.declareObject(Builder::setAnalysisConfig, AnalysisConfig.PARSERS.get(parserType), ANALYSIS_CONFIG);
+            parser.declareObject(Builder::setAnalysisLimits, AnalysisLimits.PARSERS.get(parserType), ANALYSIS_LIMITS);
+            parser.declareObject(Builder::setDataDescription, DataDescription.PARSERS.get(parserType), DATA_DESCRIPTION);
+            parser.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.PARSERS.get(parserType), MODEL_PLOT_CONFIG);
+            parser.declareLong(Builder::setRenormalizationWindowDays, RENORMALIZATION_WINDOW_DAYS);
+            parser.declareString((builder, val) -> builder.setBackgroundPersistInterval(
+                    TimeValue.parseTimeValue(val, BACKGROUND_PERSIST_INTERVAL.getPreferredName())), BACKGROUND_PERSIST_INTERVAL);
+            parser.declareLong(Builder::setResultsRetentionDays, RESULTS_RETENTION_DAYS);
+            parser.declareLong(Builder::setModelSnapshotRetentionDays, MODEL_SNAPSHOT_RETENTION_DAYS);
+            parser.declareField(Builder::setCustomSettings, (p, c) -> p.map(), CUSTOM_SETTINGS, ValueType.OBJECT);
+            parser.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID);
+            parser.declareString(Builder::setResultsIndexName, RESULTS_INDEX_NAME);
+            parser.declareBoolean(Builder::setDeleted, DELETED);
+        }
     }

     private final String jobId;
@@ -27,14 +27,14 @@ public class JobUpdate implements Writeable, ToXContentObject {
     public static final ParseField DETECTORS = new ParseField("detectors");

     public static final ConstructingObjectParser<Builder, Void> PARSER = new ConstructingObjectParser<>(
-        "job_update", args -> new Builder((String) args[0]));
+            "job_update", args -> new Builder((String) args[0]));

     static {
         PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), Job.ID);
         PARSER.declareStringOrNull(Builder::setDescription, Job.DESCRIPTION);
         PARSER.declareObjectArray(Builder::setDetectorUpdates, DetectorUpdate.PARSER, DETECTORS);
-        PARSER.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.PARSER, Job.MODEL_PLOT_CONFIG);
-        PARSER.declareObject(Builder::setAnalysisLimits, AnalysisLimits.PARSER, Job.ANALYSIS_LIMITS);
+        PARSER.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.CONFIG_PARSER, Job.MODEL_PLOT_CONFIG);
+        PARSER.declareObject(Builder::setAnalysisLimits, AnalysisLimits.CONFIG_PARSER, Job.ANALYSIS_LIMITS);
         PARSER.declareString((builder, val) -> builder.setBackgroundPersistInterval(
                 TimeValue.parseTimeValue(val, Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName())), Job.BACKGROUND_PERSIST_INTERVAL);
         PARSER.declareLong(Builder::setRenormalizationWindowDays, Job.RENORMALIZATION_WINDOW_DAYS);
@@ -326,7 +326,7 @@ public class JobUpdate implements Writeable, ToXContentObject {
             PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), Detector.DETECTOR_INDEX);
             PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), Job.DESCRIPTION);
             PARSER.declareObjectArray(ConstructingObjectParser.optionalConstructorArg(),
-                    (parser, parseFieldMatcher) -> DetectionRule.PARSER.apply(parser, parseFieldMatcher).build(), RULES);
+                    (parser, parseFieldMatcher) -> DetectionRule.CONFIG_PARSER.apply(parser, parseFieldMatcher).build(), RULES);
         }

         private int detectorIndex;
@@ -12,8 +12,11 @@ import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.xpack.ml.MlParserType;

 import java.io.IOException;
+import java.util.EnumMap;
+import java.util.Map;
 import java.util.Objects;

 public class ModelPlotConfig implements ToXContentObject, Writeable {

@@ -22,13 +25,25 @@ public class ModelPlotConfig implements ToXContentObject, Writeable {
     private static final ParseField ENABLED_FIELD = new ParseField("enabled");
     public static final ParseField TERMS_FIELD = new ParseField("terms");

-    public static final ConstructingObjectParser<ModelPlotConfig, Void> PARSER =
-            new ConstructingObjectParser<>(TYPE_FIELD.getPreferredName(),
+    // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
+    public static final ConstructingObjectParser<ModelPlotConfig, Void> METADATA_PARSER =
+            new ConstructingObjectParser<>(TYPE_FIELD.getPreferredName(), true,
                     a -> new ModelPlotConfig((boolean) a[0], (String) a[1]));
+    public static final ConstructingObjectParser<ModelPlotConfig, Void> CONFIG_PARSER =
+            new ConstructingObjectParser<>(TYPE_FIELD.getPreferredName(), false,
+                    a -> new ModelPlotConfig((boolean) a[0], (String) a[1]));
+    public static final Map<MlParserType, ConstructingObjectParser<ModelPlotConfig, Void>> PARSERS =
+            new EnumMap<>(MlParserType.class);

     static {
-        PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED_FIELD);
-        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), TERMS_FIELD);
+        PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
+        PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
+        for (MlParserType parserType : MlParserType.values()) {
+            ConstructingObjectParser<ModelPlotConfig, Void> parser = PARSERS.get(parserType);
+            assert parser != null;
+            parser.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED_FIELD);
+            parser.declareString(ConstructingObjectParser.optionalConstructorArg(), TERMS_FIELD);
+        }
     }

     private final boolean enabled;
@@ -15,11 +15,14 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.ml.MlParserType;
 import org.elasticsearch.xpack.ml.job.messages.Messages;
 import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

 import java.io.IOException;
+import java.util.EnumMap;
 import java.util.EnumSet;
+import java.util.Map;
 import java.util.Objects;

 public class RuleCondition implements ToXContentObject, Writeable {

@@ -29,21 +32,33 @@ public class RuleCondition implements ToXContentObject, Writeable {
     public static final ParseField FIELD_VALUE_FIELD = new ParseField("field_value");
     public static final ParseField VALUE_FILTER_FIELD = new ParseField("value_filter");

-    public static final ConstructingObjectParser<RuleCondition, Void> PARSER =
-            new ConstructingObjectParser<>(RULE_CONDITION_FIELD.getPreferredName(),
+    // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
+    public static final ConstructingObjectParser<RuleCondition, Void> METADATA_PARSER =
+            new ConstructingObjectParser<>(RULE_CONDITION_FIELD.getPreferredName(), true,
                     a -> new RuleCondition((RuleConditionType) a[0], (String) a[1], (String) a[2], (Condition) a[3], (String) a[4]));
+    public static final ConstructingObjectParser<RuleCondition, Void> CONFIG_PARSER =
+            new ConstructingObjectParser<>(RULE_CONDITION_FIELD.getPreferredName(), false,
+                    a -> new RuleCondition((RuleConditionType) a[0], (String) a[1], (String) a[2], (Condition) a[3], (String) a[4]));
+    public static final Map<MlParserType, ConstructingObjectParser<RuleCondition, Void>> PARSERS =
+            new EnumMap<>(MlParserType.class);

     static {
-        PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> {
-            if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
-                return RuleConditionType.fromString(p.text());
-            }
-            throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
-        }, CONDITION_TYPE_FIELD, ValueType.STRING);
-        PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), FIELD_NAME_FIELD);
-        PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), FIELD_VALUE_FIELD);
-        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Condition.PARSER, Condition.CONDITION_FIELD);
-        PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), VALUE_FILTER_FIELD);
+        PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
+        PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
+        for (MlParserType parserType : MlParserType.values()) {
+            ConstructingObjectParser<RuleCondition, Void> parser = PARSERS.get(parserType);
+            assert parser != null;
+            parser.declareField(ConstructingObjectParser.constructorArg(), p -> {
+                if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
+                    return RuleConditionType.fromString(p.text());
+                }
+                throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
+            }, CONDITION_TYPE_FIELD, ValueType.STRING);
+            parser.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), FIELD_NAME_FIELD);
+            parser.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), FIELD_VALUE_FIELD);
+            parser.declareObject(ConstructingObjectParser.optionalConstructorArg(), Condition.PARSER, Condition.CONDITION_FIELD);
+            parser.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), VALUE_FILTER_FIELD);
+        }
     }

     private final RuleConditionType conditionType;
@@ -79,7 +79,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {

     @Override
     protected MlMetadata doParseInstance(XContentParser parser) {
-        return MlMetadata.ML_METADATA_PARSER.apply(parser, null).build();
+        return MlMetadata.METADATA_PARSER.apply(parser, null).build();
     }

     @Override
@@ -29,7 +29,7 @@ public class ChunkingConfigTests extends AbstractSerializingTestCase<ChunkingCon

     @Override
     protected ChunkingConfig doParseInstance(XContentParser parser) {
-        return ChunkingConfig.PARSER.apply(parser, null);
+        return ChunkingConfig.CONFIG_PARSER.apply(parser, null);
     }

     public void testConstructorGivenAutoAndTimeSpan() {

@@ -63,4 +63,4 @@ public class ChunkingConfigTests extends AbstractSerializingTestCase<ChunkingCon
     private static TimeValue randomPositiveSecondsMinutesHours() {
         return new TimeValue(randomIntBetween(1, 1000), randomFrom(Arrays.asList(TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS)));
     }
-}
+}
@@ -12,7 +12,9 @@ import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.search.SearchModule;
 import org.elasticsearch.search.aggregations.AggregationBuilders;

@@ -117,7 +119,29 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon

     @Override
     protected DatafeedConfig doParseInstance(XContentParser parser) {
-        return DatafeedConfig.PARSER.apply(parser, null).build();
+        return DatafeedConfig.CONFIG_PARSER.apply(parser, null).build();
     }

+    private static final String FUTURE_DATAFEED = "{\n" +
+            "    \"datafeed_id\": \"farequote-datafeed\",\n" +
+            "    \"job_id\": \"farequote\",\n" +
+            "    \"frequency\": \"1h\",\n" +
+            "    \"indices\": [\"farequote1\", \"farequote2\"],\n" +
+            "    \"tomorrows_technology_today\": \"amazing\",\n" +
+            "    \"scroll_size\": 1234\n" +
+            "}";
+
+    public void testFutureConfigParse() throws IOException {
+        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, FUTURE_DATAFEED);
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+                () -> DatafeedConfig.CONFIG_PARSER.apply(parser, null).build());
+        assertEquals("[datafeed_config] unknown field [tomorrows_technology_today], parser not found", e.getMessage());
+    }
+
+    public void testFutureMetadataParse() throws IOException {
+        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, FUTURE_DATAFEED);
+        // Unlike the config version of this test, the metadata parser should tolerate the unknown future field
+        assertNotNull(DatafeedConfig.METADATA_PARSER.apply(parser, null).build());
+    }
+
     public void testCopyConstructor() {
@@ -87,7 +87,7 @@ public class AnalysisConfigTests extends AbstractSerializingTestCase<AnalysisCon

     @Override
     protected AnalysisConfig doParseInstance(XContentParser parser) {
-        return AnalysisConfig.PARSER.apply(parser, null).build();
+        return AnalysisConfig.CONFIG_PARSER.apply(parser, null).build();
     }

     public void testFieldConfiguration_singleDetector_notPreSummarised() {
@@ -35,20 +35,20 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLimits> {

    @Override
    protected AnalysisLimits doParseInstance(XContentParser parser) {
-        return AnalysisLimits.PARSER.apply(parser, null);
+        return AnalysisLimits.CONFIG_PARSER.apply(parser, null);
    }

    public void testParseModelMemoryLimitGivenNegativeNumber() throws IOException {
        String json = "{\"model_memory_limit\": -1}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, json);
-        ParsingException e = expectThrows(ParsingException.class, () -> AnalysisLimits.PARSER.apply(parser, null));
+        ParsingException e = expectThrows(ParsingException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null));
        assertThat(e.getRootCause().getMessage(), containsString("model_memory_limit must be at least 1 MiB. Value = -1"));
    }

    public void testParseModelMemoryLimitGivenZero() throws IOException {
        String json = "{\"model_memory_limit\": 0}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, json);
-        ParsingException e = expectThrows(ParsingException.class, () -> AnalysisLimits.PARSER.apply(parser, null));
+        ParsingException e = expectThrows(ParsingException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null));
        assertThat(e.getRootCause().getMessage(), containsString("model_memory_limit must be at least 1 MiB. Value = 0"));
    }

@@ -56,7 +56,7 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLimits> {
        String json = "{\"model_memory_limit\": 2048}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, json);

-        AnalysisLimits limits = AnalysisLimits.PARSER.apply(parser, null);
+        AnalysisLimits limits = AnalysisLimits.CONFIG_PARSER.apply(parser, null);

        assertThat(limits.getModelMemoryLimit(), equalTo(2048L));
    }

@@ -64,21 +64,21 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLimits> {
    public void testParseModelMemoryLimitGivenNegativeString() throws IOException {
        String json = "{\"model_memory_limit\":\"-4MB\"}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, json);
-        ParsingException e = expectThrows(ParsingException.class, () -> AnalysisLimits.PARSER.apply(parser, null));
+        ParsingException e = expectThrows(ParsingException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null));
        assertThat(e.getRootCause().getMessage(), containsString("model_memory_limit must be at least 1 MiB. Value = -4"));
    }

    public void testParseModelMemoryLimitGivenZeroString() throws IOException {
        String json = "{\"model_memory_limit\":\"0MB\"}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, json);
-        ParsingException e = expectThrows(ParsingException.class, () -> AnalysisLimits.PARSER.apply(parser, null));
+        ParsingException e = expectThrows(ParsingException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null));
        assertThat(e.getRootCause().getMessage(), containsString("model_memory_limit must be at least 1 MiB. Value = 0"));
    }

    public void testParseModelMemoryLimitGivenLessThanOneMBString() throws IOException {
        String json = "{\"model_memory_limit\":\"1000Kb\"}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, json);
-        ParsingException e = expectThrows(ParsingException.class, () -> AnalysisLimits.PARSER.apply(parser, null));
+        ParsingException e = expectThrows(ParsingException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null));
        assertThat(e.getRootCause().getMessage(), containsString("model_memory_limit must be at least 1 MiB. Value = 0"));
    }

@@ -86,7 +86,7 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLimits> {
        String json = "{\"model_memory_limit\":\"4g\"}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, json);

-        AnalysisLimits limits = AnalysisLimits.PARSER.apply(parser, null);
+        AnalysisLimits limits = AnalysisLimits.CONFIG_PARSER.apply(parser, null);

        assertThat(limits.getModelMemoryLimit(), equalTo(4096L));
    }

@@ -95,7 +95,7 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLimits> {
        String json = "{\"model_memory_limit\":\"1300kb\"}";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, json);

-        AnalysisLimits limits = AnalysisLimits.PARSER.apply(parser, null);
+        AnalysisLimits limits = AnalysisLimits.CONFIG_PARSER.apply(parser, null);

        assertThat(limits.getModelMemoryLimit(), equalTo(1L));
    }

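The MiB expectations in these tests follow from parsing the string as a byte-size value and truncating to whole mebibytes: "4g" becomes 4096, "1300kb" becomes 1, and "1000Kb" rounds down to an illegal 0. A minimal sketch of that conversion, assuming Elasticsearch's ByteSizeValue utility (the setting name argument is only used in error messages); plain numbers like 2048 are taken as MiB directly by the setter:

import org.elasticsearch.common.unit.ByteSizeValue;

// Byte-size strings are floored to whole mebibytes by integer division:
// 1300kb = 1300 * 1024 bytes; 1_331_200 / (1024 * 1024) == 1
long mb = ByteSizeValue.parseBytesSizeValue("1300kb", "model_memory_limit").getMb();
assert mb == 1L;
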
@@ -200,7 +200,7 @@ public class DataDescriptionTests extends AbstractSerializingTestCase<DataDescription> {
        BytesArray json = new BytesArray("{ \"format\":\"INEXISTENT_FORMAT\" }");
        XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, json);
        ParsingException ex = expectThrows(ParsingException.class,
-                () -> DataDescription.PARSER.apply(parser, null));
+                () -> DataDescription.CONFIG_PARSER.apply(parser, null));
        assertThat(ex.getMessage(), containsString("[data_description] failed to parse field [format]"));
        Throwable cause = ex.getCause();
        assertNotNull(cause);

@@ -213,7 +213,7 @@ public class DataDescriptionTests extends AbstractSerializingTestCase<DataDescription> {
        BytesArray json = new BytesArray("{ \"field_delimiter\":\",,\" }");
        XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, json);
        ParsingException ex = expectThrows(ParsingException.class,
-                () -> DataDescription.PARSER.apply(parser, null));
+                () -> DataDescription.CONFIG_PARSER.apply(parser, null));
        assertThat(ex.getMessage(), containsString("[data_description] failed to parse field [field_delimiter]"));
        Throwable cause = ex.getCause();
        assertNotNull(cause);

@@ -226,7 +226,7 @@ public class DataDescriptionTests extends AbstractSerializingTestCase<DataDescription> {
        BytesArray json = new BytesArray("{ \"quote_character\":\"''\" }");
        XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, json);
        ParsingException ex = expectThrows(ParsingException.class,
-                () -> DataDescription.PARSER.apply(parser, null));
+                () -> DataDescription.CONFIG_PARSER.apply(parser, null));
        assertThat(ex.getMessage(), containsString("[data_description] failed to parse field [quote_character]"));
        Throwable cause = ex.getCause();
        assertNotNull(cause);

@@ -270,6 +270,6 @@ public class DataDescriptionTests extends AbstractSerializingTestCase<DataDescription> {

    @Override
    protected DataDescription doParseInstance(XContentParser parser) {
-        return DataDescription.PARSER.apply(parser, null).build();
+        return DataDescription.CONFIG_PARSER.apply(parser, null).build();
    }
}

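The DataDescription assertions above check a two-layer error: ObjectParser wraps whatever a field's setter throws in a ParsingException naming the object and field, and the original exception survives as the cause. A hedged sketch of how the format field could produce exactly that shape; FORMAT_FIELD and DataFormat.forString are assumptions about the config class, not verified code:

// If forString rejects "INEXISTENT_FORMAT" with an IllegalArgumentException,
// ObjectParser rethrows it as ParsingException("[data_description] failed to
// parse field [format]") with the original exception attached as the cause.
ObjectParser<DataDescription.Builder, Void> parser =
        new ObjectParser<>("data_description", DataDescription.Builder::new);
parser.declareString((builder, s) -> builder.setFormat(DataFormat.forString(s)), FORMAT_FIELD);
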
@@ -119,6 +119,6 @@ public class DetectionRuleTests extends AbstractSerializingTestCase<DetectionRule> {

    @Override
    protected DetectionRule doParseInstance(XContentParser parser) {
-        return DetectionRule.PARSER.apply(parser, null).build();
+        return DetectionRule.CONFIG_PARSER.apply(parser, null).build();
    }
}

@@ -199,7 +199,7 @@ public class DetectorTests extends AbstractSerializingTestCase<Detector> {

    @Override
    protected Detector doParseInstance(XContentParser parser) {
-        return Detector.PARSER.apply(parser, null).build();
+        return Detector.CONFIG_PARSER.apply(parser, null).build();
    }

    public void testVerifyFieldNames_givenInvalidChars() {

@@ -82,6 +82,6 @@ public class JobBuilderTests extends AbstractSerializingTestCase<Job.Builder> {

    @Override
    protected Job.Builder doParseInstance(XContentParser parser) {
-        return Job.PARSER.apply(parser, null);
+        return Job.CONFIG_PARSER.apply(parser, null);
    }
}

@@ -9,6 +9,7 @@ import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -34,6 +35,21 @@ import static org.hamcrest.Matchers.nullValue;

public class JobTests extends AbstractSerializingTestCase<Job> {

+    private static final String FUTURE_JOB = "{\n" +
+            " \"job_id\": \"farequote\",\n" +
+            " \"create_time\": 1234567890000,\n" +
+            " \"tomorrows_technology_today\": \"wow\",\n" +
+            " \"analysis_config\": {\n" +
+            " \"bucket_span\": \"1h\",\n" +
+            " \"something_new\": \"gasp\",\n" +
+            " \"detectors\": [{\"function\": \"metric\", \"field_name\": \"responsetime\", \"by_field_name\": \"airline\"}]\n" +
+            " },\n" +
+            " \"data_description\": {\n" +
+            " \"time_field\": \"time\",\n" +
+            " \"the_future\": 123\n" +
+            " }\n" +
+            "}";
+
    @Override
    protected Job createTestInstance() {
        return createRandomizedJob();

@@ -46,7 +62,20 @@ public class JobTests extends AbstractSerializingTestCase<Job> {

    @Override
    protected Job doParseInstance(XContentParser parser) {
-        return Job.PARSER.apply(parser, null).build();
+        return Job.CONFIG_PARSER.apply(parser, null).build();
    }

+    public void testFutureConfigParse() throws IOException {
+        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, FUTURE_JOB);
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+                () -> Job.CONFIG_PARSER.apply(parser, null).build());
+        assertEquals("[job_details] unknown field [tomorrows_technology_today], parser not found", e.getMessage());
+    }
+
+    public void testFutureMetadataParse() throws IOException {
+        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, FUTURE_JOB);
+        // Unlike the config version of this test, the metadata parser should tolerate the unknown future field
+        assertNotNull(Job.METADATA_PARSER.apply(parser, null).build());
+    }
+
    public void testConstructor_GivenEmptyJobConfiguration() {

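Note that FUTURE_JOB plants unknown fields at three levels: top-level, inside analysis_config, and inside data_description. Leniency therefore has to hold recursively, with the lenient Job parser delegating to the lenient parsers of its nested objects and the strict parser to the strict ones. A sketch of how that wiring might look; the field constants and setter names are assumptions for illustration, not the commit's literal code:

// Lenient at the top AND for each nested object, so an unknown field anywhere
// in a persisted job document is skipped when cluster state is reloaded.
public static final ObjectParser<Job.Builder, Void> METADATA_PARSER =
        new ObjectParser<>("job_details", true, Job.Builder::new);
static {
    METADATA_PARSER.declareObject(Job.Builder::setAnalysisConfig,
            (p, c) -> AnalysisConfig.METADATA_PARSER.apply(p, c).build(), ANALYSIS_CONFIG);
    METADATA_PARSER.declareObject(Job.Builder::setDataDescription,
            (p, c) -> DataDescription.METADATA_PARSER.apply(p, c).build(), DATA_DESCRIPTION);
    // ...remaining declarations mirror the strict CONFIG_PARSER
}
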
@@ -31,6 +31,6 @@ public class ModelPlotConfigTests extends AbstractSerializingTestCase<ModelPlotConfig> {

    @Override
    protected ModelPlotConfig doParseInstance(XContentParser parser) {
-        return ModelPlotConfig.PARSER.apply(parser, null);
+        return ModelPlotConfig.CONFIG_PARSER.apply(parser, null);
    }
}

@@ -47,7 +47,7 @@ public class RuleConditionTests extends AbstractSerializingTestCase<RuleCondition> {

    @Override
    protected RuleCondition doParseInstance(XContentParser parser) {
-        return RuleCondition.PARSER.apply(parser, null);
+        return RuleCondition.CONFIG_PARSER.apply(parser, null);
    }

    public void testConstructor() {