[ML] Consistent pattern for strict/lenient parser names (#32399)

Previously we had two patterns for naming of strict
and lenient parsers.

Some classes had CONFIG_PARSER and METADATA_PARSER,
and used an enum to pass the parser type to nested
parsers.

Other classes had STRICT_PARSER and LENIENT_PARSER
and used ternary operators to pass the parser type
to nested parsers.

This change makes all ML classes use the second of
the patterns described above.
This commit is contained in:
David Roberts 2018-07-26 16:55:40 +01:00 committed by GitHub
parent 63a0436764
commit 0afa265ac9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
36 changed files with 311 additions and 408 deletions

View File

@@ -373,7 +373,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
return Arrays.asList(
// ML - Custom metadata
new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField("ml"),
parser -> MlMetadata.METADATA_PARSER.parse(parser, null).build()),
parser -> MlMetadata.LENIENT_PARSER.parse(parser, null).build()),
// ML - Persistent action requests
new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(StartDatafeedAction.TASK_NAME),
StartDatafeedAction.DatafeedParams::fromXContent),

View File

@@ -61,12 +61,12 @@ public class MlMetadata implements XPackPlugin.XPackMetaDataCustom {
public static final MlMetadata EMPTY_METADATA = new MlMetadata(Collections.emptySortedMap(), Collections.emptySortedMap());
// This parser follows the pattern that metadata is parsed leniently (to allow for enhancements)
public static final ObjectParser<Builder, Void> METADATA_PARSER = new ObjectParser<>("ml_metadata", true, Builder::new);
public static final ObjectParser<Builder, Void> LENIENT_PARSER = new ObjectParser<>("ml_metadata", true, Builder::new);
static {
METADATA_PARSER.declareObjectArray(Builder::putJobs, (p, c) -> Job.METADATA_PARSER.apply(p, c).build(), JOBS_FIELD);
METADATA_PARSER.declareObjectArray(Builder::putDatafeeds,
(p, c) -> DatafeedConfig.METADATA_PARSER.apply(p, c).build(), DATAFEEDS_FIELD);
LENIENT_PARSER.declareObjectArray(Builder::putJobs, (p, c) -> Job.LENIENT_PARSER.apply(p, c).build(), JOBS_FIELD);
LENIENT_PARSER.declareObjectArray(Builder::putDatafeeds,
(p, c) -> DatafeedConfig.LENIENT_PARSER.apply(p, c).build(), DATAFEEDS_FIELD);
}
private final SortedMap<String, Job> jobs;

View File

@@ -1,19 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.ml;
/**
 * In order to allow enhancements that require additions to the ML custom cluster state to be made in minor versions,
 * when we parse our metadata from persisted cluster state we ignore unknown fields. However, we don't want to be
 * lenient when parsing config as this would mean user mistakes could go undetected. Therefore, for all JSON objects
 * that are used in both custom cluster state and config we have two parsers, one tolerant of unknown fields (for
 * parsing cluster state) and one strict (for parsing config). This class enumerates the two options.
 */
public enum MlParserType {
// METADATA selects the lenient parser (unknown fields ignored, used for persisted cluster state);
// CONFIG selects the strict parser (unknown fields rejected, used for user-supplied configuration).
// NOTE(review): this commit removes this enum in favour of passing a boolean ignoreUnknownFields flag.
METADATA, CONFIG;
}

View File

@@ -39,7 +39,7 @@ public class PutDatafeedAction extends Action<PutDatafeedAction.Response> {
public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {
public static Request parseRequest(String datafeedId, XContentParser parser) {
DatafeedConfig.Builder datafeed = DatafeedConfig.CONFIG_PARSER.apply(parser, null);
DatafeedConfig.Builder datafeed = DatafeedConfig.STRICT_PARSER.apply(parser, null);
datafeed.setId(datafeedId);
return new Request(datafeed.build());
}

View File

@@ -42,7 +42,7 @@ public class PutJobAction extends Action<PutJobAction.Response> {
public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {
public static Request parseRequest(String jobId, XContentParser parser) {
Job.Builder jobBuilder = Job.CONFIG_PARSER.apply(parser, null);
Job.Builder jobBuilder = Job.STRICT_PARSER.apply(parser, null);
if (jobBuilder.getId() == null) {
jobBuilder.setId(jobId);
} else if (!Strings.isNullOrEmpty(jobId) && !jobId.equals(jobBuilder.getId())) {

View File

@@ -48,7 +48,7 @@ public class ValidateDetectorAction extends Action<ValidateDetectorAction.Respon
private Detector detector;
public static Request parseRequest(XContentParser parser) {
Detector detector = Detector.CONFIG_PARSER.apply(parser, null).build();
Detector detector = Detector.STRICT_PARSER.apply(parser, null).build();
return new Request(detector);
}

View File

@@ -49,7 +49,7 @@ public class ValidateJobConfigAction extends Action<ValidateJobConfigAction.Resp
private Job job;
public static Request parseRequest(XContentParser parser) {
Job.Builder job = Job.CONFIG_PARSER.apply(parser, null);
Job.Builder job = Job.STRICT_PARSER.apply(parser, null);
// When jobs are PUT their ID must be supplied in the URL - assume this will
// be valid unless an invalid job ID is specified in the JSON to be validated
job.setId(job.getId() != null ? job.getId() : "ok");

View File

@ -16,13 +16,10 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ml.MlParserType;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.EnumMap;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
/**
@ -34,32 +31,27 @@ public class ChunkingConfig implements ToXContentObject, Writeable {
public static final ParseField TIME_SPAN_FIELD = new ParseField("time_span");
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ConstructingObjectParser<ChunkingConfig, Void> METADATA_PARSER = new ConstructingObjectParser<>(
"chunking_config", true, a -> new ChunkingConfig((Mode) a[0], (TimeValue) a[1]));
public static final ConstructingObjectParser<ChunkingConfig, Void> CONFIG_PARSER = new ConstructingObjectParser<>(
"chunking_config", false, a -> new ChunkingConfig((Mode) a[0], (TimeValue) a[1]));
public static final Map<MlParserType, ConstructingObjectParser<ChunkingConfig, Void>> PARSERS =
new EnumMap<>(MlParserType.class);
public static final ConstructingObjectParser<ChunkingConfig, Void> LENIENT_PARSER = createParser(true);
public static final ConstructingObjectParser<ChunkingConfig, Void> STRICT_PARSER = createParser(false);
static {
PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
for (MlParserType parserType : MlParserType.values()) {
ConstructingObjectParser<ChunkingConfig, Void> parser = PARSERS.get(parserType);
assert parser != null;
parser.declareField(ConstructingObjectParser.constructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return Mode.fromString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, MODE_FIELD, ValueType.STRING);
parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return TimeValue.parseTimeValue(p.text(), TIME_SPAN_FIELD.getPreferredName());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, TIME_SPAN_FIELD, ValueType.STRING);
}
private static ConstructingObjectParser<ChunkingConfig, Void> createParser(boolean ignoreUnknownFields) {
ConstructingObjectParser<ChunkingConfig, Void> parser = new ConstructingObjectParser<>(
"chunking_config", ignoreUnknownFields, a -> new ChunkingConfig((Mode) a[0], (TimeValue) a[1]));
parser.declareField(ConstructingObjectParser.constructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return Mode.fromString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, MODE_FIELD, ValueType.STRING);
parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return TimeValue.parseTimeValue(p.text(), TIME_SPAN_FIELD.getPreferredName());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, TIME_SPAN_FIELD, ValueType.STRING);
return parser;
}
private final Mode mode;

View File

@ -25,7 +25,6 @@ import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.xpack.core.ml.MlParserType;
import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
@ -38,7 +37,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@ -87,44 +85,46 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
public static final ParseField HEADERS = new ParseField("headers");
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ObjectParser<Builder, Void> METADATA_PARSER = new ObjectParser<>("datafeed_config", true, Builder::new);
public static final ObjectParser<Builder, Void> CONFIG_PARSER = new ObjectParser<>("datafeed_config", false, Builder::new);
public static final Map<MlParserType, ObjectParser<Builder, Void>> PARSERS = new EnumMap<>(MlParserType.class);
public static final ObjectParser<Builder, Void> LENIENT_PARSER = createParser(true);
public static final ObjectParser<Builder, Void> STRICT_PARSER = createParser(false);
static {
PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
for (MlParserType parserType : MlParserType.values()) {
ObjectParser<Builder, Void> parser = PARSERS.get(parserType);
assert parser != null;
parser.declareString(Builder::setId, ID);
parser.declareString(Builder::setJobId, Job.ID);
parser.declareStringArray(Builder::setIndices, INDEXES);
parser.declareStringArray(Builder::setIndices, INDICES);
parser.declareStringArray(Builder::setTypes, TYPES);
parser.declareString((builder, val) ->
builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY);
parser.declareString((builder, val) ->
builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY);
parser.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY);
parser.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS);
parser.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS);
parser.declareObject(Builder::setScriptFields, (p, c) -> {
List<SearchSourceBuilder.ScriptField> parsedScriptFields = new ArrayList<>();
while (p.nextToken() != XContentParser.Token.END_OBJECT) {
parsedScriptFields.add(new SearchSourceBuilder.ScriptField(p));
}
parsedScriptFields.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName));
return parsedScriptFields;
}, SCRIPT_FIELDS);
parser.declareInt(Builder::setScrollSize, SCROLL_SIZE);
// TODO this is to read former _source field. Remove in v7.0.0
parser.declareBoolean((builder, value) -> {}, SOURCE);
parser.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSERS.get(parserType), CHUNKING_CONFIG);
private static ObjectParser<Builder, Void> createParser(boolean ignoreUnknownFields) {
ObjectParser<Builder, Void> parser = new ObjectParser<>("datafeed_config", ignoreUnknownFields, Builder::new);
parser.declareString(Builder::setId, ID);
parser.declareString(Builder::setJobId, Job.ID);
parser.declareStringArray(Builder::setIndices, INDEXES);
parser.declareStringArray(Builder::setIndices, INDICES);
parser.declareStringArray(Builder::setTypes, TYPES);
parser.declareString((builder, val) ->
builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY);
parser.declareString((builder, val) ->
builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY);
parser.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY);
parser.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS);
parser.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS);
parser.declareObject(Builder::setScriptFields, (p, c) -> {
List<SearchSourceBuilder.ScriptField> parsedScriptFields = new ArrayList<>();
while (p.nextToken() != XContentParser.Token.END_OBJECT) {
parsedScriptFields.add(new SearchSourceBuilder.ScriptField(p));
}
parsedScriptFields.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName));
return parsedScriptFields;
}, SCRIPT_FIELDS);
parser.declareInt(Builder::setScrollSize, SCROLL_SIZE);
// TODO this is to read former _source field. Remove in v7.0.0
parser.declareBoolean((builder, value) -> {
}, SOURCE);
parser.declareObject(Builder::setChunkingConfig, ignoreUnknownFields ? ChunkingConfig.LENIENT_PARSER : ChunkingConfig.STRICT_PARSER,
CHUNKING_CONFIG);
if (ignoreUnknownFields) {
// Headers are not parsed by the strict (config) parser, so headers supplied in the _body_ of a REST request will be rejected.
// (For config, headers are explicitly transferred from the auth headers by code in the put/update datafeed actions.)
parser.declareObject(Builder::setHeaders, (p, c) -> p.mapStrings(), HEADERS);
}
// Headers are only parsed by the metadata parser, so headers supplied in the _body_ of a REST request will be rejected.
// (For config headers are explicitly transferred from the auth headers by code in the put/update datafeed actions.)
METADATA_PARSER.declareObject(Builder::setHeaders, (p, c) -> p.mapStrings(), HEADERS);
return parser;
}
private final String id;

View File

@ -68,7 +68,7 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
return parsedScriptFields;
}, DatafeedConfig.SCRIPT_FIELDS);
PARSER.declareInt(Builder::setScrollSize, DatafeedConfig.SCROLL_SIZE);
PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.CONFIG_PARSER, DatafeedConfig.CHUNKING_CONFIG);
PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.STRICT_PARSER, DatafeedConfig.CHUNKING_CONFIG);
}
private final String id;

View File

@ -16,7 +16,6 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.core.ml.MlParserType;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils;
@ -24,10 +23,8 @@ import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.SortedSet;
@ -76,46 +73,38 @@ public class AnalysisConfig implements ToXContentObject, Writeable {
public static final long DEFAULT_RESULT_FINALIZATION_WINDOW = 2L;
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<AnalysisConfig.Builder, Void> METADATA_PARSER =
new ConstructingObjectParser<>(ANALYSIS_CONFIG.getPreferredName(), true,
a -> new AnalysisConfig.Builder((List<Detector>) a[0]));
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<AnalysisConfig.Builder, Void> CONFIG_PARSER =
new ConstructingObjectParser<>(ANALYSIS_CONFIG.getPreferredName(), false,
a -> new AnalysisConfig.Builder((List<Detector>) a[0]));
public static final Map<MlParserType, ConstructingObjectParser<Builder, Void>> PARSERS =
new EnumMap<>(MlParserType.class);
public static final ConstructingObjectParser<AnalysisConfig.Builder, Void> LENIENT_PARSER = createParser(true);
public static final ConstructingObjectParser<AnalysisConfig.Builder, Void> STRICT_PARSER = createParser(false);
static {
PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
for (MlParserType parserType : MlParserType.values()) {
ConstructingObjectParser<AnalysisConfig.Builder, Void> parser = PARSERS.get(parserType);
assert parser != null;
parser.declareObjectArray(ConstructingObjectParser.constructorArg(),
(p, c) -> Detector.PARSERS.get(parserType).apply(p, c).build(), DETECTORS);
parser.declareString((builder, val) ->
builder.setBucketSpan(TimeValue.parseTimeValue(val, BUCKET_SPAN.getPreferredName())), BUCKET_SPAN);
parser.declareString(Builder::setCategorizationFieldName, CATEGORIZATION_FIELD_NAME);
parser.declareStringArray(Builder::setCategorizationFilters, CATEGORIZATION_FILTERS);
// This one is nasty - the syntax for analyzers takes either names or objects at many levels, hence it's not
// possible to simply declare whether the field is a string or object and a completely custom parser is required
parser.declareField(Builder::setCategorizationAnalyzerConfig,
(p, c) -> CategorizationAnalyzerConfig.buildFromXContentFragment(p, parserType),
CATEGORIZATION_ANALYZER, ObjectParser.ValueType.OBJECT_OR_STRING);
parser.declareString((builder, val) ->
builder.setLatency(TimeValue.parseTimeValue(val, LATENCY.getPreferredName())), LATENCY);
parser.declareString(Builder::setSummaryCountFieldName, SUMMARY_COUNT_FIELD_NAME);
parser.declareStringArray(Builder::setInfluencers, INFLUENCERS);
parser.declareBoolean(Builder::setOverlappingBuckets, OVERLAPPING_BUCKETS);
parser.declareLong(Builder::setResultFinalizationWindow, RESULT_FINALIZATION_WINDOW);
parser.declareBoolean(Builder::setMultivariateByFields, MULTIVARIATE_BY_FIELDS);
parser.declareStringArray((builder, values) -> builder.setMultipleBucketSpans(
values.stream().map(v -> TimeValue.parseTimeValue(v, MULTIPLE_BUCKET_SPANS.getPreferredName()))
.collect(Collectors.toList())), MULTIPLE_BUCKET_SPANS);
parser.declareBoolean(Builder::setUsePerPartitionNormalization, USER_PER_PARTITION_NORMALIZATION);
}
@SuppressWarnings("unchecked")
private static ConstructingObjectParser<AnalysisConfig.Builder, Void> createParser(boolean ignoreUnknownFields) {
ConstructingObjectParser<AnalysisConfig.Builder, Void> parser = new ConstructingObjectParser<>(ANALYSIS_CONFIG.getPreferredName(),
ignoreUnknownFields, a -> new AnalysisConfig.Builder((List<Detector>) a[0]));
parser.declareObjectArray(ConstructingObjectParser.constructorArg(),
(p, c) -> (ignoreUnknownFields ? Detector.LENIENT_PARSER : Detector.STRICT_PARSER).apply(p, c).build(), DETECTORS);
parser.declareString((builder, val) ->
builder.setBucketSpan(TimeValue.parseTimeValue(val, BUCKET_SPAN.getPreferredName())), BUCKET_SPAN);
parser.declareString(Builder::setCategorizationFieldName, CATEGORIZATION_FIELD_NAME);
parser.declareStringArray(Builder::setCategorizationFilters, CATEGORIZATION_FILTERS);
// This one is nasty - the syntax for analyzers takes either names or objects at many levels, hence it's not
// possible to simply declare whether the field is a string or object and a completely custom parser is required
parser.declareField(Builder::setCategorizationAnalyzerConfig,
(p, c) -> CategorizationAnalyzerConfig.buildFromXContentFragment(p, ignoreUnknownFields),
CATEGORIZATION_ANALYZER, ObjectParser.ValueType.OBJECT_OR_STRING);
parser.declareString((builder, val) ->
builder.setLatency(TimeValue.parseTimeValue(val, LATENCY.getPreferredName())), LATENCY);
parser.declareString(Builder::setSummaryCountFieldName, SUMMARY_COUNT_FIELD_NAME);
parser.declareStringArray(Builder::setInfluencers, INFLUENCERS);
parser.declareBoolean(Builder::setOverlappingBuckets, OVERLAPPING_BUCKETS);
parser.declareLong(Builder::setResultFinalizationWindow, RESULT_FINALIZATION_WINDOW);
parser.declareBoolean(Builder::setMultivariateByFields, MULTIVARIATE_BY_FIELDS);
parser.declareStringArray((builder, values) -> builder.setMultipleBucketSpans(
values.stream().map(v -> TimeValue.parseTimeValue(v, MULTIPLE_BUCKET_SPANS.getPreferredName()))
.collect(Collectors.toList())), MULTIPLE_BUCKET_SPANS);
parser.declareBoolean(Builder::setUsePerPartitionNormalization, USER_PER_PARTITION_NORMALIZATION);
return parser;
}
/**

View File

@ -17,13 +17,10 @@ import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ml.MlParserType;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.EnumMap;
import java.util.Map;
import java.util.Objects;
/**
@ -50,31 +47,26 @@ public class AnalysisLimits implements ToXContentObject, Writeable {
public static final ParseField CATEGORIZATION_EXAMPLES_LIMIT = new ParseField("categorization_examples_limit");
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ConstructingObjectParser<AnalysisLimits, Void> METADATA_PARSER = new ConstructingObjectParser<>(
"analysis_limits", true, a -> new AnalysisLimits(
a[0] == null ? PRE_6_1_DEFAULT_MODEL_MEMORY_LIMIT_MB : (Long) a[0],
a[1] == null ? DEFAULT_CATEGORIZATION_EXAMPLES_LIMIT : (Long) a[1]));
public static final ConstructingObjectParser<AnalysisLimits, Void> CONFIG_PARSER = new ConstructingObjectParser<>(
"analysis_limits", false, a -> new AnalysisLimits((Long) a[0], (Long) a[1]));
public static final Map<MlParserType, ConstructingObjectParser<AnalysisLimits, Void>> PARSERS =
new EnumMap<>(MlParserType.class);
public static final ConstructingObjectParser<AnalysisLimits, Void> LENIENT_PARSER = createParser(true);
public static final ConstructingObjectParser<AnalysisLimits, Void> STRICT_PARSER = createParser(false);
static {
PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
for (MlParserType parserType : MlParserType.values()) {
ConstructingObjectParser<AnalysisLimits, Void> parser = PARSERS.get(parserType);
assert parser != null;
parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return ByteSizeValue.parseBytesSizeValue(p.text(), MODEL_MEMORY_LIMIT.getPreferredName()).getMb();
} else if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) {
return p.longValue();
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, MODEL_MEMORY_LIMIT, ObjectParser.ValueType.VALUE);
parser.declareLong(ConstructingObjectParser.optionalConstructorArg(), CATEGORIZATION_EXAMPLES_LIMIT);
}
private static ConstructingObjectParser<AnalysisLimits, Void> createParser(boolean ignoreUnknownFields) {
ConstructingObjectParser<AnalysisLimits, Void> parser = new ConstructingObjectParser<>(
"analysis_limits", ignoreUnknownFields, a -> ignoreUnknownFields ? new AnalysisLimits(
a[0] == null ? PRE_6_1_DEFAULT_MODEL_MEMORY_LIMIT_MB : (Long) a[0],
a[1] == null ? DEFAULT_CATEGORIZATION_EXAMPLES_LIMIT : (Long) a[1]) : new AnalysisLimits((Long) a[0], (Long) a[1]));
parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return ByteSizeValue.parseBytesSizeValue(p.text(), MODEL_MEMORY_LIMIT.getPreferredName()).getMb();
} else if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) {
return p.longValue();
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, MODEL_MEMORY_LIMIT, ObjectParser.ValueType.VALUE);
parser.declareLong(ConstructingObjectParser.optionalConstructorArg(), CATEGORIZATION_EXAMPLES_LIMIT);
return parser;
}
/**

View File

@ -17,7 +17,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction;
import org.elasticsearch.xpack.core.ml.MlParserType;
import java.io.IOException;
import java.util.ArrayList;
@ -61,7 +60,8 @@ public class CategorizationAnalyzerConfig implements ToXContentFragment, Writeab
/**
* This method is only used in the unit tests - in production code this config is always parsed as a fragment.
*/
public static CategorizationAnalyzerConfig buildFromXContentObject(XContentParser parser, MlParserType parserType) throws IOException {
public static CategorizationAnalyzerConfig buildFromXContentObject(XContentParser parser, boolean ignoreUnknownFields)
throws IOException {
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new IllegalArgumentException("Expected start object but got [" + parser.currentToken() + "]");
@ -71,7 +71,7 @@ public class CategorizationAnalyzerConfig implements ToXContentFragment, Writeab
throw new IllegalArgumentException("Expected [" + CATEGORIZATION_ANALYZER + "] field but got [" + parser.currentToken() + "]");
}
parser.nextToken();
CategorizationAnalyzerConfig categorizationAnalyzerConfig = buildFromXContentFragment(parser, parserType);
CategorizationAnalyzerConfig categorizationAnalyzerConfig = buildFromXContentFragment(parser, ignoreUnknownFields);
parser.nextToken();
return categorizationAnalyzerConfig;
}
@ -83,7 +83,7 @@ public class CategorizationAnalyzerConfig implements ToXContentFragment, Writeab
*
* The parser is strict when parsing config and lenient when parsing cluster state.
*/
static CategorizationAnalyzerConfig buildFromXContentFragment(XContentParser parser, MlParserType parserType) throws IOException {
static CategorizationAnalyzerConfig buildFromXContentFragment(XContentParser parser, boolean ignoreUnknownFields) throws IOException {
CategorizationAnalyzerConfig.Builder builder = new CategorizationAnalyzerConfig.Builder();
@ -131,7 +131,7 @@ public class CategorizationAnalyzerConfig implements ToXContentFragment, Writeab
}
}
// Be lenient when parsing cluster state - assume unknown fields are from future versions
} else if (parserType == MlParserType.CONFIG) {
} else if (ignoreUnknownFields == false) {
throw new IllegalArgumentException("Parameter [" + currentFieldName + "] in [" + CATEGORIZATION_ANALYZER +
"] is unknown or of the wrong type [" + token + "]");
}

View File

@ -14,16 +14,13 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ml.MlParserType;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.ml.utils.time.DateTimeFormatterTimestampConverter;
import java.io.IOException;
import java.time.ZoneOffset;
import java.util.EnumMap;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
/**
@ -126,24 +123,20 @@ public class DataDescription implements ToXContentObject, Writeable {
private final Character quoteCharacter;
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ObjectParser<Builder, Void> METADATA_PARSER =
new ObjectParser<>(DATA_DESCRIPTION_FIELD.getPreferredName(), true, Builder::new);
public static final ObjectParser<Builder, Void> CONFIG_PARSER =
new ObjectParser<>(DATA_DESCRIPTION_FIELD.getPreferredName(), false, Builder::new);
public static final Map<MlParserType, ObjectParser<Builder, Void>> PARSERS = new EnumMap<>(MlParserType.class);
public static final ObjectParser<Builder, Void> LENIENT_PARSER = createParser(true);
public static final ObjectParser<Builder, Void> STRICT_PARSER = createParser(false);
static {
PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
for (MlParserType parserType : MlParserType.values()) {
ObjectParser<Builder, Void> parser = PARSERS.get(parserType);
assert parser != null;
parser.declareString(Builder::setFormat, FORMAT_FIELD);
parser.declareString(Builder::setTimeField, TIME_FIELD_NAME_FIELD);
parser.declareString(Builder::setTimeFormat, TIME_FORMAT_FIELD);
parser.declareField(Builder::setFieldDelimiter, DataDescription::extractChar, FIELD_DELIMITER_FIELD, ValueType.STRING);
parser.declareField(Builder::setQuoteCharacter, DataDescription::extractChar, QUOTE_CHARACTER_FIELD, ValueType.STRING);
}
private static ObjectParser<Builder, Void> createParser(boolean ignoreUnknownFields) {
ObjectParser<Builder, Void> parser =
new ObjectParser<>(DATA_DESCRIPTION_FIELD.getPreferredName(), ignoreUnknownFields, Builder::new);
parser.declareString(Builder::setFormat, FORMAT_FIELD);
parser.declareString(Builder::setTimeField, TIME_FIELD_NAME_FIELD);
parser.declareString(Builder::setTimeFormat, TIME_FORMAT_FIELD);
parser.declareField(Builder::setFieldDelimiter, DataDescription::extractChar, FIELD_DELIMITER_FIELD, ValueType.STRING);
parser.declareField(Builder::setQuoteCharacter, DataDescription::extractChar, QUOTE_CHARACTER_FIELD, ValueType.STRING);
return parser;
}
public DataDescription(DataFormat dataFormat, String timeFieldName, String timeFormat, Character fieldDelimiter,

View File

@ -13,17 +13,14 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.core.ml.MlParserType;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
@ -37,23 +34,18 @@ public class DetectionRule implements ToXContentObject, Writeable {
public static final ParseField CONDITIONS_FIELD = new ParseField("conditions");
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ObjectParser<Builder, Void> METADATA_PARSER =
new ObjectParser<>(DETECTION_RULE_FIELD.getPreferredName(), true, Builder::new);
public static final ObjectParser<Builder, Void> CONFIG_PARSER =
new ObjectParser<>(DETECTION_RULE_FIELD.getPreferredName(), false, Builder::new);
public static final Map<MlParserType, ObjectParser<Builder, Void>> PARSERS = new EnumMap<>(MlParserType.class);
public static final ObjectParser<Builder, Void> LENIENT_PARSER = createParser(true);
public static final ObjectParser<Builder, Void> STRICT_PARSER = createParser(false);
static {
PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
for (MlParserType parserType : MlParserType.values()) {
ObjectParser<Builder, Void> parser = PARSERS.get(parserType);
assert parser != null;
parser.declareStringArray(Builder::setActions, ACTIONS_FIELD);
parser.declareObject(Builder::setScope, RuleScope.parser(parserType), SCOPE_FIELD);
parser.declareObjectArray(Builder::setConditions, (p, c) ->
RuleCondition.PARSERS.get(parserType).apply(p, c), CONDITIONS_FIELD);
}
/**
 * Builds a {@code DetectionRule} parser.
 * <p>
 * Per the convention stated above: metadata is parsed leniently (to allow for
 * enhancements), whilst config is parsed strictly.
 *
 * @param ignoreUnknownFields {@code true} for the lenient (metadata) parser,
 *                            {@code false} for the strict (config) parser
 * @return the configured parser
 */
private static ObjectParser<Builder, Void> createParser(boolean ignoreUnknownFields) {
    ObjectParser<Builder, Void> parser = new ObjectParser<>(DETECTION_RULE_FIELD.getPreferredName(), ignoreUnknownFields, Builder::new);
    parser.declareStringArray(Builder::setActions, ACTIONS_FIELD);
    // Propagate strictness into the nested scope and condition parsers so the
    // whole object tree is parsed with consistent leniency.
    parser.declareObject(Builder::setScope, RuleScope.parser(ignoreUnknownFields), SCOPE_FIELD);
    parser.declareObjectArray(Builder::setConditions, ignoreUnknownFields ? RuleCondition.LENIENT_PARSER : RuleCondition.STRICT_PARSER,
            CONDITIONS_FIELD);
    return parser;
}
private final EnumSet<RuleAction> actions;

View File

@ -16,7 +16,6 @@ import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ml.MlParserType;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
@ -26,12 +25,10 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;
@ -89,33 +86,31 @@ public class Detector implements ToXContentObject, Writeable {
public static final ParseField DETECTOR_INDEX = new ParseField("detector_index");
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ObjectParser<Builder, Void> METADATA_PARSER = new ObjectParser<>("detector", true, Builder::new);
public static final ObjectParser<Builder, Void> CONFIG_PARSER = new ObjectParser<>("detector", false, Builder::new);
public static final Map<MlParserType, ObjectParser<Builder, Void>> PARSERS = new EnumMap<>(MlParserType.class);
public static final ObjectParser<Builder, Void> LENIENT_PARSER = createParser(true);
public static final ObjectParser<Builder, Void> STRICT_PARSER = createParser(false);
static {
PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
for (MlParserType parserType : MlParserType.values()) {
ObjectParser<Builder, Void> parser = PARSERS.get(parserType);
assert parser != null;
parser.declareString(Builder::setDetectorDescription, DETECTOR_DESCRIPTION_FIELD);
parser.declareString(Builder::setFunction, FUNCTION_FIELD);
parser.declareString(Builder::setFieldName, FIELD_NAME_FIELD);
parser.declareString(Builder::setByFieldName, BY_FIELD_NAME_FIELD);
parser.declareString(Builder::setOverFieldName, OVER_FIELD_NAME_FIELD);
parser.declareString(Builder::setPartitionFieldName, PARTITION_FIELD_NAME_FIELD);
parser.declareBoolean(Builder::setUseNull, USE_NULL_FIELD);
parser.declareField(Builder::setExcludeFrequent, p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return ExcludeFrequent.forString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, EXCLUDE_FREQUENT_FIELD, ObjectParser.ValueType.STRING);
parser.declareObjectArray(Builder::setRules, (p, c) ->
DetectionRule.PARSERS.get(parserType).apply(p, c).build(), CUSTOM_RULES_FIELD);
parser.declareInt(Builder::setDetectorIndex, DETECTOR_INDEX);
}
/**
 * Builds a {@code Detector} parser.
 * <p>
 * Per the convention stated above: metadata is parsed leniently (to allow for
 * enhancements), whilst config is parsed strictly.
 *
 * @param ignoreUnknownFields {@code true} for the lenient (metadata) parser,
 *                            {@code false} for the strict (config) parser
 * @return the configured parser
 */
private static ObjectParser<Builder, Void> createParser(boolean ignoreUnknownFields) {
    ObjectParser<Builder, Void> parser = new ObjectParser<>("detector", ignoreUnknownFields, Builder::new);
    parser.declareString(Builder::setDetectorDescription, DETECTOR_DESCRIPTION_FIELD);
    parser.declareString(Builder::setFunction, FUNCTION_FIELD);
    parser.declareString(Builder::setFieldName, FIELD_NAME_FIELD);
    parser.declareString(Builder::setByFieldName, BY_FIELD_NAME_FIELD);
    parser.declareString(Builder::setOverFieldName, OVER_FIELD_NAME_FIELD);
    parser.declareString(Builder::setPartitionFieldName, PARTITION_FIELD_NAME_FIELD);
    parser.declareBoolean(Builder::setUseNull, USE_NULL_FIELD);
    // exclude_frequent is accepted only as a string token and mapped to the enum
    parser.declareField(Builder::setExcludeFrequent, p -> {
        if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
            return ExcludeFrequent.forString(p.text());
        }
        throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
    }, EXCLUDE_FREQUENT_FIELD, ObjectParser.ValueType.STRING);
    // Nested custom rules inherit this parser's strictness
    parser.declareObjectArray(Builder::setRules,
            (p, c) -> (ignoreUnknownFields ? DetectionRule.LENIENT_PARSER : DetectionRule.STRICT_PARSER).apply(p, c).build(),
            CUSTOM_RULES_FIELD);
    parser.declareInt(Builder::setDetectorIndex, DETECTOR_INDEX);
    return parser;
}
public static final String BY = "by";

View File

@ -14,12 +14,9 @@ import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ml.MlParserType;
import java.io.IOException;
import java.util.EnumMap;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
public class FilterRef implements ToXContentObject, Writeable {
@ -42,28 +39,22 @@ public class FilterRef implements ToXContentObject, Writeable {
}
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ConstructingObjectParser<FilterRef, Void> METADATA_PARSER =
new ConstructingObjectParser<>(FILTER_REF_FIELD.getPreferredName(), true,
a -> new FilterRef((String) a[0], (FilterType) a[1]));
public static final ConstructingObjectParser<FilterRef, Void> CONFIG_PARSER =
new ConstructingObjectParser<>(FILTER_REF_FIELD.getPreferredName(), false,
a -> new FilterRef((String) a[0], (FilterType) a[1]));
public static final Map<MlParserType, ConstructingObjectParser<FilterRef, Void>> PARSERS = new EnumMap<>(MlParserType.class);
public static final ConstructingObjectParser<FilterRef, Void> LENIENT_PARSER = createParser(true);
public static final ConstructingObjectParser<FilterRef, Void> STRICT_PARSER = createParser(false);
static {
PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
for (MlParserType parserType : MlParserType.values()) {
ConstructingObjectParser<FilterRef, Void> parser = PARSERS.get(parserType);
assert parser != null;
parser.declareString(ConstructingObjectParser.constructorArg(), FILTER_ID);
parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return FilterType.fromString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, FILTER_TYPE, ObjectParser.ValueType.STRING);
}
/**
 * Builds a {@code FilterRef} parser.
 * <p>
 * Per the convention stated above: metadata is parsed leniently (to allow for
 * enhancements), whilst config is parsed strictly.
 * Note: the declare calls below are in constructor-argument order — a[0] is the
 * required filter_id, a[1] the optional filter_type — so their order must not change.
 *
 * @param ignoreUnknownFields {@code true} for the lenient (metadata) parser,
 *                            {@code false} for the strict (config) parser
 * @return the configured parser
 */
private static ConstructingObjectParser<FilterRef, Void> createParser(boolean ignoreUnknownFields) {
    ConstructingObjectParser<FilterRef, Void> parser = new ConstructingObjectParser<>(FILTER_REF_FIELD.getPreferredName(),
            ignoreUnknownFields, a -> new FilterRef((String) a[0], (FilterType) a[1]));
    parser.declareString(ConstructingObjectParser.constructorArg(), FILTER_ID);
    // filter_type is accepted only as a string token and mapped to the enum
    parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
        if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
            return FilterType.fromString(p.text());
        }
        throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
    }, FILTER_TYPE, ObjectParser.ValueType.STRING);
    return parser;
}
private final String filterId;

View File

@ -21,7 +21,6 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.xpack.core.ml.MlParserType;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields;
import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts;
@ -34,7 +33,6 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.EnumMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@ -85,69 +83,70 @@ public class Job extends AbstractDiffable<Job> implements Writeable, ToXContentO
public static final ParseField RESULTS_FIELD = new ParseField("jobs");
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ObjectParser<Builder, Void> METADATA_PARSER = new ObjectParser<>("job_details", true, Builder::new);
public static final ObjectParser<Builder, Void> CONFIG_PARSER = new ObjectParser<>("job_details", false, Builder::new);
public static final Map<MlParserType, ObjectParser<Builder, Void>> PARSERS = new EnumMap<>(MlParserType.class);
public static final ObjectParser<Builder, Void> LENIENT_PARSER = createParser(true);
public static final ObjectParser<Builder, Void> STRICT_PARSER = createParser(false);
public static final TimeValue MIN_BACKGROUND_PERSIST_INTERVAL = TimeValue.timeValueHours(1);
public static final ByteSizeValue PROCESS_MEMORY_OVERHEAD = new ByteSizeValue(100, ByteSizeUnit.MB);
public static final long DEFAULT_MODEL_SNAPSHOT_RETENTION_DAYS = 1;
static {
PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
for (MlParserType parserType : MlParserType.values()) {
ObjectParser<Builder, Void> parser = PARSERS.get(parserType);
assert parser != null;
parser.declareString(Builder::setId, ID);
parser.declareString(Builder::setJobType, JOB_TYPE);
parser.declareString(Builder::setJobVersion, JOB_VERSION);
parser.declareStringArray(Builder::setGroups, GROUPS);
parser.declareStringOrNull(Builder::setDescription, DESCRIPTION);
parser.declareField(Builder::setCreateTime, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException("unexpected token [" + p.currentToken() +
"] for [" + CREATE_TIME.getPreferredName() + "]");
}, CREATE_TIME, ValueType.VALUE);
parser.declareField(Builder::setFinishedTime, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException(
"unexpected token [" + p.currentToken() + "] for [" + FINISHED_TIME.getPreferredName() + "]");
}, FINISHED_TIME, ValueType.VALUE);
parser.declareField(Builder::setLastDataTime, p -> {
if (p.currentToken() == Token.VALUE_NUMBER) {
return new Date(p.longValue());
} else if (p.currentToken() == Token.VALUE_STRING) {
return new Date(TimeUtils.dateStringToEpoch(p.text()));
}
throw new IllegalArgumentException(
"unexpected token [" + p.currentToken() + "] for [" + LAST_DATA_TIME.getPreferredName() + "]");
}, LAST_DATA_TIME, ValueType.VALUE);
parser.declareLong(Builder::setEstablishedModelMemory, ESTABLISHED_MODEL_MEMORY);
parser.declareObject(Builder::setAnalysisConfig, AnalysisConfig.PARSERS.get(parserType), ANALYSIS_CONFIG);
parser.declareObject(Builder::setAnalysisLimits, AnalysisLimits.PARSERS.get(parserType), ANALYSIS_LIMITS);
parser.declareObject(Builder::setDataDescription, DataDescription.PARSERS.get(parserType), DATA_DESCRIPTION);
parser.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.PARSERS.get(parserType), MODEL_PLOT_CONFIG);
parser.declareLong(Builder::setRenormalizationWindowDays, RENORMALIZATION_WINDOW_DAYS);
parser.declareString((builder, val) -> builder.setBackgroundPersistInterval(
TimeValue.parseTimeValue(val, BACKGROUND_PERSIST_INTERVAL.getPreferredName())), BACKGROUND_PERSIST_INTERVAL);
parser.declareLong(Builder::setResultsRetentionDays, RESULTS_RETENTION_DAYS);
parser.declareLong(Builder::setModelSnapshotRetentionDays, MODEL_SNAPSHOT_RETENTION_DAYS);
parser.declareField(Builder::setCustomSettings, (p, c) -> p.map(), CUSTOM_SETTINGS, ValueType.OBJECT);
parser.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID);
parser.declareStringOrNull(Builder::setModelSnapshotMinVersion, MODEL_SNAPSHOT_MIN_VERSION);
parser.declareString(Builder::setResultsIndexName, RESULTS_INDEX_NAME);
parser.declareBoolean(Builder::setDeleted, DELETED);
}
/**
 * Builds a {@code Job} parser ("job_details").
 * <p>
 * Per the convention stated above: metadata is parsed leniently (to allow for
 * enhancements), whilst config is parsed strictly. The chosen strictness is
 * propagated to all nested object parsers (analysis config/limits, data
 * description, model plot config).
 *
 * @param ignoreUnknownFields {@code true} for the lenient (metadata) parser,
 *                            {@code false} for the strict (config) parser
 * @return the configured parser
 */
private static ObjectParser<Builder, Void> createParser(boolean ignoreUnknownFields) {
    ObjectParser<Builder, Void> parser = new ObjectParser<>("job_details", ignoreUnknownFields, Builder::new);
    parser.declareString(Builder::setId, ID);
    parser.declareString(Builder::setJobType, JOB_TYPE);
    parser.declareString(Builder::setJobVersion, JOB_VERSION);
    parser.declareStringArray(Builder::setGroups, GROUPS);
    parser.declareStringOrNull(Builder::setDescription, DESCRIPTION);
    // The three date fields below accept either epoch millis (numeric) or a date string
    parser.declareField(Builder::setCreateTime, p -> {
        if (p.currentToken() == Token.VALUE_NUMBER) {
            return new Date(p.longValue());
        } else if (p.currentToken() == Token.VALUE_STRING) {
            return new Date(TimeUtils.dateStringToEpoch(p.text()));
        }
        throw new IllegalArgumentException("unexpected token [" + p.currentToken() +
                "] for [" + CREATE_TIME.getPreferredName() + "]");
    }, CREATE_TIME, ValueType.VALUE);
    parser.declareField(Builder::setFinishedTime, p -> {
        if (p.currentToken() == Token.VALUE_NUMBER) {
            return new Date(p.longValue());
        } else if (p.currentToken() == Token.VALUE_STRING) {
            return new Date(TimeUtils.dateStringToEpoch(p.text()));
        }
        throw new IllegalArgumentException(
                "unexpected token [" + p.currentToken() + "] for [" + FINISHED_TIME.getPreferredName() + "]");
    }, FINISHED_TIME, ValueType.VALUE);
    parser.declareField(Builder::setLastDataTime, p -> {
        if (p.currentToken() == Token.VALUE_NUMBER) {
            return new Date(p.longValue());
        } else if (p.currentToken() == Token.VALUE_STRING) {
            return new Date(TimeUtils.dateStringToEpoch(p.text()));
        }
        throw new IllegalArgumentException(
                "unexpected token [" + p.currentToken() + "] for [" + LAST_DATA_TIME.getPreferredName() + "]");
    }, LAST_DATA_TIME, ValueType.VALUE);
    parser.declareLong(Builder::setEstablishedModelMemory, ESTABLISHED_MODEL_MEMORY);
    // Nested configs inherit this parser's strictness via the ternary selection
    parser.declareObject(Builder::setAnalysisConfig, ignoreUnknownFields ? AnalysisConfig.LENIENT_PARSER : AnalysisConfig.STRICT_PARSER,
            ANALYSIS_CONFIG);
    parser.declareObject(Builder::setAnalysisLimits, ignoreUnknownFields ? AnalysisLimits.LENIENT_PARSER : AnalysisLimits.STRICT_PARSER,
            ANALYSIS_LIMITS);
    parser.declareObject(Builder::setDataDescription,
            ignoreUnknownFields ? DataDescription.LENIENT_PARSER : DataDescription.STRICT_PARSER, DATA_DESCRIPTION);
    parser.declareObject(Builder::setModelPlotConfig,
            ignoreUnknownFields ? ModelPlotConfig.LENIENT_PARSER : ModelPlotConfig.STRICT_PARSER, MODEL_PLOT_CONFIG);
    parser.declareLong(Builder::setRenormalizationWindowDays, RENORMALIZATION_WINDOW_DAYS);
    // background_persist_interval is supplied as a time-value string, e.g. "1h"
    parser.declareString((builder, val) -> builder.setBackgroundPersistInterval(
            TimeValue.parseTimeValue(val, BACKGROUND_PERSIST_INTERVAL.getPreferredName())), BACKGROUND_PERSIST_INTERVAL);
    parser.declareLong(Builder::setResultsRetentionDays, RESULTS_RETENTION_DAYS);
    parser.declareLong(Builder::setModelSnapshotRetentionDays, MODEL_SNAPSHOT_RETENTION_DAYS);
    // custom_settings is an arbitrary user-supplied object, kept as a raw map
    parser.declareField(Builder::setCustomSettings, (p, c) -> p.map(), CUSTOM_SETTINGS, ValueType.OBJECT);
    parser.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID);
    parser.declareStringOrNull(Builder::setModelSnapshotMinVersion, MODEL_SNAPSHOT_MIN_VERSION);
    parser.declareString(Builder::setResultsIndexName, RESULTS_INDEX_NAME);
    parser.declareBoolean(Builder::setDeleted, DELETED);
    return parser;
}
private final String jobId;

View File

@ -44,8 +44,8 @@ public class JobUpdate implements Writeable, ToXContentObject {
parser.declareStringArray(Builder::setGroups, Job.GROUPS);
parser.declareStringOrNull(Builder::setDescription, Job.DESCRIPTION);
parser.declareObjectArray(Builder::setDetectorUpdates, DetectorUpdate.PARSER, DETECTORS);
parser.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.CONFIG_PARSER, Job.MODEL_PLOT_CONFIG);
parser.declareObject(Builder::setAnalysisLimits, AnalysisLimits.CONFIG_PARSER, Job.ANALYSIS_LIMITS);
parser.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.STRICT_PARSER, Job.MODEL_PLOT_CONFIG);
parser.declareObject(Builder::setAnalysisLimits, AnalysisLimits.STRICT_PARSER, Job.ANALYSIS_LIMITS);
parser.declareString((builder, val) -> builder.setBackgroundPersistInterval(
TimeValue.parseTimeValue(val, Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName())), Job.BACKGROUND_PERSIST_INTERVAL);
parser.declareLong(Builder::setRenormalizationWindowDays, Job.RENORMALIZATION_WINDOW_DAYS);
@ -533,7 +533,7 @@ public class JobUpdate implements Writeable, ToXContentObject {
PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), Detector.DETECTOR_INDEX);
PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), Job.DESCRIPTION);
PARSER.declareObjectArray(ConstructingObjectParser.optionalConstructorArg(), (parser, parseFieldMatcher) ->
DetectionRule.CONFIG_PARSER.apply(parser, parseFieldMatcher).build(), Detector.CUSTOM_RULES_FIELD);
DetectionRule.STRICT_PARSER.apply(parser, parseFieldMatcher).build(), Detector.CUSTOM_RULES_FIELD);
}
private int detectorIndex;

View File

@ -12,11 +12,8 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.core.ml.MlParserType;
import java.io.IOException;
import java.util.EnumMap;
import java.util.Map;
import java.util.Objects;
public class ModelPlotConfig implements ToXContentObject, Writeable {
@ -26,24 +23,17 @@ public class ModelPlotConfig implements ToXContentObject, Writeable {
public static final ParseField TERMS_FIELD = new ParseField("terms");
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ConstructingObjectParser<ModelPlotConfig, Void> METADATA_PARSER =
new ConstructingObjectParser<>(TYPE_FIELD.getPreferredName(), true,
a -> new ModelPlotConfig((boolean) a[0], (String) a[1]));
public static final ConstructingObjectParser<ModelPlotConfig, Void> CONFIG_PARSER =
new ConstructingObjectParser<>(TYPE_FIELD.getPreferredName(), false,
a -> new ModelPlotConfig((boolean) a[0], (String) a[1]));
public static final Map<MlParserType, ConstructingObjectParser<ModelPlotConfig, Void>> PARSERS =
new EnumMap<>(MlParserType.class);
public static final ConstructingObjectParser<ModelPlotConfig, Void> LENIENT_PARSER = createParser(true);
public static final ConstructingObjectParser<ModelPlotConfig, Void> STRICT_PARSER = createParser(false);
static {
PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
for (MlParserType parserType : MlParserType.values()) {
ConstructingObjectParser<ModelPlotConfig, Void> parser = PARSERS.get(parserType);
assert parser != null;
parser.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED_FIELD);
parser.declareString(ConstructingObjectParser.optionalConstructorArg(), TERMS_FIELD);
}
/**
 * Builds a {@code ModelPlotConfig} parser.
 * <p>
 * Per the convention stated above: metadata is parsed leniently (to allow for
 * enhancements), whilst config is parsed strictly.
 * Note: the declare calls below are in constructor-argument order — a[0] is the
 * required enabled flag, a[1] the optional terms string — so their order must not change.
 *
 * @param ignoreUnknownFields {@code true} for the lenient (metadata) parser,
 *                            {@code false} for the strict (config) parser
 * @return the configured parser
 */
private static ConstructingObjectParser<ModelPlotConfig, Void> createParser(boolean ignoreUnknownFields) {
    ConstructingObjectParser<ModelPlotConfig, Void> parser = new ConstructingObjectParser<>(TYPE_FIELD.getPreferredName(),
            ignoreUnknownFields, a -> new ModelPlotConfig((boolean) a[0], (String) a[1]));
    parser.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED_FIELD);
    parser.declareString(ConstructingObjectParser.optionalConstructorArg(), TERMS_FIELD);
    return parser;
}
private final boolean enabled;

View File

@ -14,12 +14,9 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ml.MlParserType;
import java.io.IOException;
import java.util.EnumMap;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
public class RuleCondition implements ToXContentObject, Writeable {
@ -30,35 +27,28 @@ public class RuleCondition implements ToXContentObject, Writeable {
public static final ParseField VALUE_FIELD = new ParseField("value");
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ConstructingObjectParser<RuleCondition, Void> METADATA_PARSER =
new ConstructingObjectParser<>(RULE_CONDITION_FIELD.getPreferredName(), true,
a -> new RuleCondition((AppliesTo) a[0], (Operator) a[1], (double) a[2]));
public static final ConstructingObjectParser<RuleCondition, Void> CONFIG_PARSER =
new ConstructingObjectParser<>(RULE_CONDITION_FIELD.getPreferredName(), false,
a -> new RuleCondition((AppliesTo) a[0], (Operator) a[1], (double) a[2]));
public static final Map<MlParserType, ConstructingObjectParser<RuleCondition, Void>> PARSERS =
new EnumMap<>(MlParserType.class);
public static final ConstructingObjectParser<RuleCondition, Void> LENIENT_PARSER = createParser(true);
public static final ConstructingObjectParser<RuleCondition, Void> STRICT_PARSER = createParser(false);
static {
PARSERS.put(MlParserType.METADATA, METADATA_PARSER);
PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER);
for (MlParserType parserType : MlParserType.values()) {
ConstructingObjectParser<RuleCondition, Void> parser = PARSERS.get(parserType);
assert parser != null;
parser.declareField(ConstructingObjectParser.constructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return AppliesTo.fromString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, APPLIES_TO_FIELD, ValueType.STRING);
parser.declareField(ConstructingObjectParser.constructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return Operator.fromString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, Operator.OPERATOR_FIELD, ValueType.STRING);
parser.declareDouble(ConstructingObjectParser.constructorArg(), VALUE_FIELD);
}
/**
 * Builds a {@code RuleCondition} parser.
 * <p>
 * Per the convention stated above: metadata is parsed leniently (to allow for
 * enhancements), whilst config is parsed strictly.
 * Note: the declare calls below are in constructor-argument order — a[0] is
 * applies_to, a[1] the operator, a[2] the value — so their order must not change.
 *
 * @param ignoreUnknownFields {@code true} for the lenient (metadata) parser,
 *                            {@code false} for the strict (config) parser
 * @return the configured parser
 */
private static ConstructingObjectParser<RuleCondition, Void> createParser(boolean ignoreUnknownFields) {
    ConstructingObjectParser<RuleCondition, Void> parser = new ConstructingObjectParser<>(RULE_CONDITION_FIELD.getPreferredName(),
            ignoreUnknownFields, a -> new RuleCondition((AppliesTo) a[0], (Operator) a[1], (double) a[2]));
    // applies_to is accepted only as a string token and mapped to the enum
    parser.declareField(ConstructingObjectParser.constructorArg(), p -> {
        if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
            return AppliesTo.fromString(p.text());
        }
        throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
    }, APPLIES_TO_FIELD, ValueType.STRING);
    // operator is likewise string-only
    parser.declareField(ConstructingObjectParser.constructorArg(), p -> {
        if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
            return Operator.fromString(p.text());
        }
        throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
    }, Operator.OPERATOR_FIELD, ValueType.STRING);
    parser.declareDouble(ConstructingObjectParser.constructorArg(), VALUE_FIELD);
    return parser;
}
private final AppliesTo appliesTo;

View File

@ -17,7 +17,6 @@ import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ml.MlParserType;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
@ -32,13 +31,14 @@ import java.util.stream.Collectors;
public class RuleScope implements ToXContentObject, Writeable {
public static ContextParser<Void, RuleScope> parser(MlParserType parserType) {
public static ContextParser<Void, RuleScope> parser(boolean ignoreUnknownFields) {
return (p, c) -> {
Map<String, Object> unparsedScope = p.map();
if (unparsedScope.isEmpty()) {
return new RuleScope();
}
ConstructingObjectParser<FilterRef, Void> filterRefParser = FilterRef.PARSERS.get(parserType);
ConstructingObjectParser<FilterRef, Void> filterRefParser =
ignoreUnknownFields ? FilterRef.LENIENT_PARSER : FilterRef.STRICT_PARSER;
Map<String, FilterRef> scope = new HashMap<>();
for (Map.Entry<String, Object> entry : unparsedScope.entrySet()) {
try (XContentBuilder builder = XContentFactory.jsonBuilder()) {

View File

@ -32,7 +32,7 @@ public class ChunkingConfigTests extends AbstractSerializingTestCase<ChunkingCon
@Override
protected ChunkingConfig doParseInstance(XContentParser parser) {
return ChunkingConfig.CONFIG_PARSER.apply(parser, null);
return ChunkingConfig.STRICT_PARSER.apply(parser, null);
}
public void testConstructorGivenAutoAndTimeSpan() {

View File

@ -140,7 +140,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
@Override
protected DatafeedConfig doParseInstance(XContentParser parser) {
return DatafeedConfig.CONFIG_PARSER.apply(parser, null).build();
return DatafeedConfig.STRICT_PARSER.apply(parser, null).build();
}
private static final String FUTURE_DATAFEED = "{\n" +
@ -156,7 +156,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED);
XContentParseException e = expectThrows(XContentParseException.class,
() -> DatafeedConfig.CONFIG_PARSER.apply(parser, null).build());
() -> DatafeedConfig.STRICT_PARSER.apply(parser, null).build());
assertEquals("[6:5] [datafeed_config] unknown field [tomorrows_technology_today], parser not found", e.getMessage());
}
@ -164,7 +164,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED);
// Unlike the config version of this test, the metadata parser should tolerate the unknown future field
assertNotNull(DatafeedConfig.METADATA_PARSER.apply(parser, null).build());
assertNotNull(DatafeedConfig.LENIENT_PARSER.apply(parser, null).build());
}
public void testCopyConstructor() {

View File

@ -120,7 +120,7 @@ public class AnalysisConfigTests extends AbstractSerializingTestCase<AnalysisCon
@Override
protected AnalysisConfig doParseInstance(XContentParser parser) {
return AnalysisConfig.CONFIG_PARSER.apply(parser, null).build();
return AnalysisConfig.STRICT_PARSER.apply(parser, null).build();
}
public void testFieldConfiguration_singleDetector_notPreSummarised() {

View File

@ -41,14 +41,14 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLim
@Override
protected AnalysisLimits doParseInstance(XContentParser parser) {
return AnalysisLimits.CONFIG_PARSER.apply(parser, null);
return AnalysisLimits.STRICT_PARSER.apply(parser, null);
}
public void testParseModelMemoryLimitGivenNegativeNumber() throws IOException {
String json = "{\"model_memory_limit\": -1}";
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null));
XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.STRICT_PARSER.apply(parser, null));
assertThat(ExceptionsHelper.detailedMessage(e), containsString("model_memory_limit must be at least 1 MiB. Value = -1"));
}
@ -56,7 +56,7 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLim
String json = "{\"model_memory_limit\": 0}";
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null));
XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.STRICT_PARSER.apply(parser, null));
assertThat(ExceptionsHelper.detailedMessage(e), containsString("model_memory_limit must be at least 1 MiB. Value = 0"));
}
@ -65,7 +65,7 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLim
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
AnalysisLimits limits = AnalysisLimits.CONFIG_PARSER.apply(parser, null);
AnalysisLimits limits = AnalysisLimits.STRICT_PARSER.apply(parser, null);
assertThat(limits.getModelMemoryLimit(), equalTo(2048L));
}
@ -74,7 +74,7 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLim
String json = "{\"model_memory_limit\":\"-4MB\"}";
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null));
XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.STRICT_PARSER.apply(parser, null));
assertThat(ExceptionsHelper.detailedMessage(e), containsString("Values less than -1 bytes are not supported: -4mb"));
}
@ -82,7 +82,7 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLim
String json = "{\"model_memory_limit\":\"0MB\"}";
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null));
XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.STRICT_PARSER.apply(parser, null));
assertThat(ExceptionsHelper.detailedMessage(e), containsString("model_memory_limit must be at least 1 MiB. Value = 0"));
}
@ -90,7 +90,7 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLim
String json = "{\"model_memory_limit\":\"1000Kb\"}";
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null));
XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.STRICT_PARSER.apply(parser, null));
assertThat(ExceptionsHelper.detailedMessage(e), containsString("model_memory_limit must be at least 1 MiB. Value = 0"));
}
@ -99,7 +99,7 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLim
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
AnalysisLimits limits = AnalysisLimits.CONFIG_PARSER.apply(parser, null);
AnalysisLimits limits = AnalysisLimits.STRICT_PARSER.apply(parser, null);
assertThat(limits.getModelMemoryLimit(), equalTo(4096L));
}
@ -109,7 +109,7 @@ public class AnalysisLimitsTests extends AbstractSerializingTestCase<AnalysisLim
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
AnalysisLimits limits = AnalysisLimits.CONFIG_PARSER.apply(parser, null);
AnalysisLimits limits = AnalysisLimits.STRICT_PARSER.apply(parser, null);
assertThat(limits.getModelMemoryLimit(), equalTo(1L));
}

View File

@ -212,7 +212,7 @@ public class DataDescriptionTests extends AbstractSerializingTestCase<DataDescri
XContentParser parser = JsonXContent.jsonXContent
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json.streamInput());
XContentParseException ex = expectThrows(XContentParseException.class,
() -> DataDescription.CONFIG_PARSER.apply(parser, null));
() -> DataDescription.STRICT_PARSER.apply(parser, null));
assertThat(ex.getMessage(), containsString("[data_description] failed to parse field [format]"));
Throwable cause = ex.getCause();
assertNotNull(cause);
@ -226,7 +226,7 @@ public class DataDescriptionTests extends AbstractSerializingTestCase<DataDescri
XContentParser parser = JsonXContent.jsonXContent
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json.streamInput());
XContentParseException ex = expectThrows(XContentParseException.class,
() -> DataDescription.CONFIG_PARSER.apply(parser, null));
() -> DataDescription.STRICT_PARSER.apply(parser, null));
assertThat(ex.getMessage(), containsString("[data_description] failed to parse field [field_delimiter]"));
Throwable cause = ex.getCause();
assertNotNull(cause);
@ -240,7 +240,7 @@ public class DataDescriptionTests extends AbstractSerializingTestCase<DataDescri
XContentParser parser = JsonXContent.jsonXContent
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json.streamInput());
XContentParseException ex = expectThrows(XContentParseException.class,
() -> DataDescription.CONFIG_PARSER.apply(parser, null));
() -> DataDescription.STRICT_PARSER.apply(parser, null));
assertThat(ex.getMessage(), containsString("[data_description] failed to parse field [quote_character]"));
Throwable cause = ex.getCause();
assertNotNull(cause);
@ -284,7 +284,7 @@ public class DataDescriptionTests extends AbstractSerializingTestCase<DataDescri
@Override
protected DataDescription doParseInstance(XContentParser parser) {
return DataDescription.CONFIG_PARSER.apply(parser, null).build();
return DataDescription.STRICT_PARSER.apply(parser, null).build();
}
protected DataDescription mutateInstance(DataDescription instance) throws java.io.IOException {

View File

@ -90,7 +90,7 @@ public class DetectionRuleTests extends AbstractSerializingTestCase<DetectionRul
@Override
protected DetectionRule doParseInstance(XContentParser parser) {
return DetectionRule.CONFIG_PARSER.apply(parser, null).build();
return DetectionRule.STRICT_PARSER.apply(parser, null).build();
}
@Override

View File

@ -182,7 +182,7 @@ public class DetectorTests extends AbstractSerializingTestCase<Detector> {
@Override
protected Detector doParseInstance(XContentParser parser) {
return Detector.CONFIG_PARSER.apply(parser, null).build();
return Detector.STRICT_PARSER.apply(parser, null).build();
}
public void testVerifyFieldNames_givenInvalidChars() {

View File

@ -20,11 +20,11 @@ public class FilterRefTests extends AbstractSerializingTestCase<FilterRef> {
@Override
protected FilterRef doParseInstance(XContentParser parser) throws IOException {
return FilterRef.CONFIG_PARSER.parse(parser, null);
return FilterRef.STRICT_PARSER.parse(parser, null);
}
@Override
protected Writeable.Reader<FilterRef> instanceReader() {
return FilterRef::new;
}
}
}

View File

@ -74,14 +74,14 @@ public class JobTests extends AbstractSerializingTestCase<Job> {
@Override
protected Job doParseInstance(XContentParser parser) {
return Job.CONFIG_PARSER.apply(parser, null).build();
return Job.STRICT_PARSER.apply(parser, null).build();
}
public void testFutureConfigParse() throws IOException {
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_JOB);
XContentParseException e = expectThrows(XContentParseException.class,
() -> Job.CONFIG_PARSER.apply(parser, null).build());
() -> Job.STRICT_PARSER.apply(parser, null).build());
assertEquals("[4:5] [job_details] unknown field [tomorrows_technology_today], parser not found", e.getMessage());
}
@ -89,7 +89,7 @@ public class JobTests extends AbstractSerializingTestCase<Job> {
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_JOB);
// Unlike the config version of this test, the metadata parser should tolerate the unknown future field
assertNotNull(Job.METADATA_PARSER.apply(parser, null).build());
assertNotNull(Job.LENIENT_PARSER.apply(parser, null).build());
}
public void testConstructor_GivenEmptyJobConfiguration() {

View File

@ -31,6 +31,6 @@ public class ModelPlotConfigTests extends AbstractSerializingTestCase<ModelPlotC
@Override
protected ModelPlotConfig doParseInstance(XContentParser parser) {
return ModelPlotConfig.CONFIG_PARSER.apply(parser, null);
return ModelPlotConfig.STRICT_PARSER.apply(parser, null);
}
}

View File

@ -29,7 +29,7 @@ public class RuleConditionTests extends AbstractSerializingTestCase<RuleConditio
@Override
protected RuleCondition doParseInstance(XContentParser parser) {
return RuleCondition.CONFIG_PARSER.apply(parser, null);
return RuleCondition.STRICT_PARSER.apply(parser, null);
}
public void testEqualsGivenSameObject() {

View File

@ -79,7 +79,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
@Override
protected MlMetadata doParseInstance(XContentParser parser) {
return MlMetadata.METADATA_PARSER.apply(parser, null).build();
return MlMetadata.LENIENT_PARSER.apply(parser, null).build();
}
@Override

View File

@ -9,7 +9,6 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import org.elasticsearch.xpack.core.ml.job.config.CategorizationAnalyzerConfig;
import org.elasticsearch.xpack.core.ml.MlParserType;
import java.io.IOException;
import java.util.HashMap;
@ -72,7 +71,7 @@ public class CategorizationAnalyzerConfigTests extends AbstractSerializingTestCa
@Override
protected CategorizationAnalyzerConfig doParseInstance(XContentParser parser) throws IOException {
return CategorizationAnalyzerConfig.buildFromXContentObject(parser, MlParserType.CONFIG);
return CategorizationAnalyzerConfig.buildFromXContentObject(parser, false);
}
@Override

View File

@ -86,6 +86,6 @@ public class JobBuilderTests extends AbstractSerializingTestCase<Job.Builder> {
@Override
protected Job.Builder doParseInstance(XContentParser parser) {
return Job.CONFIG_PARSER.apply(parser, null);
return Job.STRICT_PARSER.apply(parser, null);
}
}